Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6:
[SCSI] qla2xxx: Return DID_NO_CONNECT when FC device is lost.
[SCSI] mptfusion: Bump version 03.04.18
[SCSI] mptfusion: Fix Incorrect return value in mptscsih_dev_reset
[SCSI] mptfusion: mptctl_release is required in mptctl.c
[SCSI] target: fix use after free detected by SLUB poison
[SCSI] target: Remove procfs based target_core_mib.c code
[SCSI] target: Fix SCF_SCSI_CONTROL_SG_IO_CDB breakage
[SCSI] target: Fix top-level configfs_subsystem default_group shutdown breakage
[SCSI] target: fixed missing lock drop in error path
[SCSI] target: Fix demo-mode MappedLUN shutdown UA/PR breakage
[SCSI] target/iblock: Fix failed bd claim NULL pointer dereference
[SCSI] target: iblock/pscsi claim checking for NULL instead of IS_ERR
[SCSI] scsi_debug: Fix 32-bit overflow in do_device_access causing memory corruption
[SCSI] qla2xxx: Change from irq to irqsave with host_lock
[SCSI] qla2xxx: Fix race that could hang kthread_stop()

+278 -1256
+2 -2
drivers/message/fusion/mptbase.h
··· 76 #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR 77 #endif 78 79 - #define MPT_LINUX_VERSION_COMMON "3.04.17" 80 - #define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.17" 81 #define WHAT_MAGIC_STRING "@" "(" "#" ")" 82 83 #define show_mptmod_ver(s,ver) \
··· 76 #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR 77 #endif 78 79 + #define MPT_LINUX_VERSION_COMMON "3.04.18" 80 + #define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.18" 81 #define WHAT_MAGIC_STRING "@" "(" "#" ")" 82 83 #define show_mptmod_ver(s,ver) \
+8
drivers/message/fusion/mptctl.c
··· 597 } 598 599 static int 600 mptctl_fasync(int fd, struct file *filep, int mode) 601 { 602 MPT_ADAPTER *ioc; ··· 2822 .llseek = no_llseek, 2823 .fasync = mptctl_fasync, 2824 .unlocked_ioctl = mptctl_ioctl, 2825 #ifdef CONFIG_COMPAT 2826 .compat_ioctl = compat_mpctl_ioctl, 2827 #endif
··· 597 } 598 599 static int 600 + mptctl_release(struct inode *inode, struct file *filep) 601 + { 602 + fasync_helper(-1, filep, 0, &async_queue); 603 + return 0; 604 + } 605 + 606 + static int 607 mptctl_fasync(int fd, struct file *filep, int mode) 608 { 609 MPT_ADAPTER *ioc; ··· 2815 .llseek = no_llseek, 2816 .fasync = mptctl_fasync, 2817 .unlocked_ioctl = mptctl_ioctl, 2818 + .release = mptctl_release, 2819 #ifdef CONFIG_COMPAT 2820 .compat_ioctl = compat_mpctl_ioctl, 2821 #endif
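The mptctl fix follows the standard character-driver rule: a driver that registers files on an fasync queue must also unregister them in its ->release() method, otherwise a later kill_fasync() can walk the queue into a freed struct file. A minimal sketch of the pattern (the my_* names are hypothetical, not mptfusion code):

    static struct fasync_struct *my_async_queue;

    static int my_fasync(int fd, struct file *filp, int mode)
    {
            /* add or remove filp on the async notification queue */
            return fasync_helper(fd, filp, mode, &my_async_queue);
    }

    static int my_release(struct inode *inode, struct file *filp)
    {
            /* unhook filp before the file itself goes away */
            fasync_helper(-1, filp, 0, &my_async_queue);
            return 0;
    }

    static const struct file_operations my_fops = {
            .owner   = THIS_MODULE,
            .fasync  = my_fasync,
            .release = my_release,
    };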
+4 -3
drivers/message/fusion/mptscsih.c
··· 1873 } 1874 1875 out: 1876 - printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n", 1877 - ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt); 1878 1879 return retval; 1880 } ··· 1912 1913 vdevice = SCpnt->device->hostdata; 1914 if (!vdevice || !vdevice->vtarget) { 1915 - retval = SUCCESS; 1916 goto out; 1917 } 1918
··· 1873 } 1874 1875 out: 1876 + printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n", 1877 + ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval, 1878 + SCpnt, SCpnt->serial_number); 1879 1880 return retval; 1881 } ··· 1911 1912 vdevice = SCpnt->device->hostdata; 1913 if (!vdevice || !vdevice->vtarget) { 1914 + retval = 0; 1915 goto out; 1916 } 1917
+3 -2
drivers/scsi/qla2xxx/qla_attr.c
··· 1561 { 1562 struct Scsi_Host *host = rport_to_shost(rport); 1563 fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 1564 1565 if (!fcport) 1566 return; ··· 1574 * Transport has effectively 'deleted' the rport, clear 1575 * all local references. 1576 */ 1577 - spin_lock_irq(host->host_lock); 1578 fcport->rport = fcport->drport = NULL; 1579 *((fc_port_t **)rport->dd_data) = NULL; 1580 - spin_unlock_irq(host->host_lock); 1581 1582 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) 1583 return;
··· 1561 { 1562 struct Scsi_Host *host = rport_to_shost(rport); 1563 fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 1564 + unsigned long flags; 1565 1566 if (!fcport) 1567 return; ··· 1573 * Transport has effectively 'deleted' the rport, clear 1574 * all local references. 1575 */ 1576 + spin_lock_irqsave(host->host_lock, flags); 1577 fcport->rport = fcport->drport = NULL; 1578 *((fc_port_t **)rport->dd_data) = NULL; 1579 + spin_unlock_irqrestore(host->host_lock, flags); 1580 1581 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) 1582 return;
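The qla2xxx host_lock conversions all follow from one rule: spin_unlock_irq() re-enables interrupts unconditionally, so the _irq variants are only safe in paths known to run with interrupts enabled; a callback that may be entered with interrupts already disabled must use the _irqsave/_irqrestore pair, which preserves the caller's state. A compressed sketch (generic spinlock_t lock, not driver code):

    unsigned long flags;

    spin_lock_irq(&lock);                  /* disables IRQs ...               */
    spin_unlock_irq(&lock);                /* ... re-enables unconditionally  */

    spin_lock_irqsave(&lock, flags);       /* saves the current IRQ state     */
    spin_unlock_irqrestore(&lock, flags);  /* restores it; safe when nested   */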
+6 -4
drivers/scsi/qla2xxx/qla_init.c
··· 2505 { 2506 fc_port_t *fcport = data; 2507 struct fc_rport *rport; 2508 2509 - spin_lock_irq(fcport->vha->host->host_lock); 2510 rport = fcport->drport ? fcport->drport: fcport->rport; 2511 fcport->drport = NULL; 2512 - spin_unlock_irq(fcport->vha->host->host_lock); 2513 if (rport) 2514 fc_remote_port_delete(rport); 2515 } ··· 2880 struct fc_rport_identifiers rport_ids; 2881 struct fc_rport *rport; 2882 struct qla_hw_data *ha = vha->hw; 2883 2884 qla2x00_rport_del(fcport); 2885 ··· 2895 "Unable to allocate fc remote port!\n"); 2896 return; 2897 } 2898 - spin_lock_irq(fcport->vha->host->host_lock); 2899 *((fc_port_t **)rport->dd_data) = fcport; 2900 - spin_unlock_irq(fcport->vha->host->host_lock); 2901 2902 rport->supported_classes = fcport->supported_classes; 2903
··· 2505 { 2506 fc_port_t *fcport = data; 2507 struct fc_rport *rport; 2508 + unsigned long flags; 2509 2510 + spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2511 rport = fcport->drport ? fcport->drport: fcport->rport; 2512 fcport->drport = NULL; 2513 + spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 2514 if (rport) 2515 fc_remote_port_delete(rport); 2516 } ··· 2879 struct fc_rport_identifiers rport_ids; 2880 struct fc_rport *rport; 2881 struct qla_hw_data *ha = vha->hw; 2882 + unsigned long flags; 2883 2884 qla2x00_rport_del(fcport); 2885 ··· 2893 "Unable to allocate fc remote port!\n"); 2894 return; 2895 } 2896 + spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2897 *((fc_port_t **)rport->dd_data) = fcport; 2898 + spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 2899 2900 rport->supported_classes = fcport->supported_classes; 2901
+6 -4
drivers/scsi/qla2xxx/qla_os.c
··· 562 } 563 if (atomic_read(&fcport->state) != FCS_ONLINE) { 564 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 565 - atomic_read(&fcport->state) == FCS_DEVICE_LOST || 566 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 567 cmd->result = DID_NO_CONNECT << 16; 568 goto qc24_fail_command; ··· 2512 { 2513 struct fc_rport *rport; 2514 scsi_qla_host_t *base_vha; 2515 2516 if (!fcport->rport) 2517 return; ··· 2520 rport = fcport->rport; 2521 if (defer) { 2522 base_vha = pci_get_drvdata(vha->hw->pdev); 2523 - spin_lock_irq(vha->host->host_lock); 2524 fcport->drport = rport; 2525 - spin_unlock_irq(vha->host->host_lock); 2526 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 2527 qla2xxx_wake_dpc(base_vha); 2528 } else ··· 3282 3283 set_user_nice(current, -20); 3284 3285 while (!kthread_should_stop()) { 3286 DEBUG3(printk("qla2x00: DPC handler sleeping\n")); 3287 3288 - set_current_state(TASK_INTERRUPTIBLE); 3289 schedule(); 3290 __set_current_state(TASK_RUNNING); 3291 ··· 3454 qla2x00_do_dpc_all_vps(base_vha); 3455 3456 ha->dpc_active = 0; 3457 } /* End of while(1) */ 3458 3459 DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no)); 3460
··· 562 } 563 if (atomic_read(&fcport->state) != FCS_ONLINE) { 564 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 565 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 566 cmd->result = DID_NO_CONNECT << 16; 567 goto qc24_fail_command; ··· 2513 { 2514 struct fc_rport *rport; 2515 scsi_qla_host_t *base_vha; 2516 + unsigned long flags; 2517 2518 if (!fcport->rport) 2519 return; ··· 2520 rport = fcport->rport; 2521 if (defer) { 2522 base_vha = pci_get_drvdata(vha->hw->pdev); 2523 + spin_lock_irqsave(vha->host->host_lock, flags); 2524 fcport->drport = rport; 2525 + spin_unlock_irqrestore(vha->host->host_lock, flags); 2526 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 2527 qla2xxx_wake_dpc(base_vha); 2528 } else ··· 3282 3283 set_user_nice(current, -20); 3284 3285 + set_current_state(TASK_INTERRUPTIBLE); 3286 while (!kthread_should_stop()) { 3287 DEBUG3(printk("qla2x00: DPC handler sleeping\n")); 3288 3289 schedule(); 3290 __set_current_state(TASK_RUNNING); 3291 ··· 3454 qla2x00_do_dpc_all_vps(base_vha); 3455 3456 ha->dpc_active = 0; 3457 + set_current_state(TASK_INTERRUPTIBLE); 3458 } /* End of while(1) */ 3459 + __set_current_state(TASK_RUNNING); 3460 3461 DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no)); 3462
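The kthread_stop() hang is the classic lost-wakeup window: if the DPC thread tests kthread_should_stop() first and only then sets TASK_INTERRUPTIBLE, a kthread_stop()+wake_up_process() landing in between is absorbed while the task is still TASK_RUNNING, and the following schedule() can sleep forever. Setting the state before the test closes the window; a generic sketch of the race-free loop (not the driver's exact code):

    set_current_state(TASK_INTERRUPTIBLE);
    while (!kthread_should_stop()) {
            schedule();                    /* a wakeup here is never lost  */
            __set_current_state(TASK_RUNNING);

            /* ... handle work ... */

            set_current_state(TASK_INTERRUPTIBLE);  /* before the re-test */
    }
    __set_current_state(TASK_RUNNING);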
+1 -1
drivers/scsi/scsi_debug.c
··· 1671 unsigned long long lba, unsigned int num, int write) 1672 { 1673 int ret; 1674 - unsigned int block, rest = 0; 1675 int (*func)(struct scsi_cmnd *, unsigned char *, int); 1676 1677 func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
··· 1671 unsigned long long lba, unsigned int num, int write) 1672 { 1673 int ret; 1674 + unsigned long long block, rest = 0; 1675 int (*func)(struct scsi_cmnd *, unsigned char *, int); 1676 1677 func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
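With a 32-bit block, the byte offset later derived from it is computed with a 32-bit multiply and wraps once block * sector_size exceeds 4 GiB, so reads and writes land in the wrong part of the backing store. A short illustration of the wrap (hypothetical values, not driver code):

    unsigned int block = 0x900000;                  /* sector number          */
    unsigned int bad = block * 512;                 /* 32-bit product wraps
                                                       to 0x20000000          */
    unsigned long long good = (unsigned long long)block * 512;
                                                    /* 0x120000000 (~4.5 GiB) */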
+1 -2
drivers/target/Makefile
··· 13 target_core_transport.o \ 14 target_core_cdb.o \ 15 target_core_ua.o \ 16 - target_core_rd.o \ 17 - target_core_mib.o 18 19 obj-$(CONFIG_TARGET_CORE) += target_core_mod.o 20
··· 13 target_core_transport.o \ 14 target_core_cdb.o \ 15 target_core_ua.o \ 16 + target_core_rd.o 17 18 obj-$(CONFIG_TARGET_CORE) += target_core_mod.o 19
+85 -70
drivers/target/target_core_configfs.c
··· 37 #include <linux/parser.h> 38 #include <linux/syscalls.h> 39 #include <linux/configfs.h> 40 - #include <linux/proc_fs.h> 41 42 #include <target/target_core_base.h> 43 #include <target/target_core_device.h> ··· 1970 { 1971 struct se_subsystem_dev *se_dev = container_of(to_config_group(item), 1972 struct se_subsystem_dev, se_dev_group); 1973 - struct config_group *dev_cg; 1974 1975 - if (!(se_dev)) 1976 - return; 1977 - 1978 - dev_cg = &se_dev->se_dev_group; 1979 kfree(dev_cg->default_groups); 1980 } 1981 1982 static ssize_t target_core_dev_show(struct config_item *item, ··· 2161 NULL, 2162 }; 2163 2164 static struct configfs_item_operations target_core_alua_lu_gp_ops = { 2165 .show_attribute = target_core_alua_lu_gp_attr_show, 2166 .store_attribute = target_core_alua_lu_gp_attr_store, 2167 }; ··· 2221 printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" 2222 " Group: core/alua/lu_gps/%s, ID: %hu\n", 2223 config_item_name(item), lu_gp->lu_gp_id); 2224 - 2225 config_item_put(item); 2226 - core_alua_free_lu_gp(lu_gp); 2227 } 2228 2229 static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { ··· 2581 NULL, 2582 }; 2583 2584 static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { 2585 .show_attribute = target_core_alua_tg_pt_gp_attr_show, 2586 .store_attribute = target_core_alua_tg_pt_gp_attr_store, 2587 }; ··· 2643 printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" 2644 " Group: alua/tg_pt_gps/%s, ID: %hu\n", 2645 config_item_name(item), tg_pt_gp->tg_pt_gp_id); 2646 - 2647 config_item_put(item); 2648 - core_alua_free_tg_pt_gp(tg_pt_gp); 2649 } 2650 2651 static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { ··· 2814 struct se_subsystem_api *t; 2815 struct config_item *df_item; 2816 struct config_group *dev_cg, *tg_pt_gp_cg; 2817 - int i, ret; 2818 2819 hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); 2820 2821 - if (mutex_lock_interruptible(&hba->hba_access_mutex)) 2822 - goto out; 2823 - 2824 t = hba->transport; 2825 2826 spin_lock(&se_global->g_device_lock); ··· 2832 config_item_put(df_item); 2833 } 2834 kfree(tg_pt_gp_cg->default_groups); 2835 - core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); 2836 T10_ALUA(se_dev)->default_tg_pt_gp = NULL; 2837 2838 dev_cg = &se_dev->se_dev_group; ··· 2844 dev_cg->default_groups[i] = NULL; 2845 config_item_put(df_item); 2846 } 2847 - 2848 - config_item_put(item); 2849 /* 2850 - * This pointer will set when the storage is enabled with: 2851 - * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` 2852 */ 2853 - if (se_dev->se_dev_ptr) { 2854 - printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" 2855 - "virtual_device() for se_dev_ptr: %p\n", 2856 - se_dev->se_dev_ptr); 2857 - 2858 - ret = se_free_virtual_device(se_dev->se_dev_ptr, hba); 2859 - if (ret < 0) 2860 - goto hba_out; 2861 - } else { 2862 - /* 2863 - * Release struct se_subsystem_dev->se_dev_su_ptr.. 
2864 - */ 2865 - printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" 2866 - "device() for se_dev_su_ptr: %p\n", 2867 - se_dev->se_dev_su_ptr); 2868 - 2869 - t->free_device(se_dev->se_dev_su_ptr); 2870 - } 2871 - 2872 - printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" 2873 - "_dev_t: %p\n", se_dev); 2874 - 2875 - hba_out: 2876 mutex_unlock(&hba->hba_access_mutex); 2877 - out: 2878 - kfree(se_dev); 2879 } 2880 2881 static struct configfs_group_operations target_core_hba_group_ops = { ··· 2932 2933 CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group); 2934 2935 static struct configfs_attribute *target_core_hba_attrs[] = { 2936 &target_core_hba_hba_info.attr, 2937 &target_core_hba_hba_mode.attr, ··· 2946 }; 2947 2948 static struct configfs_item_operations target_core_hba_item_ops = { 2949 .show_attribute = target_core_hba_attr_show, 2950 .store_attribute = target_core_hba_attr_store, 2951 }; ··· 3023 struct config_group *group, 3024 struct config_item *item) 3025 { 3026 - struct se_hba *hba = item_to_hba(item); 3027 - 3028 config_item_put(item); 3029 - core_delete_hba(hba); 3030 } 3031 3032 static struct configfs_group_operations target_core_group_ops = { ··· 3049 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; 3050 struct config_group *lu_gp_cg = NULL; 3051 struct configfs_subsystem *subsys; 3052 - struct proc_dir_entry *scsi_target_proc = NULL; 3053 struct t10_alua_lu_gp *lu_gp; 3054 int ret; 3055 ··· 3154 if (core_dev_setup_virtual_lun0() < 0) 3155 goto out; 3156 3157 - scsi_target_proc = proc_mkdir("scsi_target", 0); 3158 - if (!(scsi_target_proc)) { 3159 - printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n"); 3160 - goto out; 3161 - } 3162 - ret = init_scsi_target_mib(); 3163 - if (ret < 0) 3164 - goto out; 3165 - 3166 return 0; 3167 3168 out: 3169 configfs_unregister_subsystem(subsys); 3170 - if (scsi_target_proc) 3171 - remove_proc_entry("scsi_target", 0); 3172 core_dev_release_virtual_lun0(); 3173 rd_module_exit(); 3174 out_global: ··· 3193 config_item_put(item); 3194 } 3195 kfree(lu_gp_cg->default_groups); 3196 - core_alua_free_lu_gp(se_global->default_lu_gp); 3197 - se_global->default_lu_gp = NULL; 3198 3199 alua_cg = &se_global->alua_group; 3200 for (i = 0; alua_cg->default_groups[i]; i++) { ··· 3202 config_item_put(item); 3203 } 3204 kfree(alua_cg->default_groups); 3205 3206 hba_cg = &se_global->target_core_hbagroup; 3207 for (i = 0; hba_cg->default_groups[i]; i++) { ··· 3211 config_item_put(item); 3212 } 3213 kfree(hba_cg->default_groups); 3214 - 3215 - for (i = 0; subsys->su_group.default_groups[i]; i++) { 3216 - item = &subsys->su_group.default_groups[i]->cg_item; 3217 - subsys->su_group.default_groups[i] = NULL; 3218 - config_item_put(item); 3219 - } 3220 kfree(subsys->su_group.default_groups); 3221 3222 - configfs_unregister_subsystem(subsys); 3223 printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" 3224 " Infrastructure\n"); 3225 3226 - remove_scsi_target_mib(); 3227 - remove_proc_entry("scsi_target", 0); 3228 core_dev_release_virtual_lun0(); 3229 rd_module_exit(); 3230 release_se_global();
··· 37 #include <linux/parser.h> 38 #include <linux/syscalls.h> 39 #include <linux/configfs.h> 40 41 #include <target/target_core_base.h> 42 #include <target/target_core_device.h> ··· 1971 { 1972 struct se_subsystem_dev *se_dev = container_of(to_config_group(item), 1973 struct se_subsystem_dev, se_dev_group); 1974 + struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); 1975 + struct se_subsystem_api *t = hba->transport; 1976 + struct config_group *dev_cg = &se_dev->se_dev_group; 1977 1978 kfree(dev_cg->default_groups); 1979 + /* 1980 + * This pointer will set when the storage is enabled with: 1981 + *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` 1982 + */ 1983 + if (se_dev->se_dev_ptr) { 1984 + printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" 1985 + "virtual_device() for se_dev_ptr: %p\n", 1986 + se_dev->se_dev_ptr); 1987 + 1988 + se_free_virtual_device(se_dev->se_dev_ptr, hba); 1989 + } else { 1990 + /* 1991 + * Release struct se_subsystem_dev->se_dev_su_ptr.. 1992 + */ 1993 + printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" 1994 + "device() for se_dev_su_ptr: %p\n", 1995 + se_dev->se_dev_su_ptr); 1996 + 1997 + t->free_device(se_dev->se_dev_su_ptr); 1998 + } 1999 + 2000 + printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" 2001 + "_dev_t: %p\n", se_dev); 2002 + kfree(se_dev); 2003 } 2004 2005 static ssize_t target_core_dev_show(struct config_item *item, ··· 2140 NULL, 2141 }; 2142 2143 + static void target_core_alua_lu_gp_release(struct config_item *item) 2144 + { 2145 + struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), 2146 + struct t10_alua_lu_gp, lu_gp_group); 2147 + 2148 + core_alua_free_lu_gp(lu_gp); 2149 + } 2150 + 2151 static struct configfs_item_operations target_core_alua_lu_gp_ops = { 2152 + .release = target_core_alua_lu_gp_release, 2153 .show_attribute = target_core_alua_lu_gp_attr_show, 2154 .store_attribute = target_core_alua_lu_gp_attr_store, 2155 }; ··· 2191 printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" 2192 " Group: core/alua/lu_gps/%s, ID: %hu\n", 2193 config_item_name(item), lu_gp->lu_gp_id); 2194 + /* 2195 + * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release() 2196 + * -> target_core_alua_lu_gp_release() 2197 + */ 2198 config_item_put(item); 2199 } 2200 2201 static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { ··· 2549 NULL, 2550 }; 2551 2552 + static void target_core_alua_tg_pt_gp_release(struct config_item *item) 2553 + { 2554 + struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), 2555 + struct t10_alua_tg_pt_gp, tg_pt_gp_group); 2556 + 2557 + core_alua_free_tg_pt_gp(tg_pt_gp); 2558 + } 2559 + 2560 static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { 2561 + .release = target_core_alua_tg_pt_gp_release, 2562 .show_attribute = target_core_alua_tg_pt_gp_attr_show, 2563 .store_attribute = target_core_alua_tg_pt_gp_attr_store, 2564 }; ··· 2602 printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" 2603 " Group: alua/tg_pt_gps/%s, ID: %hu\n", 2604 config_item_name(item), tg_pt_gp->tg_pt_gp_id); 2605 + /* 2606 + * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release() 2607 + * -> target_core_alua_tg_pt_gp_release(). 
2608 + */ 2609 config_item_put(item); 2610 } 2611 2612 static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { ··· 2771 struct se_subsystem_api *t; 2772 struct config_item *df_item; 2773 struct config_group *dev_cg, *tg_pt_gp_cg; 2774 + int i; 2775 2776 hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); 2777 2778 + mutex_lock(&hba->hba_access_mutex); 2779 t = hba->transport; 2780 2781 spin_lock(&se_global->g_device_lock); ··· 2791 config_item_put(df_item); 2792 } 2793 kfree(tg_pt_gp_cg->default_groups); 2794 + /* 2795 + * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp 2796 + * directly from target_core_alua_tg_pt_gp_release(). 2797 + */ 2798 T10_ALUA(se_dev)->default_tg_pt_gp = NULL; 2799 2800 dev_cg = &se_dev->se_dev_group; ··· 2800 dev_cg->default_groups[i] = NULL; 2801 config_item_put(df_item); 2802 } 2803 /* 2804 + * The releasing of se_dev and associated se_dev->se_dev_ptr is done 2805 + * from target_core_dev_item_ops->release() ->target_core_dev_release(). 2806 */ 2807 + config_item_put(item); 2808 mutex_unlock(&hba->hba_access_mutex); 2809 } 2810 2811 static struct configfs_group_operations target_core_hba_group_ops = { ··· 2914 2915 CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group); 2916 2917 + static void target_core_hba_release(struct config_item *item) 2918 + { 2919 + struct se_hba *hba = container_of(to_config_group(item), 2920 + struct se_hba, hba_group); 2921 + core_delete_hba(hba); 2922 + } 2923 + 2924 static struct configfs_attribute *target_core_hba_attrs[] = { 2925 &target_core_hba_hba_info.attr, 2926 &target_core_hba_hba_mode.attr, ··· 2921 }; 2922 2923 static struct configfs_item_operations target_core_hba_item_ops = { 2924 + .release = target_core_hba_release, 2925 .show_attribute = target_core_hba_attr_show, 2926 .store_attribute = target_core_hba_attr_store, 2927 }; ··· 2997 struct config_group *group, 2998 struct config_item *item) 2999 { 3000 + /* 3001 + * core_delete_hba() is called from target_core_hba_item_ops->release() 3002 + * -> target_core_hba_release() 3003 + */ 3004 config_item_put(item); 3005 } 3006 3007 static struct configfs_group_operations target_core_group_ops = { ··· 3022 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; 3023 struct config_group *lu_gp_cg = NULL; 3024 struct configfs_subsystem *subsys; 3025 struct t10_alua_lu_gp *lu_gp; 3026 int ret; 3027 ··· 3128 if (core_dev_setup_virtual_lun0() < 0) 3129 goto out; 3130 3131 return 0; 3132 3133 out: 3134 configfs_unregister_subsystem(subsys); 3135 core_dev_release_virtual_lun0(); 3136 rd_module_exit(); 3137 out_global: ··· 3178 config_item_put(item); 3179 } 3180 kfree(lu_gp_cg->default_groups); 3181 + lu_gp_cg->default_groups = NULL; 3182 3183 alua_cg = &se_global->alua_group; 3184 for (i = 0; alua_cg->default_groups[i]; i++) { ··· 3188 config_item_put(item); 3189 } 3190 kfree(alua_cg->default_groups); 3191 + alua_cg->default_groups = NULL; 3192 3193 hba_cg = &se_global->target_core_hbagroup; 3194 for (i = 0; hba_cg->default_groups[i]; i++) { ··· 3196 config_item_put(item); 3197 } 3198 kfree(hba_cg->default_groups); 3199 + hba_cg->default_groups = NULL; 3200 + /* 3201 + * We expect subsys->su_group.default_groups to be released 3202 + * by configfs subsystem provider logic.. 
3203 + */ 3204 + configfs_unregister_subsystem(subsys); 3205 kfree(subsys->su_group.default_groups); 3206 3207 + core_alua_free_lu_gp(se_global->default_lu_gp); 3208 + se_global->default_lu_gp = NULL; 3209 + 3210 printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" 3211 " Infrastructure\n"); 3212 3213 core_dev_release_virtual_lun0(); 3214 rd_module_exit(); 3215 release_se_global();
+6 -7
drivers/target/target_core_device.c
··· 373 /* 374 * deve->se_lun_acl will be NULL for demo-mode created LUNs 375 * that have not been explictly concerted to MappedLUNs -> 376 - * struct se_lun_acl. 377 */ 378 - if (!(deve->se_lun_acl)) 379 - return 0; 380 - 381 spin_lock_bh(&port->sep_alua_lock); 382 list_del(&deve->alua_port_list); 383 spin_unlock_bh(&port->sep_alua_lock); ··· 395 printk(KERN_ERR "struct se_dev_entry->se_lun_acl" 396 " already set for demo mode -> explict" 397 " LUN ACL transition\n"); 398 return -1; 399 } 400 if (deve->se_lun != lun) { 401 printk(KERN_ERR "struct se_dev_entry->se_lun does" 402 " match passed struct se_lun for demo mode" 403 " -> explict LUN ACL transition\n"); 404 return -1; 405 } 406 deve->se_lun_acl = lun_acl; ··· 867 } 868 } 869 spin_unlock(&hba->device_lock); 870 - 871 - while (atomic_read(&hba->dev_mib_access_count)) 872 - cpu_relax(); 873 } 874 875 int se_dev_check_online(struct se_device *dev)
··· 373 /* 374 * deve->se_lun_acl will be NULL for demo-mode created LUNs 375 * that have not been explictly concerted to MappedLUNs -> 376 + * struct se_lun_acl, but we remove deve->alua_port_list from 377 + * port->sep_alua_list. This also means that active UAs and 378 + * NodeACL context specific PR metadata for demo-mode 379 + * MappedLUN *deve will be released below.. 380 */ 381 spin_lock_bh(&port->sep_alua_lock); 382 list_del(&deve->alua_port_list); 383 spin_unlock_bh(&port->sep_alua_lock); ··· 395 printk(KERN_ERR "struct se_dev_entry->se_lun_acl" 396 " already set for demo mode -> explict" 397 " LUN ACL transition\n"); 398 + spin_unlock_irq(&nacl->device_list_lock); 399 return -1; 400 } 401 if (deve->se_lun != lun) { 402 printk(KERN_ERR "struct se_dev_entry->se_lun does" 403 " match passed struct se_lun for demo mode" 404 " -> explict LUN ACL transition\n"); 405 + spin_unlock_irq(&nacl->device_list_lock); 406 return -1; 407 } 408 deve->se_lun_acl = lun_acl; ··· 865 } 866 } 867 spin_unlock(&hba->device_lock); 868 } 869 870 int se_dev_check_online(struct se_device *dev)
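The missing-lock-drop fix restores the invariant that every exit path out of a critical section releases the lock it took; both early returns under device_list_lock previously leaked the spinlock and would deadlock the next acquirer. The rule in generic form:

    spin_lock_irq(&lock);
    if (error) {
            spin_unlock_irq(&lock);        /* unlock on error paths too */
            return -1;
    }
    /* ... */
    spin_unlock_irq(&lock);
    return 0;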
+65 -27
drivers/target/target_core_fabric_configfs.c
··· 214 215 CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group); 216 217 static struct configfs_attribute *target_fabric_mappedlun_attrs[] = { 218 &target_fabric_mappedlun_write_protect.attr, 219 NULL, 220 }; 221 222 static struct configfs_item_operations target_fabric_mappedlun_item_ops = { 223 .show_attribute = target_fabric_mappedlun_attr_show, 224 .store_attribute = target_fabric_mappedlun_attr_store, 225 .allow_link = target_fabric_mappedlun_link, ··· 347 struct config_group *group, 348 struct config_item *item) 349 { 350 - struct se_lun_acl *lacl = container_of(to_config_group(item), 351 - struct se_lun_acl, se_lun_group); 352 - struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg; 353 - 354 config_item_put(item); 355 - core_dev_free_initiator_node_lun_acl(se_tpg, lacl); 356 } 357 358 static struct configfs_item_operations target_fabric_nacl_base_item_ops = { 359 .show_attribute = target_fabric_nacl_base_attr_show, 360 .store_attribute = target_fabric_nacl_base_attr_store, 361 }; ··· 420 struct config_group *group, 421 struct config_item *item) 422 { 423 - struct se_portal_group *se_tpg = container_of(group, 424 - struct se_portal_group, tpg_acl_group); 425 - struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 426 struct se_node_acl *se_nacl = container_of(to_config_group(item), 427 struct se_node_acl, acl_group); 428 struct config_item *df_item; ··· 432 nacl_cg->default_groups[i] = NULL; 433 config_item_put(df_item); 434 } 435 - 436 config_item_put(item); 437 - tf->tf_ops.fabric_drop_nodeacl(se_nacl); 438 } 439 440 static struct configfs_group_operations target_fabric_nacl_group_ops = { ··· 451 452 CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group); 453 454 static struct configfs_item_operations target_fabric_np_base_item_ops = { 455 .show_attribute = target_fabric_np_base_attr_show, 456 .store_attribute = target_fabric_np_base_attr_store, 457 }; ··· 491 if (!(se_tpg_np) || IS_ERR(se_tpg_np)) 492 return ERR_PTR(-EINVAL); 493 494 config_group_init_type_name(&se_tpg_np->tpg_np_group, name, 495 &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); 496 ··· 502 struct config_group *group, 503 struct config_item *item) 504 { 505 - struct se_portal_group *se_tpg = container_of(group, 506 - struct se_portal_group, tpg_np_group); 507 - struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 508 - struct se_tpg_np *se_tpg_np = container_of(to_config_group(item), 509 - struct se_tpg_np, tpg_np_group); 510 - 511 config_item_put(item); 512 - tf->tf_ops.fabric_drop_np(se_tpg_np); 513 } 514 515 static struct configfs_group_operations target_fabric_np_group_ops = { ··· 836 */ 837 CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group); 838 839 static struct configfs_item_operations target_fabric_tpg_base_item_ops = { 840 .show_attribute = target_fabric_tpg_attr_show, 841 .store_attribute = target_fabric_tpg_attr_store, 842 }; ··· 905 struct config_group *group, 906 struct config_item *item) 907 { 908 - struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group); 909 - struct target_fabric_configfs *tf = wwn->wwn_tf; 910 struct se_portal_group *se_tpg = container_of(to_config_group(item), 911 struct se_portal_group, tpg_group); 912 struct config_group *tpg_cg = &se_tpg->tpg_group; ··· 921 } 922 923 config_item_put(item); 924 - tf->tf_ops.fabric_drop_tpg(se_tpg); 925 } 926 927 static struct configfs_group_operations target_fabric_tpg_group_ops = { 928 .make_group = target_fabric_make_tpg, 929 .drop_item = target_fabric_drop_tpg, 930 }; 
931 932 - TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL); 933 934 /* End of tfc_tpg_cit */ 935 ··· 976 struct config_group *group, 977 struct config_item *item) 978 { 979 - struct target_fabric_configfs *tf = container_of(group, 980 - struct target_fabric_configfs, tf_group); 981 - struct se_wwn *wwn = container_of(to_config_group(item), 982 - struct se_wwn, wwn_group); 983 - 984 config_item_put(item); 985 - tf->tf_ops.fabric_drop_wwn(wwn); 986 } 987 988 static struct configfs_group_operations target_fabric_wwn_group_ops = {
··· 214 215 CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group); 216 217 + static void target_fabric_mappedlun_release(struct config_item *item) 218 + { 219 + struct se_lun_acl *lacl = container_of(to_config_group(item), 220 + struct se_lun_acl, se_lun_group); 221 + struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg; 222 + 223 + core_dev_free_initiator_node_lun_acl(se_tpg, lacl); 224 + } 225 + 226 static struct configfs_attribute *target_fabric_mappedlun_attrs[] = { 227 &target_fabric_mappedlun_write_protect.attr, 228 NULL, 229 }; 230 231 static struct configfs_item_operations target_fabric_mappedlun_item_ops = { 232 + .release = target_fabric_mappedlun_release, 233 .show_attribute = target_fabric_mappedlun_attr_show, 234 .store_attribute = target_fabric_mappedlun_attr_store, 235 .allow_link = target_fabric_mappedlun_link, ··· 337 struct config_group *group, 338 struct config_item *item) 339 { 340 config_item_put(item); 341 + } 342 + 343 + static void target_fabric_nacl_base_release(struct config_item *item) 344 + { 345 + struct se_node_acl *se_nacl = container_of(to_config_group(item), 346 + struct se_node_acl, acl_group); 347 + struct se_portal_group *se_tpg = se_nacl->se_tpg; 348 + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 349 + 350 + tf->tf_ops.fabric_drop_nodeacl(se_nacl); 351 } 352 353 static struct configfs_item_operations target_fabric_nacl_base_item_ops = { 354 + .release = target_fabric_nacl_base_release, 355 .show_attribute = target_fabric_nacl_base_attr_show, 356 .store_attribute = target_fabric_nacl_base_attr_store, 357 }; ··· 404 struct config_group *group, 405 struct config_item *item) 406 { 407 struct se_node_acl *se_nacl = container_of(to_config_group(item), 408 struct se_node_acl, acl_group); 409 struct config_item *df_item; ··· 419 nacl_cg->default_groups[i] = NULL; 420 config_item_put(df_item); 421 } 422 + /* 423 + * struct se_node_acl free is done in target_fabric_nacl_base_release() 424 + */ 425 config_item_put(item); 426 } 427 428 static struct configfs_group_operations target_fabric_nacl_group_ops = { ··· 437 438 CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group); 439 440 + static void target_fabric_np_base_release(struct config_item *item) 441 + { 442 + struct se_tpg_np *se_tpg_np = container_of(to_config_group(item), 443 + struct se_tpg_np, tpg_np_group); 444 + struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent; 445 + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 446 + 447 + tf->tf_ops.fabric_drop_np(se_tpg_np); 448 + } 449 + 450 static struct configfs_item_operations target_fabric_np_base_item_ops = { 451 + .release = target_fabric_np_base_release, 452 .show_attribute = target_fabric_np_base_attr_show, 453 .store_attribute = target_fabric_np_base_attr_store, 454 }; ··· 466 if (!(se_tpg_np) || IS_ERR(se_tpg_np)) 467 return ERR_PTR(-EINVAL); 468 469 + se_tpg_np->tpg_np_parent = se_tpg; 470 config_group_init_type_name(&se_tpg_np->tpg_np_group, name, 471 &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); 472 ··· 476 struct config_group *group, 477 struct config_item *item) 478 { 479 + /* 480 + * struct se_tpg_np is released via target_fabric_np_base_release() 481 + */ 482 config_item_put(item); 483 } 484 485 static struct configfs_group_operations target_fabric_np_group_ops = { ··· 814 */ 815 CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group); 816 817 + static void target_fabric_tpg_release(struct config_item *item) 818 + { 819 + struct se_portal_group *se_tpg = 
container_of(to_config_group(item), 820 + struct se_portal_group, tpg_group); 821 + struct se_wwn *wwn = se_tpg->se_tpg_wwn; 822 + struct target_fabric_configfs *tf = wwn->wwn_tf; 823 + 824 + tf->tf_ops.fabric_drop_tpg(se_tpg); 825 + } 826 + 827 static struct configfs_item_operations target_fabric_tpg_base_item_ops = { 828 + .release = target_fabric_tpg_release, 829 .show_attribute = target_fabric_tpg_attr_show, 830 .store_attribute = target_fabric_tpg_attr_store, 831 }; ··· 872 struct config_group *group, 873 struct config_item *item) 874 { 875 struct se_portal_group *se_tpg = container_of(to_config_group(item), 876 struct se_portal_group, tpg_group); 877 struct config_group *tpg_cg = &se_tpg->tpg_group; ··· 890 } 891 892 config_item_put(item); 893 } 894 + 895 + static void target_fabric_release_wwn(struct config_item *item) 896 + { 897 + struct se_wwn *wwn = container_of(to_config_group(item), 898 + struct se_wwn, wwn_group); 899 + struct target_fabric_configfs *tf = wwn->wwn_tf; 900 + 901 + tf->tf_ops.fabric_drop_wwn(wwn); 902 + } 903 + 904 + static struct configfs_item_operations target_fabric_tpg_item_ops = { 905 + .release = target_fabric_release_wwn, 906 + }; 907 908 static struct configfs_group_operations target_fabric_tpg_group_ops = { 909 .make_group = target_fabric_make_tpg, 910 .drop_item = target_fabric_drop_tpg, 911 }; 912 913 + TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops, 914 + NULL); 915 916 /* End of tfc_tpg_cit */ 917 ··· 932 struct config_group *group, 933 struct config_item *item) 934 { 935 config_item_put(item); 936 } 937 938 static struct configfs_group_operations target_fabric_wwn_group_ops = {
+5 -3
drivers/target/target_core_iblock.c
··· 154 155 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, 156 FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); 157 - if (!(bd)) 158 goto failed; 159 /* 160 * Setup the local scope queue_limits from struct request_queue->limits ··· 220 { 221 struct iblock_dev *ib_dev = p; 222 223 - blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 224 - bioset_free(ib_dev->ibd_bio_set); 225 kfree(ib_dev); 226 } 227
··· 154 155 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, 156 FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); 157 + if (IS_ERR(bd)) 158 goto failed; 159 /* 160 * Setup the local scope queue_limits from struct request_queue->limits ··· 220 { 221 struct iblock_dev *ib_dev = p; 222 223 + if (ib_dev->ibd_bd != NULL) 224 + blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 225 + if (ib_dev->ibd_bio_set != NULL) 226 + bioset_free(ib_dev->ibd_bio_set); 227 kfree(ib_dev); 228 } 229
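blkdev_get_by_path() reports failure as ERR_PTR(-errno), never as NULL, so the old `if (!(bd))` test could never see an error and the bogus pointer was dereferenced later; the correct test is IS_ERR(), and the teardown path must tolerate fields that were never set up. A short sketch of the convention (path and holder are placeholders):

    struct block_device *bd;

    bd = blkdev_get_by_path(path, FMODE_WRITE|FMODE_READ|FMODE_EXCL, holder);
    if (IS_ERR(bd))
            return PTR_ERR(bd);            /* never compare against NULL */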
-1078
drivers/target/target_core_mib.c
··· (removed wholesale: target_core_mib.c implemented the procfs/seq_file based SCSI MIB tables — scsi_inst, scsi_dev, scsi_port, scsi_transport, scsi_tgt_dev, scsi_tgt_port, scsi_auth_intr, scsi_att_intr_port and scsi_lu — together with init_scsi_target_mib()/remove_scsi_target_mib(), which created and tore down the /proc/scsi_target/mib/* entries)
create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL); 1007 - if (scsi_tgt_dev_entry) 1008 - scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops; 1009 - else 1010 - goto error; 1011 - 1012 - scsi_tgt_port_entry = 1013 - create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL); 1014 - if (scsi_tgt_port_entry) 1015 - scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops; 1016 - else 1017 - goto error; 1018 - 1019 - scsi_auth_intr_entry = 1020 - create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL); 1021 - if (scsi_auth_intr_entry) 1022 - scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops; 1023 - else 1024 - goto error; 1025 - 1026 - scsi_att_intr_port_entry = 1027 - create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL); 1028 - if (scsi_att_intr_port_entry) 1029 - scsi_att_intr_port_entry->proc_fops = 1030 - &scsi_att_intr_port_seq_fops; 1031 - else 1032 - goto error; 1033 - 1034 - scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL); 1035 - if (scsi_lu_entry) 1036 - scsi_lu_entry->proc_fops = &scsi_lu_seq_fops; 1037 - else 1038 - goto error; 1039 - 1040 - return 0; 1041 - 1042 - error: 1043 - printk(KERN_ERR "create_proc_entry() failed.\n"); 1044 - remove_scsi_target_mib(); 1045 - return -1; 1046 - } 1047 - 1048 - /* 1049 - * Initialize the index table for allocating unique row indexes to various mib 1050 - * tables 1051 - */ 1052 - void init_scsi_index_table(void) 1053 - { 1054 - memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); 1055 - spin_lock_init(&scsi_index_table.lock); 1056 - } 1057 - 1058 - /* 1059 - * Allocate a new row index for the entry type specified 1060 - */ 1061 - u32 scsi_get_new_index(scsi_index_t type) 1062 - { 1063 - u32 new_index; 1064 - 1065 - if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { 1066 - printk(KERN_ERR "Invalid index type %d\n", type); 1067 - return -1; 1068 - } 1069 - 1070 - spin_lock(&scsi_index_table.lock); 1071 - new_index = ++scsi_index_table.scsi_mib_index[type]; 1072 - if (new_index == 0) 1073 - new_index = ++scsi_index_table.scsi_mib_index[type]; 1074 - spin_unlock(&scsi_index_table.lock); 1075 - 1076 - return new_index; 1077 - } 1078 - EXPORT_SYMBOL(scsi_get_new_index);
···
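The procfs tables deleted above all follow the same seq_file iterator contract: ->start acquires the list lock and positions a cursor, ->next advances it, ->stop releases the lock, and ->show emits one row. A minimal sketch of that pattern for reference; every name here (demo_list, demo_lock, demo_seq_*) is illustrative and not from this patch:

#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

/* Illustrative state; stands in for se_global->g_se_tpg_list et al. */
static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
{
        spin_lock(&demo_lock);                  /* held across the whole walk */
        return seq_list_start(&demo_list, *pos);
}

static void *demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return seq_list_next(v, &demo_list, pos);
}

static void demo_seq_stop(struct seq_file *seq, void *v)
{
        spin_unlock(&demo_lock);                /* always paired with ->start */
}

static int demo_seq_show(struct seq_file *seq, void *v)
{
        seq_printf(seq, "entry %p\n", v);       /* one table row per entry */
        return 0;
}

static const struct seq_operations demo_seq_ops = {
        .start  = demo_seq_start,
        .next   = demo_seq_next,
        .stop   = demo_seq_stop,
        .show   = demo_seq_show,
};

The deleted scsi_*_seq_* functions are instances of this skeleton wired to the se_global lists; per the merge summary, the procfs-based MIB reporting is dropped outright rather than reworked.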
-28
drivers/target/target_core_mib.h
··· 1 - #ifndef TARGET_CORE_MIB_H
2 - #define TARGET_CORE_MIB_H
3 -
4 - typedef enum {
5 - SCSI_INST_INDEX,
6 - SCSI_DEVICE_INDEX,
7 - SCSI_AUTH_INTR_INDEX,
8 - SCSI_INDEX_TYPE_MAX
9 - } scsi_index_t;
10 -
11 - struct scsi_index_table {
12 - spinlock_t lock;
13 - u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
14 - } ____cacheline_aligned;
15 -
16 - /* SCSI Port stats */
17 - struct scsi_port_stats {
18 - u64 cmd_pdus;
19 - u64 tx_data_octets;
20 - u64 rx_data_octets;
21 - } ____cacheline_aligned;
22 -
23 - extern int init_scsi_target_mib(void);
24 - extern void remove_scsi_target_mib(void);
25 - extern void init_scsi_index_table(void);
26 - extern u32 scsi_get_new_index(scsi_index_t);
27 -
28 - #endif /*** TARGET_CORE_MIB_H ***/
···
+2 -2
drivers/target/target_core_pscsi.c
··· 462 */
463 bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
464 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
465 - if (!(bd)) {
466 - printk("pSCSI: blkdev_get_by_path() failed\n");
467 scsi_device_put(sd);
468 return NULL;
469 }
··· 462 */
463 bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
464 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
465 + if (IS_ERR(bd)) {
466 + printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
467 scsi_device_put(sd);
468 return NULL;
469 }
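This hunk is the "claim checking for NULL instead of IS_ERR" fix from the merge summary: blkdev_get_by_path() reports failure as an ERR_PTR-encoded errno rather than NULL, so the old `if (!(bd))` test could never fire and a failed claim fell through to a NULL pointer dereference later. A minimal sketch of the convention; claim_backing_dev is a hypothetical caller name, not a function in the tree:

#include <linux/blkdev.h>
#include <linux/err.h>

static struct block_device *claim_backing_dev(const char *path, void *holder)
{
        struct block_device *bd;

        bd = blkdev_get_by_path(path, FMODE_WRITE | FMODE_READ | FMODE_EXCL,
                                holder);
        if (IS_ERR(bd)) {               /* failure is ERR_PTR, never NULL */
                pr_err("blkdev_get_by_path() failed: %ld\n", PTR_ERR(bd));
                return NULL;            /* caller treats NULL as failure */
        }
        return bd;
}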
+21 -8
drivers/target/target_core_tpg.c
··· 275 spin_lock_init(&acl->device_list_lock);
276 spin_lock_init(&acl->nacl_sess_lock);
277 atomic_set(&acl->acl_pr_ref_count, 0);
278 - atomic_set(&acl->mib_ref_count, 0);
279 acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
280 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
281 acl->se_tpg = tpg;
··· 314 void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
315 {
316 while (atomic_read(&nacl->acl_pr_ref_count) != 0)
317 - cpu_relax();
318 - }
319 -
320 - void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
321 - {
322 - while (atomic_read(&nacl->mib_ref_count) != 0)
323 cpu_relax();
324 }
325
··· 473 spin_unlock_bh(&tpg->session_lock);
474
475 core_tpg_wait_for_nacl_pr_ref(acl);
476 - core_tpg_wait_for_mib_ref(acl);
477 core_clear_initiator_node_from_tpg(acl, tpg);
478 core_free_device_list_for_node(acl, tpg);
479
··· 693
694 int core_tpg_deregister(struct se_portal_group *se_tpg)
695 {
696 printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
697 " for endpoint: %s Portal Tag %u\n",
698 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
··· 708
709 while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
710 cpu_relax();
711
712 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
713 core_tpg_release_virtual_lun0(se_tpg);
··· 275 spin_lock_init(&acl->device_list_lock);
276 spin_lock_init(&acl->nacl_sess_lock);
277 atomic_set(&acl->acl_pr_ref_count, 0);
278 acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
279 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
280 acl->se_tpg = tpg;
··· 315 void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
316 {
317 while (atomic_read(&nacl->acl_pr_ref_count) != 0)
318 cpu_relax();
319 }
320
··· 480 spin_unlock_bh(&tpg->session_lock);
481
482 core_tpg_wait_for_nacl_pr_ref(acl);
483 core_clear_initiator_node_from_tpg(acl, tpg);
484 core_free_device_list_for_node(acl, tpg);
485
··· 701
702 int core_tpg_deregister(struct se_portal_group *se_tpg)
703 {
704 + struct se_node_acl *nacl, *nacl_tmp;
705 +
706 printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
707 " for endpoint: %s Portal Tag %u\n",
708 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
··· 714
715 while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
716 cpu_relax();
717 + /*
718 + * Release any remaining demo-mode generated se_node_acl that have
719 + * not been released because of TFO->tpg_check_demo_mode_cache() == 1
720 + * in transport_deregister_session().
721 + */
722 + spin_lock_bh(&se_tpg->acl_node_lock);
723 + list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
724 + acl_list) {
725 + list_del(&nacl->acl_list);
726 + se_tpg->num_node_acls--;
727 + spin_unlock_bh(&se_tpg->acl_node_lock);
728 +
729 + core_tpg_wait_for_nacl_pr_ref(nacl);
730 + core_free_device_list_for_node(nacl, se_tpg);
731 + TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
732 +
733 + spin_lock_bh(&se_tpg->acl_node_lock);
734 + }
735 + spin_unlock_bh(&se_tpg->acl_node_lock);
736
737 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
738 core_tpg_release_virtual_lun0(se_tpg);
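The core_tpg_deregister() hunk above drains leftover demo-mode ACLs with a drop-and-retake locking pattern: list_for_each_entry_safe() caches the next node, the current node is unlinked while the lock is held, and the lock is then released around the release calls, which can block or busy-wait. A self-contained sketch of that pattern, valid under the same assumption the patch relies on, namely that nothing else mutates the list once deregistration has begun; all names here are illustrative:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_node {                        /* illustrative type */
        struct list_head entry;
};

static LIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_lock);

static void release_node(struct my_node *node);   /* may block */

static void my_list_teardown(void)
{
        struct my_node *node, *tmp;

        spin_lock_bh(&my_lock);
        list_for_each_entry_safe(node, tmp, &my_list, entry) {
                list_del(&node->entry);           /* unlink under the lock */
                spin_unlock_bh(&my_lock);         /* release_node() can block */

                release_node(node);

                spin_lock_bh(&my_lock);           /* retake before next step */
        }
        spin_unlock_bh(&my_lock);
}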
+39 -9
drivers/target/target_core_transport.c
··· 379 se_global = NULL;
380 }
381
382 void transport_init_queue_obj(struct se_queue_obj *qobj)
383 {
384 atomic_set(&qobj->queue_cnt, 0);
··· 471 }
472 INIT_LIST_HEAD(&se_sess->sess_list);
473 INIT_LIST_HEAD(&se_sess->sess_acl_list);
474 - atomic_set(&se_sess->mib_ref_count, 0);
475
476 return se_sess;
477 }
··· 579 transport_free_session(se_sess);
580 return;
581 }
582 - /*
583 - * Wait for possible reference in drivers/target/target_core_mib.c:
584 - * scsi_att_intr_port_seq_show()
585 - */
586 - while (atomic_read(&se_sess->mib_ref_count) != 0)
587 - cpu_relax();
588
589 spin_lock_bh(&se_tpg->session_lock);
590 list_del(&se_sess->sess_list);
··· 601 spin_unlock_bh(&se_tpg->acl_node_lock);
602
603 core_tpg_wait_for_nacl_pr_ref(se_nacl);
604 - core_tpg_wait_for_mib_ref(se_nacl);
605 core_free_device_list_for_node(se_nacl, se_tpg);
606 TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
607 se_nacl);
··· 4853
4854 return ret;
4855 }
4856 /*
4857 * This is the normal path for all normal non BIDI and BIDI-COMMAND
4858 * WRITE payloads.. If we need to do BIDI READ passthrough for
··· 5036 struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
5037 u32 se_mem_cnt = 0, task_offset = 0;
5038
5039 - BUG_ON(list_empty(cmd->t_task->t_mem_list));
5040
5041 ret = transport_do_se_mem_map(dev, task,
5042 cmd->t_task->t_mem_list, NULL, se_mem,
··· 379 se_global = NULL;
380 }
381
382 + /* SCSI statistics table index */
383 + static struct scsi_index_table scsi_index_table;
384 +
385 + /*
386 + * Initialize the index table for allocating unique row indexes to various mib
387 + * tables.
388 + */
389 + void init_scsi_index_table(void)
390 + {
391 + memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
392 + spin_lock_init(&scsi_index_table.lock);
393 + }
394 +
395 + /*
396 + * Allocate a new row index for the entry type specified
397 + */
398 + u32 scsi_get_new_index(scsi_index_t type)
399 + {
400 + u32 new_index;
401 +
402 + if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
403 + printk(KERN_ERR "Invalid index type %d\n", type);
404 + return -EINVAL;
405 + }
406 +
407 + spin_lock(&scsi_index_table.lock);
408 + new_index = ++scsi_index_table.scsi_mib_index[type];
409 + if (new_index == 0)
410 + new_index = ++scsi_index_table.scsi_mib_index[type];
411 + spin_unlock(&scsi_index_table.lock);
412 +
413 + return new_index;
414 + }
415 +
416 void transport_init_queue_obj(struct se_queue_obj *qobj)
417 {
418 atomic_set(&qobj->queue_cnt, 0);
··· 437 }
438 INIT_LIST_HEAD(&se_sess->sess_list);
439 INIT_LIST_HEAD(&se_sess->sess_acl_list);
440
441 return se_sess;
442 }
··· 546 transport_free_session(se_sess);
547 return;
548 }
549
550 spin_lock_bh(&se_tpg->session_lock);
551 list_del(&se_sess->sess_list);
··· 574 spin_unlock_bh(&se_tpg->acl_node_lock);
575
576 core_tpg_wait_for_nacl_pr_ref(se_nacl);
577 core_free_device_list_for_node(se_nacl, se_tpg);
578 TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
579 se_nacl);
··· 4827
4828 return ret;
4829 }
4830 +
4831 + BUG_ON(list_empty(se_mem_list));
4832 /*
4833 * This is the normal path for all normal non BIDI and BIDI-COMMAND
4834 * WRITE payloads.. If we need to do BIDI READ passthrough for
··· 5008 struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
5009 u32 se_mem_cnt = 0, task_offset = 0;
5010
5011 + if (!list_empty(T_TASK(cmd)->t_mem_list))
5012 + se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
5013 + struct se_mem, se_list);
5014
5015 ret = transport_do_se_mem_map(dev, task,
5016 cmd->t_task->t_mem_list, NULL, se_mem,
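scsi_get_new_index(), relocated above from the deleted target_core_mib.c into target_core_transport.c, reserves 0 as a "no index" value: when the u32 counter wraps past 0xffffffff the increment yields 0, so it increments once more and hands out 1. A userspace model of just that wraparound rule; locking is omitted and get_new_index is a stand-in name, not the kernel symbol:

#include <assert.h>
#include <stdint.h>

static uint32_t mib_index;              /* models scsi_mib_index[type] */

static uint32_t get_new_index(void)
{
        uint32_t new_index = ++mib_index;

        if (new_index == 0)             /* 0 is reserved; skip it on wrap */
                new_index = ++mib_index;
        return new_index;
}

int main(void)
{
        mib_index = UINT32_MAX - 1;
        assert(get_new_index() == UINT32_MAX);  /* last pre-wrap value */
        assert(get_new_index() == 1);           /* wrap skips the 0 sentinel */
        return 0;
}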
+22 -6
include/target/target_core_base.h
··· 8 #include <scsi/scsi_cmnd.h>
9 #include <net/sock.h>
10 #include <net/tcp.h>
11 - #include "target_core_mib.h"
12
13 #define TARGET_CORE_MOD_VERSION "v4.0.0-rc6"
14 #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
··· 193 SAM_TASK_ATTR_UNTAGGED,
194 SAM_TASK_ATTR_EMULATED
195 } t10_task_attr_index_t;
196
197 struct se_cmd;
198
··· 592 spinlock_t stats_lock;
593 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
594 atomic_t acl_pr_ref_count;
595 - /* Used for MIB access */
596 - atomic_t mib_ref_count;
597 struct se_dev_entry *device_list;
598 struct se_session *nacl_sess;
599 struct se_portal_group *se_tpg;
··· 607 } ____cacheline_aligned;
608
609 struct se_session {
610 - /* Used for MIB access */
611 - atomic_t mib_ref_count;
612 u64 sess_bin_isid;
613 struct se_node_acl *se_node_acl;
614 struct se_portal_group *se_tpg;
··· 816 /* Virtual iSCSI devices attached. */
817 u32 dev_count;
818 u32 hba_index;
819 - atomic_t dev_mib_access_count;
820 atomic_t load_balance_queue;
821 atomic_t left_queue_depth;
822 /* Maximum queue depth the HBA can handle. */
··· 854
855 #define SE_LUN(c) ((struct se_lun *)(c)->se_lun)
856
857 struct se_port {
858 /* RELATIVE TARGET PORT IDENTIFER */
859 u16 sep_rtpi;
··· 882 } ____cacheline_aligned;
883
884 struct se_tpg_np {
885 struct config_group tpg_np_group;
886 } ____cacheline_aligned;
··· 8 #include <scsi/scsi_cmnd.h>
9 #include <net/sock.h>
10 #include <net/tcp.h>
11
12 #define TARGET_CORE_MOD_VERSION "v4.0.0-rc6"
13 #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
··· 194 SAM_TASK_ATTR_UNTAGGED,
195 SAM_TASK_ATTR_EMULATED
196 } t10_task_attr_index_t;
197 +
198 + /*
199 + * Used for target SCSI statistics
200 + */
201 + typedef enum {
202 + SCSI_INST_INDEX,
203 + SCSI_DEVICE_INDEX,
204 + SCSI_AUTH_INTR_INDEX,
205 + SCSI_INDEX_TYPE_MAX
206 + } scsi_index_t;
207 +
208 + struct scsi_index_table {
209 + spinlock_t lock;
210 + u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
211 + } ____cacheline_aligned;
212
213 struct se_cmd;
214
··· 578 spinlock_t stats_lock;
579 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
580 atomic_t acl_pr_ref_count;
581 struct se_dev_entry *device_list;
582 struct se_session *nacl_sess;
583 struct se_portal_group *se_tpg;
··· 595 } ____cacheline_aligned;
596
597 struct se_session {
598 u64 sess_bin_isid;
599 struct se_node_acl *se_node_acl;
600 struct se_portal_group *se_tpg;
··· 806 /* Virtual iSCSI devices attached. */
807 u32 dev_count;
808 u32 hba_index;
809 atomic_t load_balance_queue;
810 atomic_t left_queue_depth;
811 /* Maximum queue depth the HBA can handle. */
··· 845
846 #define SE_LUN(c) ((struct se_lun *)(c)->se_lun)
847
848 + struct scsi_port_stats {
849 + u64 cmd_pdus;
850 + u64 tx_data_octets;
851 + u64 rx_data_octets;
852 + } ____cacheline_aligned;
853 +
854 struct se_port {
855 /* RELATIVE TARGET PORT IDENTIFER */
856 u16 sep_rtpi;
··· 867 } ____cacheline_aligned;
868
869 struct se_tpg_np {
870 + struct se_portal_group *tpg_np_parent;
871 struct config_group tpg_np_group;
872 } ____cacheline_aligned;
+2
include/target/target_core_transport.h
··· 111
112 extern int init_se_global(void);
113 extern void release_se_global(void);
114 extern void transport_init_queue_obj(struct se_queue_obj *);
115 extern int transport_subsystem_check_init(void);
116 extern int transport_subsystem_register(struct se_subsystem_api *);
··· 111
112 extern int init_se_global(void);
113 extern void release_se_global(void);
114 + extern void init_scsi_index_table(void);
115 + extern u32 scsi_get_new_index(scsi_index_t);
116 extern void transport_init_queue_obj(struct se_queue_obj *);
117 extern int transport_subsystem_check_init(void);
118 extern int transport_subsystem_register(struct se_subsystem_api *);