Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

target: use 'se_dev_entry' when allocating UAs

We need to use 'se_dev_entry' as argument when allocating
UAs, otherwise we'll never see any UAs for an implicit
ALUA state transition triggered from userspace.

(Add target_ua_allocate_lun() common caller - nab)

Signed-off-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>

Authored by Hannes Reinecke and committed by Nicholas Bellinger
c51c8e7b 31605813

+55 -43
+19 -10
drivers/target/target_core_alua.c
··· 972 972 list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) { 973 973 lacl = rcu_dereference_check(se_deve->se_lun_acl, 974 974 lockdep_is_held(&lun->lun_deve_lock)); 975 - /* 976 - * se_deve->se_lun_acl pointer may be NULL for a 977 - * entry created without explicit Node+MappedLUN ACLs 978 - */ 979 - if (!lacl) 980 - continue; 981 975 976 + /* 977 + * spc4r37 p.242: 978 + * After an explicit target port asymmetric access 979 + * state change, a device server shall establish a 980 + * unit attention condition with the additional sense 981 + * code set to ASYMMETRIC ACCESS STATE CHANGED for 982 + * the initiator port associated with every I_T nexus 983 + * other than the I_T nexus on which the SET TARGET 984 + * PORT GROUPS command was received. 985 + */ 982 986 if ((tg_pt_gp->tg_pt_gp_alua_access_status == 983 987 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && 984 - (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) && 985 - (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) && 986 988 (tg_pt_gp->tg_pt_gp_alua_lun != NULL) && 987 989 (tg_pt_gp->tg_pt_gp_alua_lun == lun)) 988 990 continue; 989 991 990 - core_scsi3_ua_allocate(lacl->se_lun_nacl, 991 - se_deve->mapped_lun, 0x2A, 992 + /* 993 + * se_deve->se_lun_acl pointer may be NULL for a 994 + * entry created without explicit Node+MappedLUN ACLs 995 + */ 996 + if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) && 997 + (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl)) 998 + continue; 999 + 1000 + core_scsi3_ua_allocate(se_deve, 0x2A, 992 1001 ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED); 993 1002 } 994 1003 spin_unlock_bh(&lun->lun_deve_lock);
+6 -6
drivers/target/target_core_pr.c
··· 2197 2197 &pr_tmpl->registration_list, 2198 2198 pr_reg_list) { 2199 2199 2200 - core_scsi3_ua_allocate( 2200 + target_ua_allocate_lun( 2201 2201 pr_reg_p->pr_reg_nacl, 2202 2202 pr_reg_p->pr_res_mapped_lun, 2203 2203 0x2A, ··· 2624 2624 if (pr_reg_p == pr_reg) 2625 2625 continue; 2626 2626 2627 - core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl, 2627 + target_ua_allocate_lun(pr_reg_p->pr_reg_nacl, 2628 2628 pr_reg_p->pr_res_mapped_lun, 2629 2629 0x2A, ASCQ_2AH_RESERVATIONS_RELEASED); 2630 2630 } ··· 2709 2709 * additional sense code set to RESERVATIONS PREEMPTED. 2710 2710 */ 2711 2711 if (!calling_it_nexus) 2712 - core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 2712 + target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun, 2713 2713 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED); 2714 2714 } 2715 2715 spin_unlock(&pr_tmpl->registration_lock); ··· 2918 2918 NULL, 0); 2919 2919 } 2920 2920 if (!calling_it_nexus) 2921 - core_scsi3_ua_allocate(pr_reg_nacl, 2921 + target_ua_allocate_lun(pr_reg_nacl, 2922 2922 pr_res_mapped_lun, 0x2A, 2923 2923 ASCQ_2AH_REGISTRATIONS_PREEMPTED); 2924 2924 } ··· 3024 3024 * persistent reservation and/or registration, with the 3025 3025 * additional sense code set to REGISTRATIONS PREEMPTED; 3026 3026 */ 3027 - core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A, 3027 + target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun, 0x2A, 3028 3028 ASCQ_2AH_REGISTRATIONS_PREEMPTED); 3029 3029 } 3030 3030 spin_unlock(&pr_tmpl->registration_lock); ··· 3057 3057 if (calling_it_nexus) 3058 3058 continue; 3059 3059 3060 - core_scsi3_ua_allocate(pr_reg->pr_reg_nacl, 3060 + target_ua_allocate_lun(pr_reg->pr_reg_nacl, 3061 3061 pr_reg->pr_res_mapped_lun, 0x2A, 3062 3062 ASCQ_2AH_RESERVATIONS_RELEASED); 3063 3063 }
+6 -6
drivers/target/target_core_transport.c
··· 1677 1677 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1678 1678 */ 1679 1679 if (cmd->se_sess && 1680 - cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) 1681 - core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, 1682 - cmd->orig_fe_lun, 0x2C, 1683 - ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1684 - 1680 + cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) { 1681 + target_ua_allocate_lun(cmd->se_sess->se_node_acl, 1682 + cmd->orig_fe_lun, 0x2C, 1683 + ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1684 + } 1685 1685 trace_target_cmd_complete(cmd); 1686 - ret = cmd->se_tfo-> queue_status(cmd); 1686 + ret = cmd->se_tfo->queue_status(cmd); 1687 1687 if (ret == -EAGAIN || ret == -ENOMEM) 1688 1688 goto queue_full; 1689 1689 goto check_stop;
+22 -20
drivers/target/target_core_ua.c
··· 87 87 } 88 88 89 89 int core_scsi3_ua_allocate( 90 - struct se_node_acl *nacl, 91 - u64 unpacked_lun, 90 + struct se_dev_entry *deve, 92 91 u8 asc, 93 92 u8 ascq) 94 93 { 95 - struct se_dev_entry *deve; 96 94 struct se_ua *ua, *ua_p, *ua_tmp; 97 - /* 98 - * PASSTHROUGH OPS 99 - */ 100 - if (!nacl) 101 - return -EINVAL; 102 95 103 96 ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC); 104 97 if (!ua) { ··· 103 110 ua->ua_asc = asc; 104 111 ua->ua_ascq = ascq; 105 112 106 - rcu_read_lock(); 107 - deve = target_nacl_find_deve(nacl, unpacked_lun); 108 - if (!deve) { 109 - rcu_read_unlock(); 110 - return -EINVAL; 111 - } 112 113 spin_lock(&deve->ua_lock); 113 114 list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) { 114 115 /* ··· 110 123 */ 111 124 if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) { 112 125 spin_unlock(&deve->ua_lock); 113 - rcu_read_unlock(); 114 126 kmem_cache_free(se_ua_cache, ua); 115 127 return 0; 116 128 } ··· 156 170 spin_unlock(&deve->ua_lock); 157 171 158 172 atomic_inc_mb(&deve->ua_count); 159 - rcu_read_unlock(); 160 173 return 0; 161 174 } 162 175 list_add_tail(&ua->ua_nacl_list, &deve->ua_list); 163 176 spin_unlock(&deve->ua_lock); 164 177 165 - pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %llu, ASC:" 166 - " 0x%02x, ASCQ: 0x%02x\n", 167 - nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 178 + pr_debug("Allocated UNIT ATTENTION, mapped LUN: %llu, ASC:" 179 + " 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun, 168 180 asc, ascq); 169 181 170 182 atomic_inc_mb(&deve->ua_count); 171 - rcu_read_unlock(); 172 183 return 0; 184 + } 185 + 186 + void target_ua_allocate_lun(struct se_node_acl *nacl, 187 + u32 unpacked_lun, u8 asc, u8 ascq) 188 + { 189 + struct se_dev_entry *deve; 190 + 191 + if (!nacl) 192 + return; 193 + 194 + rcu_read_lock(); 195 + deve = target_nacl_find_deve(nacl, unpacked_lun); 196 + if (!deve) { 197 + rcu_read_unlock(); 198 + return; 199 + } 200 + 201 + core_scsi3_ua_allocate(deve, asc, ascq); 202 + rcu_read_unlock(); 173 203 }
+2 -1
drivers/target/target_core_ua.h
··· 28 28 extern struct kmem_cache *se_ua_cache; 29 29 30 30 extern sense_reason_t target_scsi3_ua_check(struct se_cmd *); 31 - extern int core_scsi3_ua_allocate(struct se_node_acl *, u64, u8, u8); 31 + extern int core_scsi3_ua_allocate(struct se_dev_entry *, u8, u8); 32 + extern void target_ua_allocate_lun(struct se_node_acl *, u32, u8, u8); 32 33 extern void core_scsi3_ua_release_all(struct se_dev_entry *); 33 34 extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *); 34 35 extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,