Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
"Here are the target-pending fixes for v4.12-rc4:

- ibmvscsis ABORT_TASK handling fixes that missed the v4.12 merge
window. (Bryant Ly and Michael Cyr)

- Re-add a target-core check enforcing WRITE overflow reject that was
relaxed in v4.3, to avoid unsupported iscsi-target immediate data
overflow. (nab)

- Fix a target-core-user OOPs during device removal. (MNC + Bryant
Ly)

- Fix a long standing iscsi-target potential issue where kthread exit
did not wait for kthread_should_stop(). (Jiang Yi)

- Fix an iscsi-target v3.12.y regression OOPs involving initial login
PDU processing during asynchronous TCP connection close. (MNC +
nab)

This is a little larger than usual for an -rc4, primarily due to the
iscsi-target v3.12.y regression OOPs bug-fix.

However, it's an important patch as MNC + Hannes were both able to
trigger it using a reduced iscsi initiator login timeout combined with
a backend taking a long time to complete I/Os during iscsi login
driven session reinstatement"

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
iscsi-target: Always wait for kthread_should_stop() before kthread exit
iscsi-target: Fix initial login PDU asynchronous socket close OOPs
tcmu: fix crash during device removal
target: Re-add check to reject control WRITEs with overflow data
ibmvscsis: Fix the incorrect req_lim_delta
ibmvscsis: Clear left-over abort_cmd pointers

+242 -93
+23 -4
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
··· 1170 1170 cmd = list_first_entry_or_null(&vscsi->free_cmd, 1171 1171 struct ibmvscsis_cmd, list); 1172 1172 if (cmd) { 1173 + if (cmd->abort_cmd) 1174 + cmd->abort_cmd = NULL; 1173 1175 cmd->flags &= ~(DELAY_SEND); 1174 1176 list_del(&cmd->list); 1175 1177 cmd->iue = iue; ··· 1776 1774 if (cmd->abort_cmd) { 1777 1775 retry = true; 1778 1776 cmd->abort_cmd->flags &= ~(DELAY_SEND); 1777 + cmd->abort_cmd = NULL; 1779 1778 } 1780 1779 1781 1780 /* ··· 1791 1788 list_del(&cmd->list); 1792 1789 ibmvscsis_free_cmd_resources(vscsi, 1793 1790 cmd); 1791 + /* 1792 + * With a successfully aborted op 1793 + * through LIO we want to increment the 1794 + * the vscsi credit so that when we dont 1795 + * send a rsp to the original scsi abort 1796 + * op (h_send_crq), but the tm rsp to 1797 + * the abort is sent, the credit is 1798 + * correctly sent with the abort tm rsp. 1799 + * We would need 1 for the abort tm rsp 1800 + * and 1 credit for the aborted scsi op. 1801 + * Thus we need to increment here. 1802 + * Also we want to increment the credit 1803 + * here because we want to make sure 1804 + * cmd is actually released first 1805 + * otherwise the client will think it 1806 + * it can send a new cmd, and we could 1807 + * find ourselves short of cmd elements. 1808 + */ 1809 + vscsi->credit += 1; 1794 1810 } else { 1795 1811 iue = cmd->iue; 1796 1812 ··· 2984 2962 2985 2963 rsp->opcode = SRP_RSP; 2986 2964 2987 - if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING) 2988 - rsp->req_lim_delta = cpu_to_be32(vscsi->credit); 2989 - else 2990 - rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit); 2965 + rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit); 2991 2966 rsp->tag = cmd->rsp.tag; 2992 2967 rsp->flags = 0; 2993 2968
+24 -6
drivers/target/iscsi/iscsi_target.c
··· 3790 3790 { 3791 3791 int ret = 0; 3792 3792 struct iscsi_conn *conn = arg; 3793 + bool conn_freed = false; 3794 + 3793 3795 /* 3794 3796 * Allow ourselves to be interrupted by SIGINT so that a 3795 3797 * connection recovery / failure event can be triggered externally. ··· 3817 3815 goto transport_err; 3818 3816 3819 3817 ret = iscsit_handle_response_queue(conn); 3820 - if (ret == 1) 3818 + if (ret == 1) { 3821 3819 goto get_immediate; 3822 - else if (ret == -ECONNRESET) 3820 + } else if (ret == -ECONNRESET) { 3821 + conn_freed = true; 3823 3822 goto out; 3824 - else if (ret < 0) 3823 + } else if (ret < 0) { 3825 3824 goto transport_err; 3825 + } 3826 3826 } 3827 3827 3828 3828 transport_err: ··· 3834 3830 * responsible for cleaning up the early connection failure. 3835 3831 */ 3836 3832 if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) 3837 - iscsit_take_action_for_connection_exit(conn); 3833 + iscsit_take_action_for_connection_exit(conn, &conn_freed); 3838 3834 out: 3835 + if (!conn_freed) { 3836 + while (!kthread_should_stop()) { 3837 + msleep(100); 3838 + } 3839 + } 3839 3840 return 0; 3840 3841 } 3841 3842 ··· 4013 4004 { 4014 4005 int rc; 4015 4006 struct iscsi_conn *conn = arg; 4007 + bool conn_freed = false; 4016 4008 4017 4009 /* 4018 4010 * Allow ourselves to be interrupted by SIGINT so that a ··· 4026 4016 */ 4027 4017 rc = wait_for_completion_interruptible(&conn->rx_login_comp); 4028 4018 if (rc < 0 || iscsi_target_check_conn_state(conn)) 4029 - return 0; 4019 + goto out; 4030 4020 4031 4021 if (!conn->conn_transport->iscsit_get_rx_pdu) 4032 4022 return 0; ··· 4035 4025 4036 4026 if (!signal_pending(current)) 4037 4027 atomic_set(&conn->transport_failed, 1); 4038 - iscsit_take_action_for_connection_exit(conn); 4028 + iscsit_take_action_for_connection_exit(conn, &conn_freed); 4029 + 4030 + out: 4031 + if (!conn_freed) { 4032 + while (!kthread_should_stop()) { 4033 + msleep(100); 4034 + } 4035 + } 4036 + 4039 4037 return 0; 4040 4038 } 4041 4039
+5 -1
drivers/target/iscsi/iscsi_target_erl0.c
··· 930 930 } 931 931 } 932 932 933 - void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) 933 + void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed) 934 934 { 935 + *conn_freed = false; 936 + 935 937 spin_lock_bh(&conn->state_lock); 936 938 if (atomic_read(&conn->connection_exit)) { 937 939 spin_unlock_bh(&conn->state_lock); ··· 944 942 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { 945 943 spin_unlock_bh(&conn->state_lock); 946 944 iscsit_close_connection(conn); 945 + *conn_freed = true; 947 946 return; 948 947 } 949 948 ··· 958 955 spin_unlock_bh(&conn->state_lock); 959 956 960 957 iscsit_handle_connection_cleanup(conn); 958 + *conn_freed = true; 961 959 }
+1 -1
drivers/target/iscsi/iscsi_target_erl0.h
··· 15 15 extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *); 16 16 extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int); 17 17 extern void iscsit_fall_back_to_erl0(struct iscsi_session *); 18 - extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *); 18 + extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *); 19 19 20 20 #endif /*** ISCSI_TARGET_ERL0_H ***/
+4
drivers/target/iscsi/iscsi_target_login.c
··· 1464 1464 break; 1465 1465 } 1466 1466 1467 + while (!kthread_should_stop()) { 1468 + msleep(100); 1469 + } 1470 + 1467 1471 return 0; 1468 1472 }
+133 -63
drivers/target/iscsi/iscsi_target_nego.c
··· 493 493 494 494 static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *); 495 495 496 - static bool iscsi_target_sk_state_check(struct sock *sk) 496 + static bool __iscsi_target_sk_check_close(struct sock *sk) 497 497 { 498 498 if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) { 499 - pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE," 499 + pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE," 500 500 "returning FALSE\n"); 501 - return false; 501 + return true; 502 502 } 503 - return true; 503 + return false; 504 + } 505 + 506 + static bool iscsi_target_sk_check_close(struct iscsi_conn *conn) 507 + { 508 + bool state = false; 509 + 510 + if (conn->sock) { 511 + struct sock *sk = conn->sock->sk; 512 + 513 + read_lock_bh(&sk->sk_callback_lock); 514 + state = (__iscsi_target_sk_check_close(sk) || 515 + test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); 516 + read_unlock_bh(&sk->sk_callback_lock); 517 + } 518 + return state; 519 + } 520 + 521 + static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag) 522 + { 523 + bool state = false; 524 + 525 + if (conn->sock) { 526 + struct sock *sk = conn->sock->sk; 527 + 528 + read_lock_bh(&sk->sk_callback_lock); 529 + state = test_bit(flag, &conn->login_flags); 530 + read_unlock_bh(&sk->sk_callback_lock); 531 + } 532 + return state; 533 + } 534 + 535 + static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag) 536 + { 537 + bool state = false; 538 + 539 + if (conn->sock) { 540 + struct sock *sk = conn->sock->sk; 541 + 542 + write_lock_bh(&sk->sk_callback_lock); 543 + state = (__iscsi_target_sk_check_close(sk) || 544 + test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); 545 + if (!state) 546 + clear_bit(flag, &conn->login_flags); 547 + write_unlock_bh(&sk->sk_callback_lock); 548 + } 549 + return state; 504 550 } 505 551 506 552 static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login 
*login) ··· 586 540 587 541 pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", 588 542 conn, current->comm, current->pid); 543 + /* 544 + * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready() 545 + * before initial PDU processing in iscsi_target_start_negotiation() 546 + * has completed, go ahead and retry until it's cleared. 547 + * 548 + * Otherwise if the TCP connection drops while this is occuring, 549 + * iscsi_target_start_negotiation() will detect the failure, call 550 + * cancel_delayed_work_sync(&conn->login_work), and cleanup the 551 + * remaining iscsi connection resources from iscsi_np process context. 552 + */ 553 + if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) { 554 + schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10)); 555 + return; 556 + } 589 557 590 558 spin_lock(&tpg->tpg_state_lock); 591 559 state = (tpg->tpg_state == TPG_STATE_ACTIVE); ··· 607 547 608 548 if (!state) { 609 549 pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); 610 - iscsi_target_restore_sock_callbacks(conn); 611 - iscsi_target_login_drop(conn, login); 612 - iscsit_deaccess_np(np, tpg, tpg_np); 613 - return; 550 + goto err; 614 551 } 615 552 616 - if (conn->sock) { 617 - struct sock *sk = conn->sock->sk; 618 - 619 - read_lock_bh(&sk->sk_callback_lock); 620 - state = iscsi_target_sk_state_check(sk); 621 - read_unlock_bh(&sk->sk_callback_lock); 622 - 623 - if (!state) { 624 - pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); 625 - iscsi_target_restore_sock_callbacks(conn); 626 - iscsi_target_login_drop(conn, login); 627 - iscsit_deaccess_np(np, tpg, tpg_np); 628 - return; 629 - } 553 + if (iscsi_target_sk_check_close(conn)) { 554 + pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); 555 + goto err; 630 556 } 631 557 632 558 conn->login_kworker = current; ··· 630 584 flush_signals(current); 631 585 conn->login_kworker = NULL; 632 586 633 - if (rc < 0) { 634 - 
iscsi_target_restore_sock_callbacks(conn); 635 - iscsi_target_login_drop(conn, login); 636 - iscsit_deaccess_np(np, tpg, tpg_np); 637 - return; 638 - } 587 + if (rc < 0) 588 + goto err; 639 589 640 590 pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", 641 591 conn, current->comm, current->pid); 642 592 643 593 rc = iscsi_target_do_login(conn, login); 644 594 if (rc < 0) { 645 - iscsi_target_restore_sock_callbacks(conn); 646 - iscsi_target_login_drop(conn, login); 647 - iscsit_deaccess_np(np, tpg, tpg_np); 595 + goto err; 648 596 } else if (!rc) { 649 - if (conn->sock) { 650 - struct sock *sk = conn->sock->sk; 651 - 652 - write_lock_bh(&sk->sk_callback_lock); 653 - clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags); 654 - write_unlock_bh(&sk->sk_callback_lock); 655 - } 597 + if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE)) 598 + goto err; 656 599 } else if (rc == 1) { 657 600 iscsi_target_nego_release(conn); 658 601 iscsi_post_login_handler(np, conn, zero_tsih); 659 602 iscsit_deaccess_np(np, tpg, tpg_np); 660 603 } 604 + return; 605 + 606 + err: 607 + iscsi_target_restore_sock_callbacks(conn); 608 + iscsi_target_login_drop(conn, login); 609 + iscsit_deaccess_np(np, tpg, tpg_np); 661 610 } 662 611 663 612 static void iscsi_target_do_cleanup(struct work_struct *work) ··· 700 659 orig_state_change(sk); 701 660 return; 702 661 } 662 + state = __iscsi_target_sk_check_close(sk); 663 + pr_debug("__iscsi_target_sk_close_change: state: %d\n", state); 664 + 703 665 if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { 704 666 pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change" 705 667 " conn: %p\n", conn); 668 + if (state) 669 + set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); 706 670 write_unlock_bh(&sk->sk_callback_lock); 707 671 orig_state_change(sk); 708 672 return; 709 673 } 710 - if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { 674 + if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { 711 675 
pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n", 712 676 conn); 713 677 write_unlock_bh(&sk->sk_callback_lock); 714 678 orig_state_change(sk); 715 679 return; 716 680 } 717 - 718 - state = iscsi_target_sk_state_check(sk); 719 - write_unlock_bh(&sk->sk_callback_lock); 720 - 721 - pr_debug("iscsi_target_sk_state_change: state: %d\n", state); 722 - 723 - if (!state) { 681 + /* 682 + * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED, 683 + * but only queue conn->login_work -> iscsi_target_do_login_rx() 684 + * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared. 685 + * 686 + * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close() 687 + * will detect the dropped TCP connection from delayed workqueue context. 688 + * 689 + * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial 690 + * iscsi_target_start_negotiation() is running, iscsi_target_do_login() 691 + * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation() 692 + * via iscsi_target_sk_check_and_clear() is responsible for detecting the 693 + * dropped TCP connection in iscsi_np process context, and cleaning up 694 + * the remaining iscsi connection resources. 
695 + */ 696 + if (state) { 724 697 pr_debug("iscsi_target_sk_state_change got failed state\n"); 725 - schedule_delayed_work(&conn->login_cleanup_work, 0); 698 + set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); 699 + state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); 700 + write_unlock_bh(&sk->sk_callback_lock); 701 + 702 + orig_state_change(sk); 703 + 704 + if (!state) 705 + schedule_delayed_work(&conn->login_work, 0); 726 706 return; 727 707 } 708 + write_unlock_bh(&sk->sk_callback_lock); 709 + 728 710 orig_state_change(sk); 729 711 } 730 712 ··· 1010 946 if (iscsi_target_handle_csg_one(conn, login) < 0) 1011 947 return -1; 1012 948 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { 949 + /* 950 + * Check to make sure the TCP connection has not 951 + * dropped asynchronously while session reinstatement 952 + * was occuring in this kthread context, before 953 + * transitioning to full feature phase operation. 954 + */ 955 + if (iscsi_target_sk_check_close(conn)) 956 + return -1; 957 + 1013 958 login->tsih = conn->sess->tsih; 1014 959 login->login_complete = 1; 1015 960 iscsi_target_restore_sock_callbacks(conn); ··· 1043 970 login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK; 1044 971 } 1045 972 break; 1046 - } 1047 - 1048 - if (conn->sock) { 1049 - struct sock *sk = conn->sock->sk; 1050 - bool state; 1051 - 1052 - read_lock_bh(&sk->sk_callback_lock); 1053 - state = iscsi_target_sk_state_check(sk); 1054 - read_unlock_bh(&sk->sk_callback_lock); 1055 - 1056 - if (!state) { 1057 - pr_debug("iscsi_target_do_login() failed state for" 1058 - " conn: %p\n", conn); 1059 - return -1; 1060 - } 1061 973 } 1062 974 1063 975 return 0; ··· 1313 1255 1314 1256 write_lock_bh(&sk->sk_callback_lock); 1315 1257 set_bit(LOGIN_FLAGS_READY, &conn->login_flags); 1258 + set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); 1316 1259 write_unlock_bh(&sk->sk_callback_lock); 1317 1260 } 1318 - 1261 + /* 1262 + * If iscsi_target_do_login returns zero to signal more PDU 1263 + * 
exchanges are required to complete the login, go ahead and 1264 + * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection 1265 + * is still active. 1266 + * 1267 + * Otherwise if TCP connection dropped asynchronously, go ahead 1268 + * and perform connection cleanup now. 1269 + */ 1319 1270 ret = iscsi_target_do_login(conn, login); 1271 + if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) 1272 + ret = -1; 1273 + 1320 1274 if (ret < 0) { 1321 1275 cancel_delayed_work_sync(&conn->login_work); 1322 1276 cancel_delayed_work_sync(&conn->login_cleanup_work);
+18 -5
drivers/target/target_core_transport.c
··· 1160 1160 if (cmd->unknown_data_length) { 1161 1161 cmd->data_length = size; 1162 1162 } else if (size != cmd->data_length) { 1163 - pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 1163 + pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" 1164 1164 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1165 1165 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1166 1166 cmd->data_length, size, cmd->t_task_cdb[0]); 1167 1167 1168 - if (cmd->data_direction == DMA_TO_DEVICE && 1169 - cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1170 - pr_err("Rejecting underflow/overflow WRITE data\n"); 1171 - return TCM_INVALID_CDB_FIELD; 1168 + if (cmd->data_direction == DMA_TO_DEVICE) { 1169 + if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1170 + pr_err_ratelimited("Rejecting underflow/overflow" 1171 + " for WRITE data CDB\n"); 1172 + return TCM_INVALID_CDB_FIELD; 1173 + } 1174 + /* 1175 + * Some fabric drivers like iscsi-target still expect to 1176 + * always reject overflow writes. Reject this case until 1177 + * full fabric driver level support for overflow writes 1178 + * is introduced tree-wide. 1179 + */ 1180 + if (size > cmd->data_length) { 1181 + pr_err_ratelimited("Rejecting overflow for" 1182 + " WRITE control CDB\n"); 1183 + return TCM_INVALID_CDB_FIELD; 1184 + } 1172 1185 } 1173 1186 /* 1174 1187 * Reject READ_* or WRITE_* with overflow/underflow for
+33 -13
drivers/target/target_core_user.c
··· 97 97 98 98 struct tcmu_dev { 99 99 struct list_head node; 100 - 100 + struct kref kref; 101 101 struct se_device se_dev; 102 102 103 103 char *name; ··· 969 969 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); 970 970 if (!udev) 971 971 return NULL; 972 + kref_init(&udev->kref); 972 973 973 974 udev->name = kstrdup(name, GFP_KERNEL); 974 975 if (!udev->name) { ··· 1146 1145 return 0; 1147 1146 } 1148 1147 1148 + static void tcmu_dev_call_rcu(struct rcu_head *p) 1149 + { 1150 + struct se_device *dev = container_of(p, struct se_device, rcu_head); 1151 + struct tcmu_dev *udev = TCMU_DEV(dev); 1152 + 1153 + kfree(udev->uio_info.name); 1154 + kfree(udev->name); 1155 + kfree(udev); 1156 + } 1157 + 1158 + static void tcmu_dev_kref_release(struct kref *kref) 1159 + { 1160 + struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); 1161 + struct se_device *dev = &udev->se_dev; 1162 + 1163 + call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); 1164 + } 1165 + 1149 1166 static int tcmu_release(struct uio_info *info, struct inode *inode) 1150 1167 { 1151 1168 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); ··· 1171 1152 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); 1172 1153 1173 1154 pr_debug("close\n"); 1174 - 1155 + /* release ref from configure */ 1156 + kref_put(&udev->kref, tcmu_dev_kref_release); 1175 1157 return 0; 1176 1158 } 1177 1159 ··· 1292 1272 dev->dev_attrib.hw_max_sectors = 128; 1293 1273 dev->dev_attrib.hw_queue_depth = 128; 1294 1274 1275 + /* 1276 + * Get a ref incase userspace does a close on the uio device before 1277 + * LIO has initiated tcmu_free_device. 
1278 + */ 1279 + kref_get(&udev->kref); 1280 + 1295 1281 ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, 1296 1282 udev->uio_info.uio_dev->minor); 1297 1283 if (ret) ··· 1310 1284 return 0; 1311 1285 1312 1286 err_netlink: 1287 + kref_put(&udev->kref, tcmu_dev_kref_release); 1313 1288 uio_unregister_device(&udev->uio_info); 1314 1289 err_register: 1315 1290 vfree(udev->mb_addr); 1316 1291 err_vzalloc: 1317 1292 kfree(info->name); 1293 + info->name = NULL; 1318 1294 1319 1295 return ret; 1320 1296 } ··· 1328 1300 return 0; 1329 1301 } 1330 1302 return -EINVAL; 1331 - } 1332 - 1333 - static void tcmu_dev_call_rcu(struct rcu_head *p) 1334 - { 1335 - struct se_device *dev = container_of(p, struct se_device, rcu_head); 1336 - struct tcmu_dev *udev = TCMU_DEV(dev); 1337 - 1338 - kfree(udev); 1339 1303 } 1340 1304 1341 1305 static bool tcmu_dev_configured(struct tcmu_dev *udev) ··· 1384 1364 udev->uio_info.uio_dev->minor); 1385 1365 1386 1366 uio_unregister_device(&udev->uio_info); 1387 - kfree(udev->uio_info.name); 1388 - kfree(udev->name); 1389 1367 } 1390 - call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); 1368 + 1369 + /* release ref from init */ 1370 + kref_put(&udev->kref, tcmu_dev_kref_release); 1391 1371 } 1392 1372 1393 1373 enum {
+1
include/target/iscsi/iscsi_target_core.h
··· 557 557 #define LOGIN_FLAGS_READ_ACTIVE 1 558 558 #define LOGIN_FLAGS_CLOSED 2 559 559 #define LOGIN_FLAGS_READY 4 560 + #define LOGIN_FLAGS_INITIAL_PDU 8 560 561 unsigned long login_flags; 561 562 struct delayed_work login_work; 562 563 struct delayed_work login_cleanup_work;