Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

libfc: Do not invoke the response handler after fc_exch_done()

While the FCoE initiator driver invokes fc_exch_done() from inside
the libfc response handler, FCoE target drivers typically invoke
fc_exch_done() from outside the libfc response handler. The object
fc_exch.arg points at may disappear as soon as fc_exch_done() has
finished. So it's important not to invoke the response handler
function after fc_exch_done() has finished. Modify libfc such that
this guarantee is provided if fc_exch_done() is invoked from
outside a response handler. This patch fixes a sporadic crash in
FCoE target implementations after a command has been aborted.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Cc: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: Robert Love <robert.w.love@intel.com>

Authored by Bart Van Assche and committed by Robert Love
commit 7030fd62 (parent f95b35cf)

+101 -39 (total, all files)
+92 -39 (this file)
drivers/scsi/libfc/fc_exch.c
··· 381 381 /** 382 382 * fc_exch_done_locked() - Complete an exchange with the exchange lock held 383 383 * @ep: The exchange that is complete 384 + * 385 + * Note: May sleep if invoked from outside a response handler. 384 386 */ 385 387 static int fc_exch_done_locked(struct fc_exch *ep) 386 388 { ··· 394 392 * ep, and in that case we only clear the resp and set it as 395 393 * complete, so it can be reused by the timer to send the rrq. 396 394 */ 397 - ep->resp = NULL; 398 395 if (ep->state & FC_EX_DONE) 399 396 return rc; 400 397 ep->esb_stat |= ESB_ST_COMPLETE; ··· 590 589 591 590 /* 592 591 * Set the response handler for the exchange associated with a sequence. 592 + * 593 + * Note: May sleep if invoked from outside a response handler. 593 594 */ 594 595 static void fc_seq_set_resp(struct fc_seq *sp, 595 596 void (*resp)(struct fc_seq *, struct fc_frame *, ··· 599 596 void *arg) 600 597 { 601 598 struct fc_exch *ep = fc_seq_exch(sp); 599 + DEFINE_WAIT(wait); 602 600 603 601 spin_lock_bh(&ep->ex_lock); 602 + while (ep->resp_active && ep->resp_task != current) { 603 + prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE); 604 + spin_unlock_bh(&ep->ex_lock); 605 + 606 + schedule(); 607 + 608 + spin_lock_bh(&ep->ex_lock); 609 + } 610 + finish_wait(&ep->resp_wq, &wait); 604 611 ep->resp = resp; 605 612 ep->arg = arg; 606 613 spin_unlock_bh(&ep->ex_lock); ··· 694 681 } 695 682 696 683 /** 684 + * fc_invoke_resp() - invoke ep->resp() 685 + * 686 + * Notes: 687 + * It is assumed that after initialization finished (this means the 688 + * first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are 689 + * modified only via fc_seq_set_resp(). This guarantees that none of these 690 + * two variables changes if ep->resp_active > 0. 
691 + * 692 + * If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when 693 + * this function is invoked, the first spin_lock_bh() call in this function 694 + * will wait until fc_seq_set_resp() has finished modifying these variables. 695 + * 696 + * Since fc_exch_done() invokes fc_seq_set_resp() it is guaranteed that that 697 + * ep->resp() won't be invoked after fc_exch_done() has returned. 698 + * 699 + * The response handler itself may invoke fc_exch_done(), which will clear the 700 + * ep->resp pointer. 701 + * 702 + * Return value: 703 + * Returns true if and only if ep->resp has been invoked. 704 + */ 705 + static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp, 706 + struct fc_frame *fp) 707 + { 708 + void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); 709 + void *arg; 710 + bool res = false; 711 + 712 + spin_lock_bh(&ep->ex_lock); 713 + ep->resp_active++; 714 + if (ep->resp_task != current) 715 + ep->resp_task = !ep->resp_task ? current : NULL; 716 + resp = ep->resp; 717 + arg = ep->arg; 718 + spin_unlock_bh(&ep->ex_lock); 719 + 720 + if (resp) { 721 + resp(sp, fp, arg); 722 + res = true; 723 + } else if (!IS_ERR(fp)) { 724 + fc_frame_free(fp); 725 + } 726 + 727 + spin_lock_bh(&ep->ex_lock); 728 + if (--ep->resp_active == 0) 729 + ep->resp_task = NULL; 730 + spin_unlock_bh(&ep->ex_lock); 731 + 732 + if (ep->resp_active == 0) 733 + wake_up(&ep->resp_wq); 734 + 735 + return res; 736 + } 737 + 738 + /** 697 739 * fc_exch_timeout() - Handle exchange timer expiration 698 740 * @work: The work_struct identifying the exchange that timed out 699 741 */ ··· 757 689 struct fc_exch *ep = container_of(work, struct fc_exch, 758 690 timeout_work.work); 759 691 struct fc_seq *sp = &ep->seq; 760 - void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); 761 - void *arg; 762 692 u32 e_stat; 763 693 int rc = 1; 764 694 ··· 774 708 fc_exch_rrq(ep); 775 709 goto done; 776 710 } else { 777 - resp = ep->resp; 778 - arg = ep->arg; 779 
- ep->resp = NULL; 780 711 if (e_stat & ESB_ST_ABNORMAL) 781 712 rc = fc_exch_done_locked(ep); 782 713 spin_unlock_bh(&ep->ex_lock); 783 714 if (!rc) 784 715 fc_exch_delete(ep); 785 - if (resp) 786 - resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg); 716 + fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT)); 717 + fc_seq_set_resp(sp, NULL, ep->arg); 787 718 fc_seq_exch_abort(sp, 2 * ep->r_a_tov); 788 719 goto done; 789 720 } ··· 867 804 ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */ 868 805 ep->rxid = FC_XID_UNKNOWN; 869 806 ep->class = mp->class; 807 + ep->resp_active = 0; 808 + init_waitqueue_head(&ep->resp_wq); 870 809 INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout); 871 810 out: 872 811 return ep; ··· 929 864 * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and 930 865 * the memory allocated for the related objects may be freed. 931 866 * @sp: The sequence that has completed 867 + * 868 + * Note: May sleep if invoked from outside a response handler. 932 869 */ 933 870 static void fc_exch_done(struct fc_seq *sp) 934 871 { ··· 940 873 spin_lock_bh(&ep->ex_lock); 941 874 rc = fc_exch_done_locked(ep); 942 875 spin_unlock_bh(&ep->ex_lock); 876 + 877 + fc_seq_set_resp(sp, NULL, ep->arg); 943 878 if (!rc) 944 879 fc_exch_delete(ep); 945 880 } ··· 1505 1436 * If new exch resp handler is valid then call that 1506 1437 * first. 
1507 1438 */ 1508 - if (ep->resp) 1509 - ep->resp(sp, fp, ep->arg); 1510 - else 1439 + if (!fc_invoke_resp(ep, sp, fp)) 1511 1440 lport->tt.lport_recv(lport, fp); 1512 1441 fc_exch_release(ep); /* release from lookup */ 1513 1442 } else { ··· 1529 1462 struct fc_exch *ep; 1530 1463 enum fc_sof sof; 1531 1464 u32 f_ctl; 1532 - void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); 1533 - void *ex_resp_arg; 1534 1465 int rc; 1535 1466 1536 1467 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id)); ··· 1571 1506 1572 1507 if (fc_sof_needs_ack(sof)) 1573 1508 fc_seq_send_ack(sp, fp); 1574 - resp = ep->resp; 1575 - ex_resp_arg = ep->arg; 1576 1509 1577 1510 if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T && 1578 1511 (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == 1579 1512 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) { 1580 1513 spin_lock_bh(&ep->ex_lock); 1581 - resp = ep->resp; 1582 1514 rc = fc_exch_done_locked(ep); 1583 1515 WARN_ON(fc_seq_exch(sp) != ep); 1584 1516 spin_unlock_bh(&ep->ex_lock); ··· 1596 1534 * If new exch resp handler is valid then call that 1597 1535 * first. 1598 1536 */ 1599 - if (resp) 1600 - resp(sp, fp, ex_resp_arg); 1601 - else 1602 - fc_frame_free(fp); 1537 + fc_invoke_resp(ep, sp, fp); 1538 + 1603 1539 fc_exch_release(ep); 1604 1540 return; 1605 1541 rel: ··· 1636 1576 */ 1637 1577 static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) 1638 1578 { 1639 - void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); 1640 - void *ex_resp_arg; 1641 1579 struct fc_frame_header *fh; 1642 1580 struct fc_ba_acc *ap; 1643 1581 struct fc_seq *sp; ··· 1680 1622 break; 1681 1623 } 1682 1624 1683 - resp = ep->resp; 1684 - ex_resp_arg = ep->arg; 1685 - 1686 1625 /* do we need to do some other checks here. 
Can we reuse more of 1687 1626 * fc_exch_recv_seq_resp 1688 1627 */ ··· 1691 1636 ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ) 1692 1637 rc = fc_exch_done_locked(ep); 1693 1638 spin_unlock_bh(&ep->ex_lock); 1639 + 1640 + fc_exch_hold(ep); 1694 1641 if (!rc) 1695 1642 fc_exch_delete(ep); 1696 - 1697 - if (resp) 1698 - resp(sp, fp, ex_resp_arg); 1699 - else 1700 - fc_frame_free(fp); 1701 - 1643 + fc_invoke_resp(ep, sp, fp); 1702 1644 if (has_rec) 1703 1645 fc_exch_timer_set(ep, ep->r_a_tov); 1704 - 1646 + fc_exch_release(ep); 1705 1647 } 1706 1648 1707 1649 /** ··· 1820 1768 /** 1821 1769 * fc_exch_reset() - Reset an exchange 1822 1770 * @ep: The exchange to be reset 1771 + * 1772 + * Note: May sleep if invoked from outside a response handler. 1823 1773 */ 1824 1774 static void fc_exch_reset(struct fc_exch *ep) 1825 1775 { 1826 1776 struct fc_seq *sp; 1827 - void (*resp)(struct fc_seq *, struct fc_frame *, void *); 1828 - void *arg; 1829 1777 int rc = 1; 1830 1778 1831 1779 spin_lock_bh(&ep->ex_lock); 1832 1780 fc_exch_abort_locked(ep, 0); 1833 1781 ep->state |= FC_EX_RST_CLEANUP; 1834 1782 fc_exch_timer_cancel(ep); 1835 - resp = ep->resp; 1836 - ep->resp = NULL; 1837 1783 if (ep->esb_stat & ESB_ST_REC_QUAL) 1838 1784 atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */ 1839 1785 ep->esb_stat &= ~ESB_ST_REC_QUAL; 1840 - arg = ep->arg; 1841 1786 sp = &ep->seq; 1842 1787 rc = fc_exch_done_locked(ep); 1843 1788 spin_unlock_bh(&ep->ex_lock); 1789 + 1790 + fc_exch_hold(ep); 1791 + 1844 1792 if (!rc) 1845 1793 fc_exch_delete(ep); 1846 1794 1847 - if (resp) 1848 - resp(sp, ERR_PTR(-FC_EX_CLOSED), arg); 1795 + fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED)); 1796 + fc_seq_set_resp(sp, NULL, ep->arg); 1797 + fc_exch_release(ep); 1849 1798 } 1850 1799 1851 1800 /**
+9
include/scsi/libfc.h
··· 410 410 * @fh_type: The frame type 411 411 * @class: The class of service 412 412 * @seq: The sequence in use on this exchange 413 + * @resp_active: Number of tasks that are concurrently executing @resp(). 414 + * @resp_task: If @resp_active > 0, either the task executing @resp(), the 415 + * task that has been interrupted to execute the soft-IRQ 416 + * executing @resp() or NULL if more than one task is executing 417 + * @resp concurrently. 418 + * @resp_wq: Waitqueue for the tasks waiting on @resp_active. 413 419 * @resp: Callback for responses on this exchange 414 420 * @destructor: Called when destroying the exchange 415 421 * @arg: Passed as a void pointer to the resp() callback ··· 447 441 u32 r_a_tov; 448 442 u32 f_ctl; 449 443 struct fc_seq seq; 444 + int resp_active; 445 + struct task_struct *resp_task; 446 + wait_queue_head_t resp_wq; 450 447 void (*resp)(struct fc_seq *, struct fc_frame *, void *); 451 448 void *arg; 452 449 void (*destructor)(struct fc_seq *, void *);