[SCSI] lpfc 8.3.42: Fixed race condition between BSG I/O dispatch and timeout handling

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>

Authored by James Smart, committed by James Bottomley
b5a9b2df 9a86ed48

2 files changed, 72 insertions(+), 17 deletions(-)
+70 -16
drivers/scsi/lpfc/lpfc_bsg.c
···
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_aux_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
 	iocb = &dd_data->context_un.iocb;
 	ndlp = iocb->ndlp;
 	rmp = iocb->rmp;
···
 	int request_nseg;
 	int reply_nseg;
 	struct bsg_job_data *dd_data;
+	unsigned long flags;
 	uint32_t creg_val;
 	int rc = 0;
 	int iocb_stat;
···
 	}
 
 	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
-	if (iocb_stat == IOCB_SUCCESS)
+
+	if (iocb_stat == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O had not been completed yet */
+		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			cmdiocbq->iocb_aux_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return 0; /* done for now */
-	else if (iocb_stat == IOCB_BUSY)
+	} else if (iocb_stat == IOCB_BUSY) {
 		rc = -EAGAIN;
-	else
+	} else {
 		rc = -EIO;
+	}
 
 	/* iocb failed so cleanup */
+	job->dd_data = NULL;
 
 free_rmp:
 	lpfc_free_bsg_buffers(phba, rmp);
···
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_aux_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
 	rsp = &rspiocbq->iocb;
 	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
 	prsp = (struct lpfc_dmabuf *)pcmd->list.next;
···
 	struct lpfc_iocbq *cmdiocbq;
 	uint16_t rpi = 0;
 	struct bsg_job_data *dd_data;
+	unsigned long flags;
 	uint32_t creg_val;
 	int rc = 0;
 
···
 
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
 
-	if (rc == IOCB_SUCCESS)
+	if (rc == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O had not been completed/released */
+		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			cmdiocbq->iocb_aux_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return 0; /* done for now */
-	else if (rc == IOCB_BUSY)
+	} else if (rc == IOCB_BUSY) {
 		rc = -EAGAIN;
-	else
+	} else {
 		rc = -EIO;
+	}
+
+	/* iocb failed so cleanup */
+	job->dd_data = NULL;
 
 linkdown_err:
-
 	cmdiocbq->context1 = ndlp;
 	lpfc_els_free_iocb(phba, cmdiocbq);
 
···
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_aux_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
 	ndlp = dd_data->context_un.iocb.ndlp;
 	cmp = cmdiocbq->context2;
 	bmp = cmdiocbq->context3;
···
 	int rc = 0;
 	struct lpfc_nodelist *ndlp = NULL;
 	struct bsg_job_data *dd_data;
+	unsigned long flags;
 	uint32_t creg_val;
 
 	/* allocate our bsg tracking structure */
···
 
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
 
-	if (rc == IOCB_SUCCESS)
+	if (rc == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O had not been completed/released */
+		if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			ctiocb->iocb_aux_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return 0; /* done for now */
+	}
+
+	/* iocb failed so cleanup */
+	job->dd_data = NULL;
 
 issue_ct_rsp_exit:
 	lpfc_sli_release_iocbq(phba, ctiocb);
···
 	 * remove it from the txq queue and call cancel iocbs.
 	 * Otherwise, call abort iotag
 	 */
-
 	cmdiocb = dd_data->context_un.iocb.cmdiocbq;
-	spin_lock_irq(&phba->hbalock);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	/* make sure the I/O abort window is still open */
+	if (!(cmdiocb->iocb_aux_flag & LPFC_IO_CMD_OUTSTANDING)) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		return -EAGAIN;
+	}
 	list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
 				 list) {
 		if (check_iocb == cmdiocb) {
···
 	}
 	if (list_empty(&completions))
 		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
-	spin_unlock_irq(&phba->hbalock);
-	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
 	if (!list_empty(&completions)) {
 		lpfc_sli_cancel_iocbs(phba, &completions,
 				      IOSTAT_LOCAL_REJECT,
···
 	 * remove it from the txq queue and call cancel iocbs.
 	 * Otherwise, call abort iotag.
 	 */
-
 	cmdiocb = dd_data->context_un.menlo.cmdiocbq;
-	spin_lock_irq(&phba->hbalock);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	spin_lock_irqsave(&phba->hbalock, flags);
 	list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
 				 list) {
 		if (check_iocb == cmdiocb) {
···
 	}
 	if (list_empty(&completions))
 		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
-	spin_unlock_irq(&phba->hbalock);
-	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
 	if (!list_empty(&completions)) {
 		lpfc_sli_cancel_iocbs(phba, &completions,
 				      IOSTAT_LOCAL_REJECT,
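For readers following the locking changes above, the sketch below models the handshake the patch sets up between the dispatch, completion, and timeout paths: the dispatcher opens the abort window (LPFC_IO_CMD_OUTSTANDING) only if the I/O is still owned by the BSG layer, the completion handler closes the window before releasing resources, and the timeout handler aborts only while the window is open. This is a minimal userspace model, not lpfc code: a pthread mutex stands in for phba->hbalock, and the names bsg_cmd, owned_by_bsg, and cmd_outstanding are invented for illustration.

	/*
	 * Minimal userspace model of the abort-window handshake added by
	 * this patch.  A pthread mutex stands in for phba->hbalock; the
	 * struct and field names are illustrative only.
	 */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct bsg_cmd {
		pthread_mutex_t lock;	/* stands in for phba->hbalock */
		bool owned_by_bsg;	/* models LPFC_IO_LIBDFC still set */
		bool cmd_outstanding;	/* models LPFC_IO_CMD_OUTSTANDING */
	};

	/* Dispatch path: open the abort window only if the I/O is still ours. */
	static void dispatch_done(struct bsg_cmd *c)
	{
		pthread_mutex_lock(&c->lock);
		if (c->owned_by_bsg)	/* completion may already have run */
			c->cmd_outstanding = true;
		pthread_mutex_unlock(&c->lock);
	}

	/* Completion path: close the window before freeing anything. */
	static void completion(struct bsg_cmd *c)
	{
		pthread_mutex_lock(&c->lock);
		c->cmd_outstanding = false;
		c->owned_by_bsg = false;
		pthread_mutex_unlock(&c->lock);
		/* ...free buffers, complete the bsg job... */
	}

	/* Timeout path: abort only while the window is open, else back off. */
	static int timeout_abort(struct bsg_cmd *c)
	{
		pthread_mutex_lock(&c->lock);
		if (!c->cmd_outstanding) {
			pthread_mutex_unlock(&c->lock);
			return -1;	/* models returning -EAGAIN */
		}
		/* ...issue the abort while the command is known to be live... */
		pthread_mutex_unlock(&c->lock);
		return 0;
	}

	int main(void)
	{
		struct bsg_cmd c = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.owned_by_bsg = true,
		};

		dispatch_done(&c);	/* window opens */
		printf("timeout before completion: %d\n", timeout_abort(&c));
		completion(&c);		/* window closes */
		printf("timeout after completion:  %d\n", timeout_abort(&c));
		return 0;
	}

Built with cc -pthread, the model prints 0 for the abort attempted while the window is open and -1 once completion has closed it, which is the ordering the hbalock-protected flag enforces in the hunks above.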
+2 -1
drivers/scsi/lpfc/lpfc_sli.h
···
 #define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
 #define LPFC_FIP_ELS_ID_SHIFT	14
 
-	uint8_t rsvd2;
+	uint8_t iocb_aux_flag;
+#define LPFC_IO_CMD_OUTSTANDING	0x01	/* timeout handler abort window */
 	uint32_t drvrTimeout;	/* driver timeout in seconds */
 	uint32_t fcp_wqidx;	/* index to FCP work queue */
 	struct lpfc_vport *vport;/* virtual port pointer */
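Note on the header change: the new per-iocb byte takes over the reserved rsvd2 slot, so the size and layout of struct lpfc_iocbq are unchanged. Keeping LPFC_IO_CMD_OUTSTANDING in its own iocb_aux_flag byte, rather than in the existing iocb_flag word, appears to be what lets the dispatch path test LPFC_IO_LIBDFC and set the new bit independently under phba->hbalock.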