Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: lpfc: Release hbalock before calling lpfc_worker_wake_up()

lpfc_worker_wake_up() calls the lpfc_work_done() routine, which takes the
hbalock. Thus, lpfc_worker_wake_up() should not be called while holding the
hbalock to avoid potential deadlock.

Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Link: https://lore.kernel.org/r/20240305200503.57317-7-justintee8345@gmail.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

Authored by Justin Tee and committed by Martin K. Petersen.
ded20192 d11272be

+19 -20
+10 -10
drivers/scsi/lpfc/lpfc_els.c
@@ -4437,21 +4437,21 @@
 	unsigned long flags;
 	struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
 
+	/* Hold a node reference for outstanding queued work */
+	if (!lpfc_nlp_get(ndlp))
+		return;
+
 	spin_lock_irqsave(&phba->hbalock, flags);
 	if (!list_empty(&evtp->evt_listp)) {
 		spin_unlock_irqrestore(&phba->hbalock, flags);
+		lpfc_nlp_put(ndlp);
 		return;
 	}
 
-	/* We need to hold the node by incrementing the reference
-	 * count until the queued work is done
-	 */
-	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
-	if (evtp->evt_arg1) {
-		evtp->evt = LPFC_EVT_ELS_RETRY;
-		list_add_tail(&evtp->evt_listp, &phba->work_list);
-		lpfc_worker_wake_up(phba);
-	}
+	evtp->evt_arg1 = ndlp;
+	evtp->evt = LPFC_EVT_ELS_RETRY;
+	list_add_tail(&evtp->evt_listp, &phba->work_list);
 	spin_unlock_irqrestore(&phba->hbalock, flags);
-	return;
+
+	lpfc_worker_wake_up(phba);
 }
 
 /**
+2 -3
drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -257,7 +257,9 @@
 	if (evtp->evt_arg1) {
 		evtp->evt = LPFC_EVT_DEV_LOSS;
 		list_add_tail(&evtp->evt_listp, &phba->work_list);
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
 		lpfc_worker_wake_up(phba);
+		return;
 	}
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
 } else {
@@ -277,10 +275,5 @@
 		lpfc_disc_state_machine(vport, ndlp, NULL,
 					NLP_EVT_DEVICE_RM);
 	}
-
 }
-
-	return;
 }
 
 /**
+7 -7
drivers/scsi/lpfc/lpfc_sli.c
@@ -1217,9 +1217,9 @@
 	empty = list_empty(&phba->active_rrq_list);
 	list_add_tail(&rrq->list, &phba->active_rrq_list);
 	phba->hba_flag |= HBA_RRQ_ACTIVE;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
 	if (empty)
 		lpfc_worker_wake_up(phba);
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
 	return 0;
 out:
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -11373,18 +11373,18 @@
 	unsigned long iflags;
 	struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
 
+	/* Hold a node reference for outstanding queued work */
+	if (!lpfc_nlp_get(ndlp))
+		return;
+
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	if (!list_empty(&evtp->evt_listp)) {
 		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		lpfc_nlp_put(ndlp);
 		return;
 	}
 
-	/* Incrementing the reference count until the queued work is done. */
-	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
-	if (!evtp->evt_arg1) {
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
-		return;
-	}
+	evtp->evt_arg1 = ndlp;
 	evtp->evt = LPFC_EVT_RECOVER_PORT;
 	list_add_tail(&evtp->evt_listp, &phba->work_list);
 	spin_unlock_irqrestore(&phba->hbalock, iflags);