dmaengine: altera: fix spinlock usage

Since this lock is acquired in both process and IRQ context, failing to
disable IRQs when trying to acquire the lock in process context can
lead to deadlocks.
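
To illustrate the race (a simplified sketch, not the driver code itself;
the lock and function names below are made up for the example):
spin_lock_bh() only masks softirqs, so while a process-context path such
as ->issue_pending() holds mdev->lock, the controller's interrupt can
still be taken on the same CPU, and the IRQ-side user of the same lock
then spins forever:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(lock);			/* stands in for mdev->lock */

	static void process_context(void)		/* e.g. an ->issue_pending() call */
	{
		spin_lock_bh(&lock);			/* softirqs masked, hardirqs still enabled */
		/* ... device IRQ can fire on this CPU here ... */
		spin_unlock_bh(&lock);
	}

	static void irq_context(void)			/* IRQ-side user of the same lock */
	{
		spin_lock(&lock);			/* already held by the interrupted task: deadlock */
		/* ... */
		spin_unlock(&lock);
	}

	static void process_context_fixed(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&lock, flags);	/* local IRQs disabled while the lock is held */
		/* ... */
		spin_unlock_irqrestore(&lock, flags);
	}

Converting the process-context users to spin_lock_irqsave()/
spin_unlock_irqrestore() keeps local interrupts disabled for the whole
critical section, so the IRQ-side acquisition can no longer interrupt a
holder of the lock on the same CPU.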

Signed-off-by: Sylvain Lesne <lesne@alse-fr.com>
Reviewed-by: Stefan Roese <sr@denx.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>

Authored by Sylvain Lesne, committed by Vinod Koul (edf10919, d9ec4641)

+21 -14
drivers/dma/altera-msgdma.c
···
 static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
 {
 	struct msgdma_sw_desc *desc;
+	unsigned long flags;

-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 	desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
 	list_del(&desc->node);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);

 	INIT_LIST_HEAD(&desc->tx_list);

···
 	struct msgdma_device *mdev = to_mdev(tx->chan);
 	struct msgdma_sw_desc *new;
 	dma_cookie_t cookie;
+	unsigned long flags;

 	new = tx_to_desc(tx);
-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 	cookie = dma_cookie_assign(tx);

 	list_add_tail(&new->node, &mdev->pending_list);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);

 	return cookie;
 }
···
 	struct msgdma_extended_desc *desc;
 	size_t copy;
 	u32 desc_cnt;
+	unsigned long irqflags;

 	desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, irqflags);
 	if (desc_cnt > mdev->desc_free_cnt) {
 		spin_unlock_bh(&mdev->lock);
 		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
 		return NULL;
 	}
 	mdev->desc_free_cnt -= desc_cnt;
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, irqflags);

 	do {
 		/* Allocate and populate the descriptor */
···
 	u32 desc_cnt = 0, i;
 	struct scatterlist *sg;
 	u32 stride;
+	unsigned long irqflags;

 	for_each_sg(sgl, sg, sg_len, i)
 		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);

-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, irqflags);
 	if (desc_cnt > mdev->desc_free_cnt) {
 		spin_unlock_bh(&mdev->lock);
 		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
 		return NULL;
 	}
 	mdev->desc_free_cnt -= desc_cnt;
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, irqflags);

 	avail = sg_dma_len(sgl);

···
 static void msgdma_issue_pending(struct dma_chan *chan)
 {
 	struct msgdma_device *mdev = to_mdev(chan);
+	unsigned long flags;

-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 	msgdma_start_transfer(mdev);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
 }

 /**
···
 static void msgdma_free_chan_resources(struct dma_chan *dchan)
 {
 	struct msgdma_device *mdev = to_mdev(dchan);
+	unsigned long flags;

-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 	msgdma_free_descriptors(mdev);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
 	kfree(mdev->sw_desq);
 }
···
 	u32 count;
 	u32 __maybe_unused size;
 	u32 __maybe_unused status;
+	unsigned long flags;

-	spin_lock(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);

 	/* Read number of responses that are available */
 	count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
···
 		msgdma_chan_desc_cleanup(mdev);
 	}

-	spin_unlock(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
 }

 /**