drivers/dma/altera-msgdma.c: +21, -14
···
 static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
 {
 	struct msgdma_sw_desc *desc;
+	unsigned long flags;
 
-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 	desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
 	list_del(&desc->node);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
 
 	INIT_LIST_HEAD(&desc->tx_list);
 
···
 	struct msgdma_device *mdev = to_mdev(tx->chan);
 	struct msgdma_sw_desc *new;
 	dma_cookie_t cookie;
+	unsigned long flags;
 
 	new = tx_to_desc(tx);
-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 	cookie = dma_cookie_assign(tx);
 
 	list_add_tail(&new->node, &mdev->pending_list);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
 
 	return cookie;
 }
···
 	struct msgdma_extended_desc *desc;
 	size_t copy;
 	u32 desc_cnt;
+	unsigned long irqflags;
 
 	desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
 
-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, irqflags);
 	if (desc_cnt > mdev->desc_free_cnt) {
 		spin_unlock_bh(&mdev->lock);
 		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
 		return NULL;
 	}
 	mdev->desc_free_cnt -= desc_cnt;
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, irqflags);
 
 	do {
 		/* Allocate and populate the descriptor */
···
 	u32 desc_cnt = 0, i;
 	struct scatterlist *sg;
 	u32 stride;
+	unsigned long irqflags;
 
 	for_each_sg(sgl, sg, sg_len, i)
 		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
 
-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, irqflags);
 	if (desc_cnt > mdev->desc_free_cnt) {
 		spin_unlock_bh(&mdev->lock);
 		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
 		return NULL;
 	}
 	mdev->desc_free_cnt -= desc_cnt;
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, irqflags);
 
 	avail = sg_dma_len(sgl);
 
···
 static void msgdma_issue_pending(struct dma_chan *chan)
 {
 	struct msgdma_device *mdev = to_mdev(chan);
+	unsigned long flags;
 
-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 	msgdma_start_transfer(mdev);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
 }
 
 /**
···
 static void msgdma_free_chan_resources(struct dma_chan *dchan)
 {
 	struct msgdma_device *mdev = to_mdev(dchan);
+	unsigned long flags;
 
-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 	msgdma_free_descriptors(mdev);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
 	kfree(mdev->sw_desq);
 }
 
···
 	u32 count;
 	u32 __maybe_unused size;
 	u32 __maybe_unused status;
+	unsigned long flags;
 
-	spin_lock(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 
 	/* Read number of responses that are available */
 	count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
···
 		msgdma_chan_desc_cleanup(mdev);
 	}
 
-	spin_unlock(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
 }
 
 /**
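
For reference, the process-context locking pattern that results from these hunks looks as follows. This is a sketch reconstructed from the msgdma_issue_pending hunk above, not a verbatim copy of the file; the hunks suggest mdev->lock is also taken from the driver's interrupt path, which is why the _bh variants are replaced with the IRQ-saving variants.

	static void msgdma_issue_pending(struct dma_chan *chan)
	{
		struct msgdma_device *mdev = to_mdev(chan);
		unsigned long flags;

		/* Save and disable local interrupts while holding the lock,
		 * since spin_lock_bh() only masks softirqs and cannot protect
		 * against the interrupt-context users of mdev->lock.
		 */
		spin_lock_irqsave(&mdev->lock, flags);
		msgdma_start_transfer(mdev);
		spin_unlock_irqrestore(&mdev->lock, flags);
	}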