Merge tag 'dmaengine-fix-4.14-rc5' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
"Here are fixes for this round

 - fix spinlock usage and FIFO response handling for the altera driver

- fix ti crossbar race condition

 - fix edma memcpy alignment"

* tag 'dmaengine-fix-4.14-rc5' of git://git.infradead.org/users/vkoul/slave-dma:
dmaengine: altera: fix spinlock usage
dmaengine: altera: fix response FIFO emptying
dmaengine: ti-dma-crossbar: Fix possible race condition with dma_inuse
dmaengine: edma: Align the memcpy acnt array size with the transfer

Changed files: +40 -19

drivers/dma/altera-msgdma.c (+22 -15)
···
 static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
 {
        struct msgdma_sw_desc *desc;
+       unsigned long flags;

-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, flags);
        desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
        list_del(&desc->node);
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, flags);

        INIT_LIST_HEAD(&desc->tx_list);
···
        struct msgdma_device *mdev = to_mdev(tx->chan);
        struct msgdma_sw_desc *new;
        dma_cookie_t cookie;
+       unsigned long flags;

        new = tx_to_desc(tx);
-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, flags);
        cookie = dma_cookie_assign(tx);

        list_add_tail(&new->node, &mdev->pending_list);
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, flags);

        return cookie;
 }
···
        struct msgdma_extended_desc *desc;
        size_t copy;
        u32 desc_cnt;
+       unsigned long irqflags;

        desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
                spin_unlock_bh(&mdev->lock);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                return NULL;
        }
        mdev->desc_free_cnt -= desc_cnt;
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, irqflags);

        do {
                /* Allocate and populate the descriptor */
···
        u32 desc_cnt = 0, i;
        struct scatterlist *sg;
        u32 stride;
+       unsigned long irqflags;

        for_each_sg(sgl, sg, sg_len, i)
                desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);

-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
                spin_unlock_bh(&mdev->lock);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                return NULL;
        }
        mdev->desc_free_cnt -= desc_cnt;
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, irqflags);

        avail = sg_dma_len(sgl);
···
 static void msgdma_issue_pending(struct dma_chan *chan)
 {
        struct msgdma_device *mdev = to_mdev(chan);
+       unsigned long flags;

-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, flags);
        msgdma_start_transfer(mdev);
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, flags);
 }

 /**
···
 static void msgdma_free_chan_resources(struct dma_chan *dchan)
 {
        struct msgdma_device *mdev = to_mdev(dchan);
+       unsigned long flags;

-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, flags);
        msgdma_free_descriptors(mdev);
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, flags);
        kfree(mdev->sw_desq);
 }
···
        u32 count;
        u32 __maybe_unused size;
        u32 __maybe_unused status;
+       unsigned long flags;

-       spin_lock(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, flags);

        /* Read number of responses that are available */
        count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
···
                 * bits. So we need to just drop these values.
                 */
                size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
-               status = ioread32(mdev->resp - MSGDMA_RESP_STATUS);
+               status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);

                msgdma_complete_descriptor(mdev);
                msgdma_chan_desc_cleanup(mdev);
        }

-       spin_unlock(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, flags);
 }

 /**
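
Note on the locking change above: mdev->lock is also taken from the mSGDMA interrupt path, so process-context callers have to use the irqsave variants; spin_lock_bh() only masks softirqs and leaves a window where the interrupt can preempt a lock holder on the same CPU and deadlock. The fragment below is a minimal, self-contained sketch of that rule, not the driver code; demo_lock, demo_pending and the demo_* functions are made-up names.

/*
 * Illustrative sketch only: a lock that is also taken from a hard-IRQ
 * handler must be taken with spin_lock_irqsave() in process context,
 * otherwise the IRQ can fire on the same CPU while the lock is held.
 */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned int demo_pending;

/* hard-IRQ context: plain spin_lock() is fine, IRQs are already masked */
static irqreturn_t demo_irq_handler(int irq, void *data)
{
        spin_lock(&demo_lock);
        demo_pending++;
        spin_unlock(&demo_lock);
        return IRQ_HANDLED;
}

/* process context: block the IRQ handler for the whole critical section */
static void demo_submit(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);   /* not spin_lock_bh() */
        demo_pending++;
        spin_unlock_irqrestore(&demo_lock, flags);
}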

drivers/dma/edma.c (+16 -3)
···
        struct edma_desc *edesc;
        struct device *dev = chan->device->dev;
        struct edma_chan *echan = to_edma_chan(chan);
-       unsigned int width, pset_len;
+       unsigned int width, pset_len, array_size;

        if (unlikely(!echan || !len))
                return NULL;
+
+       /* Align the array size (acnt block) with the transfer properties */
+       switch (__ffs((src | dest | len))) {
+       case 0:
+               array_size = SZ_32K - 1;
+               break;
+       case 1:
+               array_size = SZ_32K - 2;
+               break;
+       default:
+               array_size = SZ_32K - 4;
+               break;
+       }

        if (len < SZ_64K) {
                /*
···
                 * When the full_length is multibple of 32767 one slot can be
                 * used to complete the transfer.
                 */
-               width = SZ_32K - 1;
+               width = array_size;
                pset_len = rounddown(len, width);
                /* One slot is enough for lengths multiple of (SZ_32K -1) */
                if (unlikely(pset_len == len))
···
        }
        dest += pset_len;
        src += pset_len;
-       pset_len = width = len % (SZ_32K - 1);
+       pset_len = width = len % array_size;

        ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
                               width, pset_len, DMA_MEM_TO_MEM);
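
The switch added above picks the largest aCNT value below 32768 that still matches the common alignment of source address, destination address and length: __ffs() returns the index of the lowest set bit of the OR of the three, i.e. the strongest alignment shared by all of them. As a rough standalone illustration only (pick_array_size() and the sample addresses are invented, and GCC's __builtin_ctzl() stands in for the kernel's __ffs()):

/* Userspace sketch of the acnt-size selection; SZ_32K = 32768. */
#include <stdio.h>

#define SZ_32K 0x8000UL

static unsigned long pick_array_size(unsigned long src, unsigned long dest,
                                     unsigned long len)
{
        /* lowest set bit of (src | dest | len) == common alignment */
        switch (__builtin_ctzl(src | dest | len)) {
        case 0:  return SZ_32K - 1;     /* only byte aligned */
        case 1:  return SZ_32K - 2;     /* 16-bit aligned */
        default: return SZ_32K - 4;     /* 32-bit (or better) aligned */
        }
}

int main(void)
{
        printf("%lu\n", pick_array_size(0x80000001, 0x90000000, 100000)); /* 32767 */
        printf("%lu\n", pick_array_size(0x80000002, 0x90000000, 100000)); /* 32766 */
        printf("%lu\n", pick_array_size(0x80000000, 0x90000000, 100000)); /* 32764 */
        return 0;
}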

drivers/dma/ti-dma-crossbar.c (+2 -1)
···
        mutex_lock(&xbar->mutex);
        map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
                                            xbar->dma_requests);
-       mutex_unlock(&xbar->mutex);
        if (map->xbar_out == xbar->dma_requests) {
+               mutex_unlock(&xbar->mutex);
                dev_err(&pdev->dev, "Run out of free DMA requests\n");
                kfree(map);
                return ERR_PTR(-ENOMEM);
        }
        set_bit(map->xbar_out, xbar->dma_inuse);
+       mutex_unlock(&xbar->mutex);

        map->xbar_in = (u16)dma_spec->args[0];
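
The crossbar fix above keeps find_first_zero_bit() and set_bit() under a single mutex hold; with the early unlock, two concurrent translations could observe the same free bit and map two channels onto one DMA request line. Below is a schematic, self-contained version of the corrected pattern, not the driver's code; the demo_* names and DEMO_NR_REQUESTS are invented.

/* Sketch: search and reservation of a bitmap slot under one lock hold. */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/errno.h>

#define DEMO_NR_REQUESTS 64

static DEFINE_MUTEX(demo_mutex);
static DECLARE_BITMAP(demo_inuse, DEMO_NR_REQUESTS);

static int demo_alloc_request(void)
{
        int line;

        mutex_lock(&demo_mutex);
        line = find_first_zero_bit(demo_inuse, DEMO_NR_REQUESTS);
        if (line == DEMO_NR_REQUESTS) {
                mutex_unlock(&demo_mutex);
                return -ENOMEM;
        }
        set_bit(line, demo_inuse);      /* reserve while still holding the lock */
        mutex_unlock(&demo_mutex);

        return line;
}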