Merge tag 'dmaengine-fix-4.14-rc5' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
"Here are fixes for this round

- fix spinlock usage and fifo response for altera driver

- fix ti crossbar race condition

- fix edma memcpy align"

* tag 'dmaengine-fix-4.14-rc5' of git://git.infradead.org/users/vkoul/slave-dma:
dmaengine: altera: fix spinlock usage
dmaengine: altera: fix response FIFO emptying
dmaengine: ti-dma-crossbar: Fix possible race condition with dma_inuse
dmaengine: edma: Align the memcpy acnt array size with the transfer

Changed files (+40 -19):

drivers/dma/altera-msgdma.c (+22 -15)
···
 static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
 {
         struct msgdma_sw_desc *desc;
+        unsigned long flags;
 
-        spin_lock_bh(&mdev->lock);
+        spin_lock_irqsave(&mdev->lock, flags);
         desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
         list_del(&desc->node);
-        spin_unlock_bh(&mdev->lock);
+        spin_unlock_irqrestore(&mdev->lock, flags);
 
         INIT_LIST_HEAD(&desc->tx_list);
···
         struct msgdma_device *mdev = to_mdev(tx->chan);
         struct msgdma_sw_desc *new;
         dma_cookie_t cookie;
+        unsigned long flags;
 
         new = tx_to_desc(tx);
-        spin_lock_bh(&mdev->lock);
+        spin_lock_irqsave(&mdev->lock, flags);
         cookie = dma_cookie_assign(tx);
 
         list_add_tail(&new->node, &mdev->pending_list);
-        spin_unlock_bh(&mdev->lock);
+        spin_unlock_irqrestore(&mdev->lock, flags);
 
         return cookie;
 }
···
         struct msgdma_extended_desc *desc;
         size_t copy;
         u32 desc_cnt;
+        unsigned long irqflags;
 
         desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
 
-        spin_lock_bh(&mdev->lock);
+        spin_lock_irqsave(&mdev->lock, irqflags);
         if (desc_cnt > mdev->desc_free_cnt) {
                 spin_unlock_bh(&mdev->lock);
                 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                 return NULL;
         }
         mdev->desc_free_cnt -= desc_cnt;
-        spin_unlock_bh(&mdev->lock);
+        spin_unlock_irqrestore(&mdev->lock, irqflags);
 
         do {
                 /* Allocate and populate the descriptor */
···
         u32 desc_cnt = 0, i;
         struct scatterlist *sg;
         u32 stride;
+        unsigned long irqflags;
 
         for_each_sg(sgl, sg, sg_len, i)
                 desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
 
-        spin_lock_bh(&mdev->lock);
+        spin_lock_irqsave(&mdev->lock, irqflags);
         if (desc_cnt > mdev->desc_free_cnt) {
                 spin_unlock_bh(&mdev->lock);
                 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                 return NULL;
         }
         mdev->desc_free_cnt -= desc_cnt;
-        spin_unlock_bh(&mdev->lock);
+        spin_unlock_irqrestore(&mdev->lock, irqflags);
 
         avail = sg_dma_len(sgl);
···
 static void msgdma_issue_pending(struct dma_chan *chan)
 {
         struct msgdma_device *mdev = to_mdev(chan);
+        unsigned long flags;
 
-        spin_lock_bh(&mdev->lock);
+        spin_lock_irqsave(&mdev->lock, flags);
         msgdma_start_transfer(mdev);
-        spin_unlock_bh(&mdev->lock);
+        spin_unlock_irqrestore(&mdev->lock, flags);
 }
 
 /**
···
 static void msgdma_free_chan_resources(struct dma_chan *dchan)
 {
         struct msgdma_device *mdev = to_mdev(dchan);
+        unsigned long flags;
 
-        spin_lock_bh(&mdev->lock);
+        spin_lock_irqsave(&mdev->lock, flags);
         msgdma_free_descriptors(mdev);
-        spin_unlock_bh(&mdev->lock);
+        spin_unlock_irqrestore(&mdev->lock, flags);
         kfree(mdev->sw_desq);
 }
···
         u32 count;
         u32 __maybe_unused size;
         u32 __maybe_unused status;
+        unsigned long flags;
 
-        spin_lock(&mdev->lock);
+        spin_lock_irqsave(&mdev->lock, flags);
 
         /* Read number of responses that are available */
         count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
···
                  * bits. So we need to just drop these values.
                  */
                 size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
-                status = ioread32(mdev->resp - MSGDMA_RESP_STATUS);
+                status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
 
                 msgdma_complete_descriptor(mdev);
                 msgdma_chan_desc_cleanup(mdev);
         }
 
-        spin_unlock(&mdev->lock);
+        spin_unlock_irqrestore(&mdev->lock, flags);
 }
 
 /**
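
As background for the locking change above: the _bh spinlock variants only disable softirqs, while the irqsave/irqrestore variants disable local interrupts and remember their previous state, which makes them safe regardless of the context the lock is taken from. A minimal sketch of the pattern the driver moves to is below; the lock and function names are hypothetical and not taken from the patch.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);   /* hypothetical, stands in for mdev->lock */

static void example_touch_shared_state(void)
{
        unsigned long flags;

        /* Disable local IRQs and save their previous state while holding the lock */
        spin_lock_irqsave(&example_lock, flags);
        /* ... access data that the IRQ/tasklet path also touches ... */
        spin_unlock_irqrestore(&example_lock, flags);
}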

drivers/dma/edma.c (+16 -3)
···
         struct edma_desc *edesc;
         struct device *dev = chan->device->dev;
         struct edma_chan *echan = to_edma_chan(chan);
-        unsigned int width, pset_len;
+        unsigned int width, pset_len, array_size;
 
         if (unlikely(!echan || !len))
                 return NULL;
+
+        /* Align the array size (acnt block) with the transfer properties */
+        switch (__ffs((src | dest | len))) {
+        case 0:
+                array_size = SZ_32K - 1;
+                break;
+        case 1:
+                array_size = SZ_32K - 2;
+                break;
+        default:
+                array_size = SZ_32K - 4;
+                break;
+        }
 
         if (len < SZ_64K) {
                 /*
···
                  * When the full_length is multibple of 32767 one slot can be
                  * used to complete the transfer.
                  */
-                width = SZ_32K - 1;
+                width = array_size;
                 pset_len = rounddown(len, width);
                 /* One slot is enough for lengths multiple of (SZ_32K -1) */
                 if (unlikely(pset_len == len))
···
                 }
                 dest += pset_len;
                 src += pset_len;
-                pset_len = width = len % (SZ_32K - 1);
+                pset_len = width = len % array_size;
 
                 ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
                                        width, pset_len, DMA_MEM_TO_MEM);
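
The new array_size selection keys off the lowest set bit of src | dest | len, i.e. the common alignment of the source address, destination address and length, so the per-array byte count (acnt) stays a multiple of that alignment. A small user-space illustration of the same selection is sketched below; pick_array_size() is hypothetical, and libc ffs() (1-based) stands in for the kernel's __ffs() (0-based).

#include <stdio.h>
#include <strings.h>

#define SZ_32K 32768u

static unsigned int pick_array_size(unsigned int src, unsigned int dest,
                                    unsigned int len)
{
        switch (ffs((int)(src | dest | len)) - 1) {
        case 0:                 /* an odd address or length: byte granularity */
                return SZ_32K - 1;
        case 1:                 /* everything is 2-byte aligned */
                return SZ_32K - 2;
        default:                /* everything is 4-byte (or better) aligned */
                return SZ_32K - 4;
        }
}

int main(void)
{
        /* 2-byte aligned buffers and an even length -> array size of 32766 */
        printf("%u\n", pick_array_size(0x1002, 0x2004, 6000));
        return 0;
}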

drivers/dma/ti-dma-crossbar.c (+2 -1)
···
         mutex_lock(&xbar->mutex);
         map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
                                             xbar->dma_requests);
-        mutex_unlock(&xbar->mutex);
         if (map->xbar_out == xbar->dma_requests) {
+                mutex_unlock(&xbar->mutex);
                 dev_err(&pdev->dev, "Run out of free DMA requests\n");
                 kfree(map);
                 return ERR_PTR(-ENOMEM);
         }
         set_bit(map->xbar_out, xbar->dma_inuse);
+        mutex_unlock(&xbar->mutex);
 
         map->xbar_in = (u16)dma_spec->args[0];
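
The race fixed above comes from dropping the mutex between find_first_zero_bit() and set_bit(): two callers could observe the same request line as free and both claim it. The fix keeps the mutex held until the bit is actually set. A small, hypothetical user-space sketch of that find-and-claim-under-one-lock pattern:

#include <pthread.h>
#include <stdbool.h>

#define NUM_SLOTS 64

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static bool slot_used[NUM_SLOTS];       /* stands in for the xbar->dma_inuse bitmap */

static int claim_free_slot(void)
{
        int i, slot = -1;

        pthread_mutex_lock(&slot_lock);
        for (i = 0; i < NUM_SLOTS; i++) {
                if (!slot_used[i]) {
                        slot_used[i] = true;    /* claim while still holding the lock */
                        slot = i;
                        break;
                }
        }
        pthread_mutex_unlock(&slot_lock);

        return slot;                            /* -1 when every slot is taken */
}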