Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

async_tx: fix handling of the "out of descriptor" condition in async_xor

Ensure forward progress is made when a dmaengine driver is unable to
allocate an xor descriptor by breaking the dependency chain with
async_tx_quiesce() and issuing any pending descriptors.

Tested with iop-adma by setting device->max_xor = 2 to force multiple
calls to device_prep_dma_xor for each call to async_xor and limiting the
descriptor slot pool to 5. Discovered that the minimum descriptor pool
size for iop-adma is 2 * iop_chan_xor_slot_cnt(device->max_xor) + 1.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>

+26 -6
+26 -6
crypto/async_tx/async_xor.c
··· 30 30 #include <linux/raid/xor.h> 31 31 #include <linux/async_tx.h> 32 32 33 + /** 34 + * async_tx_quiesce - ensure tx is complete and freeable upon return 35 + * @tx - transaction to quiesce 36 + */ 37 + static void async_tx_quiesce(struct dma_async_tx_descriptor **tx) 38 + { 39 + if (*tx) { 40 + /* if ack is already set then we cannot be sure 41 + * we are referring to the correct operation 42 + */ 43 + BUG_ON(async_tx_test_ack(*tx)); 44 + if (dma_wait_for_async_tx(*tx) == DMA_ERROR) 45 + panic("DMA_ERROR waiting for transaction\n"); 46 + async_tx_ack(*tx); 47 + *tx = NULL; 48 + } 49 + } 50 + 33 51 /* do_async_xor - dma map the pages and perform the xor with an engine. 34 52 * This routine is marked __always_inline so it can be compiled away 35 53 * when CONFIG_DMA_ENGINE=n ··· 103 85 tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off], 104 86 xor_src_cnt, len, dma_flags); 105 87 106 - if (unlikely(!tx && depend_tx)) 107 - dma_wait_for_async_tx(depend_tx); 88 + if (unlikely(!tx)) 89 + async_tx_quiesce(&depend_tx); 108 90 109 91 /* spin wait for the preceeding transactions to complete */ 110 - while (unlikely(!tx)) 92 + while (unlikely(!tx)) { 93 + dma_async_issue_pending(chan); 111 94 tx = dma->device_prep_dma_xor(chan, dma_dest, 112 95 &dma_src[src_off], 113 96 xor_src_cnt, len, 114 97 dma_flags); 98 + } 115 99 116 100 async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn, 117 101 _cb_param); ··· 287 267 tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt, 288 268 len, result, 289 269 dma_prep_flags); 290 - if (!tx) { 291 - if (depend_tx) 292 - dma_wait_for_async_tx(depend_tx); 270 + if (unlikely(!tx)) { 271 + async_tx_quiesce(&depend_tx); 293 272 294 273 while (!tx) 274 + dma_async_issue_pending(chan); 295 275 tx = device->device_prep_dma_zero_sum(chan, 296 276 dma_src, src_cnt, len, result, 297 277 dma_prep_flags);