Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

async_tx: export async_tx_quiesce

Replace open coded "wait and acknowledge" instances with async_tx_quiesce.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>

+26 -62
+1 -9
crypto/async_tx/async_memcpy.c
···
 	pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 	/* wait for any prerequisite operations */
-	if (depend_tx) {
-		/* if ack is already set then we cannot be sure
-		 * we are referring to the correct operation
-		 */
-		BUG_ON(async_tx_test_ack(depend_tx));
-		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-			panic("%s: DMA_ERROR waiting for depend_tx\n",
-				__func__);
-	}
+	async_tx_quiesce(&depend_tx);
 
 	dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
 	src_buf = kmap_atomic(src, KM_USER1) + src_offset;
+1 -9
crypto/async_tx/async_memset.c
···
 	dest_buf = (void *) (((char *) page_address(dest)) + offset);
 
 	/* wait for any prerequisite operations */
-	if (depend_tx) {
-		/* if ack is already set then we cannot be sure
-		 * we are referring to the correct operation
-		 */
-		BUG_ON(async_tx_test_ack(depend_tx));
-		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-			panic("%s: DMA_ERROR waiting for depend_tx\n",
-				__func__);
-	}
+	async_tx_quiesce(&depend_tx);
 
 	memset(dest_buf, val, len);
 
+20 -9
crypto/async_tx/async_tx.c
···
 	pr_debug("%s: (sync)\n", __func__);
 
 	/* wait for any prerequisite operations */
-	if (depend_tx) {
-		/* if ack is already set then we cannot be sure
-		 * we are referring to the correct operation
-		 */
-		BUG_ON(async_tx_test_ack(depend_tx));
-		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-			panic("%s: DMA_ERROR waiting for depend_tx\n",
-				__func__);
-	}
+	async_tx_quiesce(&depend_tx);
 
 	async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
 }
···
 	return tx;
 }
 EXPORT_SYMBOL_GPL(async_trigger_callback);
+
+/**
+ * async_tx_quiesce - ensure tx is complete and freeable upon return
+ * @tx - transaction to quiesce
+ */
+void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
+{
+	if (*tx) {
+		/* if ack is already set then we cannot be sure
+		 * we are referring to the correct operation
+		 */
+		BUG_ON(async_tx_test_ack(*tx));
+		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
+			panic("DMA_ERROR waiting for transaction\n");
+		async_tx_ack(*tx);
+		*tx = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(async_tx_quiesce);
 
 module_init(async_tx_init);
 module_exit(async_tx_exit);
+2 -35
crypto/async_tx/async_xor.c
···
 #include <linux/raid/xor.h>
 #include <linux/async_tx.h>
 
-/**
- * async_tx_quiesce - ensure tx is complete and freeable upon return
- * @tx - transaction to quiesce
- */
-static void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
-{
-	if (*tx) {
-		/* if ack is already set then we cannot be sure
-		 * we are referring to the correct operation
-		 */
-		BUG_ON(async_tx_test_ack(*tx));
-		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
-			panic("DMA_ERROR waiting for transaction\n");
-		async_tx_ack(*tx);
-		*tx = NULL;
-	}
-}
-
 /* do_async_xor - dma map the pages and perform the xor with an engine.
  * This routine is marked __always_inline so it can be compiled away
  * when CONFIG_DMA_ENGINE=n
···
 	}
 
 	/* wait for any prerequisite operations */
-	if (depend_tx) {
-		/* if ack is already set then we cannot be sure
-		 * we are referring to the correct operation
-		 */
-		BUG_ON(async_tx_test_ack(depend_tx));
-		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-			panic("%s: DMA_ERROR waiting for depend_tx\n",
-				__func__);
-	}
+	async_tx_quiesce(&depend_tx);
 
 	do_sync_xor(dest, src_list, offset, src_cnt, len,
 		flags, depend_tx, cb_fn, cb_param);
···
 	tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
 		depend_tx, NULL, NULL);
 
-	if (tx) {
-		if (dma_wait_for_async_tx(tx) == DMA_ERROR)
-			panic("%s: DMA_ERROR waiting for tx\n",
-				__func__);
-		async_tx_ack(tx);
-	}
+	async_tx_quiesce(&tx);
 
 	*result = page_is_zero(dest, offset, len) ? 0 : 1;
-
-	tx = NULL;
 
 	async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
 }
+2
include/linux/async_tx.h
···
 async_trigger_callback(enum async_tx_flags flags,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_fn_param);
+
+void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
 #endif /* _ASYNC_TX_H_ */