Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: Revert "drivers/dma: remove unused support for MEMSET operations"

This reverts commit 48a9db462d99494583dad829969616ac90a8df4e.

Some platforms actually need support for the memset operations. Bring it back.

Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>

Authored by: Maxime Ripard
Committed by: Vinod Koul
Commit: 4983a501 (parent: fbea28a2)

26 additions total across 2 files.

drivers/dma/dmaengine.c (+2):
@@ -832,6 +832,8 @@
 		!device->device_prep_dma_pq);
 	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
 		!device->device_prep_dma_pq_val);
+	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
+		!device->device_prep_dma_memset);
 	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 		!device->device_prep_dma_interrupt);
 	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
include/linux/dmaengine.h (+24):
@@ -65,6 +65,7 @@
 	DMA_PQ,
 	DMA_XOR_VAL,
 	DMA_PQ_VAL,
+	DMA_MEMSET,
 	DMA_INTERRUPT,
 	DMA_SG,
 	DMA_PRIVATE,
@@ -570,6 +571,7 @@
  * @copy_align: alignment shift for memcpy operations
  * @xor_align: alignment shift for xor operations
  * @pq_align: alignment shift for pq operations
+ * @fill_align: alignment shift for memset operations
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
  * @src_addr_widths: bit mask of src addr widths the device supports
@@ -588,6 +590,7 @@
  * @device_prep_dma_xor_val: prepares a xor validation operation
  * @device_prep_dma_pq: prepares a pq operation
  * @device_prep_dma_pq_val: prepares a pqzero_sum operation
+ * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
  * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
@@ -620,6 +623,7 @@
 	u8 copy_align;
 	u8 xor_align;
 	u8 pq_align;
+	u8 fill_align;
 	#define DMA_HAS_PQ_CONTINUE (1 << 15)

 	int dev_id;
@@ -650,6 +654,9 @@
 		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 		unsigned int src_cnt, const unsigned char *scf, size_t len,
 		enum sum_check_flags *pqres, unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
+		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
@@ -745,6 +752,17 @@
 	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
 }

+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
+		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+		unsigned long flags)
+{
+	if (!chan || !chan->device)
+		return NULL;
+
+	return chan->device->device_prep_dma_memset(chan, dest, value,
+						    len, flags);
+}
+
 static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
 	struct dma_chan *chan,
 	struct scatterlist *dst_sg, unsigned int dst_nents,
@@ -818,6 +836,12 @@
 		size_t off2, size_t len)
 {
 	return dmaengine_check_align(dev->pq_align, off1, off2, len);
+}
+
+static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
+		size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->fill_align, off1, off2, len);
 }

 static inline void