Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ALSA: memalloc: Unify x86 SG-buffer handling (take#3)

This is the third attempt to unify the x86-specific SG-buffer handling
code with the new standard non-contiguous page handler.

The first try (in commit 2d9ea39917a4) failed due to the wrong page
and address calculations, hence reverted. (And the second try failed
due to a copy&paste error.) Now it's corrected with the previous fix
for noncontig pages, and the proper sg page iteration by this patch.

After the migration, SNDRV_DMA_TYPE_DMA_SG becomes identical with
SNDRV_DMA_TYPE_NONCONTIG on x86, while others still fall back to
SNDRV_DMA_TYPE_DEV.

Tested-by: Alex Xu (Hello71) <alex_y_xu@yahoo.ca>
Tested-by: Harald Arnesen <harald@skogtun.org>
Link: https://lore.kernel.org/r/20211017074859.24112-4-tiwai@suse.de
Link: https://lore.kernel.org/r/20211109062235.22310-1-tiwai@suse.de
Link: https://lore.kernel.org/r/20211116073358.19741-1-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>

+56 -213
+7 -7
include/sound/memalloc.h
··· 36 36 #define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */ 37 37 #define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */ 38 38 #define SNDRV_DMA_TYPE_DEV_WC 5 /* continuous write-combined */ 39 - #ifdef CONFIG_SND_DMA_SGBUF 40 - #define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */ 41 - #define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */ 42 - #else 43 - #define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */ 44 - #define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC 45 - #endif 46 39 #ifdef CONFIG_GENERIC_ALLOCATOR 47 40 #define SNDRV_DMA_TYPE_DEV_IRAM 4 /* generic device iram-buffer */ 48 41 #else ··· 44 51 #define SNDRV_DMA_TYPE_VMALLOC 7 /* vmalloc'ed buffer */ 45 52 #define SNDRV_DMA_TYPE_NONCONTIG 8 /* non-coherent SG buffer */ 46 53 #define SNDRV_DMA_TYPE_NONCOHERENT 9 /* non-coherent buffer */ 54 + #ifdef CONFIG_SND_DMA_SGBUF 55 + #define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_NONCONTIG 56 + #define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */ 57 + #else 58 + #define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */ 59 + #define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC 60 + #endif 47 61 48 62 /* 49 63 * info for buffer allocation
-1
sound/core/Makefile
··· 19 19 snd-pcm-y := pcm.o pcm_native.o pcm_lib.o pcm_misc.o \ 20 20 pcm_memory.o memalloc.o 21 21 snd-pcm-$(CONFIG_SND_PCM_TIMER) += pcm_timer.o 22 - snd-pcm-$(CONFIG_SND_DMA_SGBUF) += sgbuf.o 23 22 snd-pcm-$(CONFIG_SND_PCM_ELD) += pcm_drm_eld.o 24 23 snd-pcm-$(CONFIG_SND_PCM_IEC958) += pcm_iec958.o 25 24
+49 -4
sound/core/memalloc.c
··· 620 620 .get_chunk_size = snd_dma_noncontig_get_chunk_size, 621 621 }; 622 622 623 + /* x86-specific SG-buffer with WC pages */ 624 + #ifdef CONFIG_SND_DMA_SGBUF 625 + #define sg_wc_address(it) ((unsigned long)page_address(sg_page_iter_page(it))) 626 + 627 + static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size) 628 + { 629 + void *p = snd_dma_noncontig_alloc(dmab, size); 630 + struct sg_table *sgt = dmab->private_data; 631 + struct sg_page_iter iter; 632 + 633 + if (!p) 634 + return NULL; 635 + for_each_sgtable_page(sgt, &iter, 0) 636 + set_memory_wc(sg_wc_address(&iter), 1); 637 + return p; 638 + } 639 + 640 + static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab) 641 + { 642 + struct sg_table *sgt = dmab->private_data; 643 + struct sg_page_iter iter; 644 + 645 + for_each_sgtable_page(sgt, &iter, 0) 646 + set_memory_wb(sg_wc_address(&iter), 1); 647 + snd_dma_noncontig_free(dmab); 648 + } 649 + 650 + static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab, 651 + struct vm_area_struct *area) 652 + { 653 + area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); 654 + return dma_mmap_noncontiguous(dmab->dev.dev, area, 655 + dmab->bytes, dmab->private_data); 656 + } 657 + 658 + static const struct snd_malloc_ops snd_dma_sg_wc_ops = { 659 + .alloc = snd_dma_sg_wc_alloc, 660 + .free = snd_dma_sg_wc_free, 661 + .mmap = snd_dma_sg_wc_mmap, 662 + .sync = snd_dma_noncontig_sync, 663 + .get_addr = snd_dma_noncontig_get_addr, 664 + .get_page = snd_dma_noncontig_get_page, 665 + .get_chunk_size = snd_dma_noncontig_get_chunk_size, 666 + }; 667 + #endif /* CONFIG_SND_DMA_SGBUF */ 668 + 623 669 /* 624 670 * Non-coherent pages allocator 625 671 */ ··· 725 679 [SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops, 726 680 [SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops, 727 681 [SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops, 682 + #ifdef CONFIG_SND_DMA_SGBUF 683 + [SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops, 684 + #endif 728 685 #ifdef 
CONFIG_GENERIC_ALLOCATOR 729 686 [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops, 730 687 #endif /* CONFIG_GENERIC_ALLOCATOR */ 731 688 #endif /* CONFIG_HAS_DMA */ 732 - #ifdef CONFIG_SND_DMA_SGBUF 733 - [SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops, 734 - [SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops, 735 - #endif 736 689 }; 737 690 738 691 static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
-201
sound/core/sgbuf.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 2 - /* 3 - * Scatter-Gather buffer 4 - * 5 - * Copyright (c) by Takashi Iwai <tiwai@suse.de> 6 - */ 7 - 8 - #include <linux/slab.h> 9 - #include <linux/mm.h> 10 - #include <linux/vmalloc.h> 11 - #include <linux/export.h> 12 - #include <sound/memalloc.h> 13 - #include "memalloc_local.h" 14 - 15 - struct snd_sg_page { 16 - void *buf; 17 - dma_addr_t addr; 18 - }; 19 - 20 - struct snd_sg_buf { 21 - int size; /* allocated byte size */ 22 - int pages; /* allocated pages */ 23 - int tblsize; /* allocated table size */ 24 - struct snd_sg_page *table; /* address table */ 25 - struct page **page_table; /* page table (for vmap/vunmap) */ 26 - struct device *dev; 27 - }; 28 - 29 - /* table entries are align to 32 */ 30 - #define SGBUF_TBL_ALIGN 32 31 - #define sgbuf_align_table(tbl) ALIGN((tbl), SGBUF_TBL_ALIGN) 32 - 33 - static void snd_dma_sg_free(struct snd_dma_buffer *dmab) 34 - { 35 - struct snd_sg_buf *sgbuf = dmab->private_data; 36 - struct snd_dma_buffer tmpb; 37 - int i; 38 - 39 - if (!sgbuf) 40 - return; 41 - 42 - vunmap(dmab->area); 43 - dmab->area = NULL; 44 - 45 - tmpb.dev.type = SNDRV_DMA_TYPE_DEV; 46 - if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) 47 - tmpb.dev.type = SNDRV_DMA_TYPE_DEV_WC; 48 - tmpb.dev.dev = sgbuf->dev; 49 - for (i = 0; i < sgbuf->pages; i++) { 50 - if (!(sgbuf->table[i].addr & ~PAGE_MASK)) 51 - continue; /* continuous pages */ 52 - tmpb.area = sgbuf->table[i].buf; 53 - tmpb.addr = sgbuf->table[i].addr & PAGE_MASK; 54 - tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT; 55 - snd_dma_free_pages(&tmpb); 56 - } 57 - 58 - kfree(sgbuf->table); 59 - kfree(sgbuf->page_table); 60 - kfree(sgbuf); 61 - dmab->private_data = NULL; 62 - } 63 - 64 - #define MAX_ALLOC_PAGES 32 65 - 66 - static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size) 67 - { 68 - struct snd_sg_buf *sgbuf; 69 - unsigned int i, pages, chunk, maxpages; 70 - struct snd_dma_buffer tmpb; 71 - struct 
snd_sg_page *table; 72 - struct page **pgtable; 73 - int type = SNDRV_DMA_TYPE_DEV; 74 - pgprot_t prot = PAGE_KERNEL; 75 - void *area; 76 - 77 - dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL); 78 - if (!sgbuf) 79 - return NULL; 80 - if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) { 81 - type = SNDRV_DMA_TYPE_DEV_WC; 82 - #ifdef pgprot_noncached 83 - prot = pgprot_noncached(PAGE_KERNEL); 84 - #endif 85 - } 86 - sgbuf->dev = dmab->dev.dev; 87 - pages = snd_sgbuf_aligned_pages(size); 88 - sgbuf->tblsize = sgbuf_align_table(pages); 89 - table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL); 90 - if (!table) 91 - goto _failed; 92 - sgbuf->table = table; 93 - pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL); 94 - if (!pgtable) 95 - goto _failed; 96 - sgbuf->page_table = pgtable; 97 - 98 - /* allocate pages */ 99 - maxpages = MAX_ALLOC_PAGES; 100 - while (pages > 0) { 101 - chunk = pages; 102 - /* don't be too eager to take a huge chunk */ 103 - if (chunk > maxpages) 104 - chunk = maxpages; 105 - chunk <<= PAGE_SHIFT; 106 - if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev, 107 - chunk, &tmpb) < 0) { 108 - if (!sgbuf->pages) 109 - goto _failed; 110 - size = sgbuf->pages * PAGE_SIZE; 111 - break; 112 - } 113 - chunk = tmpb.bytes >> PAGE_SHIFT; 114 - for (i = 0; i < chunk; i++) { 115 - table->buf = tmpb.area; 116 - table->addr = tmpb.addr; 117 - if (!i) 118 - table->addr |= chunk; /* mark head */ 119 - table++; 120 - *pgtable++ = virt_to_page(tmpb.area); 121 - tmpb.area += PAGE_SIZE; 122 - tmpb.addr += PAGE_SIZE; 123 - } 124 - sgbuf->pages += chunk; 125 - pages -= chunk; 126 - if (chunk < maxpages) 127 - maxpages = chunk; 128 - } 129 - 130 - sgbuf->size = size; 131 - area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot); 132 - if (!area) 133 - goto _failed; 134 - return area; 135 - 136 - _failed: 137 - snd_dma_sg_free(dmab); /* free the table */ 138 - return NULL; 139 - } 140 - 141 - static dma_addr_t snd_dma_sg_get_addr(struct 
snd_dma_buffer *dmab, 142 - size_t offset) 143 - { 144 - struct snd_sg_buf *sgbuf = dmab->private_data; 145 - dma_addr_t addr; 146 - 147 - addr = sgbuf->table[offset >> PAGE_SHIFT].addr; 148 - addr &= ~((dma_addr_t)PAGE_SIZE - 1); 149 - return addr + offset % PAGE_SIZE; 150 - } 151 - 152 - static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab, 153 - size_t offset) 154 - { 155 - struct snd_sg_buf *sgbuf = dmab->private_data; 156 - unsigned int idx = offset >> PAGE_SHIFT; 157 - 158 - if (idx >= (unsigned int)sgbuf->pages) 159 - return NULL; 160 - return sgbuf->page_table[idx]; 161 - } 162 - 163 - static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab, 164 - unsigned int ofs, 165 - unsigned int size) 166 - { 167 - struct snd_sg_buf *sg = dmab->private_data; 168 - unsigned int start, end, pg; 169 - 170 - start = ofs >> PAGE_SHIFT; 171 - end = (ofs + size - 1) >> PAGE_SHIFT; 172 - /* check page continuity */ 173 - pg = sg->table[start].addr >> PAGE_SHIFT; 174 - for (;;) { 175 - start++; 176 - if (start > end) 177 - break; 178 - pg++; 179 - if ((sg->table[start].addr >> PAGE_SHIFT) != pg) 180 - return (start << PAGE_SHIFT) - ofs; 181 - } 182 - /* ok, all on continuous pages */ 183 - return size; 184 - } 185 - 186 - static int snd_dma_sg_mmap(struct snd_dma_buffer *dmab, 187 - struct vm_area_struct *area) 188 - { 189 - if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) 190 - area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); 191 - return -ENOENT; /* continue with the default mmap handler */ 192 - } 193 - 194 - const struct snd_malloc_ops snd_dma_sg_ops = { 195 - .alloc = snd_dma_sg_alloc, 196 - .free = snd_dma_sg_free, 197 - .get_addr = snd_dma_sg_get_addr, 198 - .get_page = snd_dma_sg_get_page, 199 - .get_chunk_size = snd_dma_sg_get_chunk_size, 200 - .mmap = snd_dma_sg_mmap, 201 - };