Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'dma-mapping-6.5-2023-06-28' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

- swiotlb cleanups (Petr Tesarik)

- use kvmalloc_array (gaoxu)

- a small step towards removing is_swiotlb_active (Christoph Hellwig)

- fix a Kconfig typo (Sui Jingfeng)

* tag 'dma-mapping-6.5-2023-06-28' of git://git.infradead.org/users/hch/dma-mapping:
drm/nouveau: stop using is_swiotlb_active
swiotlb: use the atomic counter of total used slabs if available
swiotlb: remove unused field "used" from struct io_tlb_mem
dma-remap: use kvmalloc_array/kvfree for larger dma memory remap
dma-mapping: fix a Kconfig typo

+17 -12
+3 -7
drivers/gpu/drm/nouveau/nouveau_ttm.c
··· 24 24 */ 25 25 26 26 #include <linux/limits.h> 27 - #include <linux/swiotlb.h> 28 27 29 28 #include <drm/ttm/ttm_range_manager.h> 29 + #include <drm/drm_cache.h> 30 30 31 31 #include "nouveau_drv.h" 32 32 #include "nouveau_gem.h" ··· 265 265 struct nvkm_pci *pci = device->pci; 266 266 struct nvif_mmu *mmu = &drm->client.mmu; 267 267 struct drm_device *dev = drm->dev; 268 - bool need_swiotlb = false; 269 268 int typei, ret; 270 269 271 270 ret = nouveau_ttm_init_host(drm, 0); ··· 299 300 drm->agp.cma = pci->agp.cma; 300 301 } 301 302 302 - #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86) 303 - need_swiotlb = is_swiotlb_active(dev->dev); 304 - #endif 305 - 306 303 ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev, 307 304 dev->anon_inode->i_mapping, 308 - dev->vma_offset_manager, need_swiotlb, 305 + dev->vma_offset_manager, 306 + drm_need_swiotlb(drm->client.mmu.dmabits), 309 307 drm->client.mmu.dmabits <= 32); 310 308 if (ret) { 311 309 NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
-2
include/linux/swiotlb.h
··· 76 76 * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and 77 77 * @end. For default swiotlb, this is command line adjustable via 78 78 * setup_io_tlb_npages. 79 - * @used: The number of used IO TLB block. 80 79 * @list: The free list describing the number of free entries available 81 80 * from each index. 82 81 * @orig_addr: The original address corresponding to a mapped entry. ··· 97 98 phys_addr_t end; 98 99 void *vaddr; 99 100 unsigned long nslabs; 100 - unsigned long used; 101 101 struct dentry *debugfs; 102 102 bool late_alloc; 103 103 bool force_bounce;
+1 -1
kernel/dma/Kconfig
··· 42 42 # 43 43 # Select this option if the architecture needs special handling for 44 44 # DMA_ATTR_WRITE_COMBINE. Normally the "uncached" mapping should be what 45 - # people thing of when saying write combine, so very few platforms should 45 + # people think of when saying write combine, so very few platforms should 46 46 # need to enable this. 47 47 # 48 48 config ARCH_HAS_DMA_WRITE_COMBINE
+2 -2
kernel/dma/remap.c
··· 43 43 void *vaddr; 44 44 int i; 45 45 46 - pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL); 46 + pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); 47 47 if (!pages) 48 48 return NULL; 49 49 for (i = 0; i < count; i++) 50 50 pages[i] = nth_page(page, i); 51 51 vaddr = vmap(pages, count, VM_DMA_COHERENT, prot); 52 - kfree(pages); 52 + kvfree(pages); 53 53 54 54 return vaddr; 55 55 }
+11
kernel/dma/swiotlb.c
··· 717 717 return -1; 718 718 } 719 719 720 + #ifdef CONFIG_DEBUG_FS 721 + 722 + static unsigned long mem_used(struct io_tlb_mem *mem) 723 + { 724 + return atomic_long_read(&mem->total_used); 725 + } 726 + 727 + #else /* !CONFIG_DEBUG_FS */ 728 + 720 729 static unsigned long mem_used(struct io_tlb_mem *mem) 721 730 { 722 731 int i; ··· 735 726 used += mem->areas[i].used; 736 727 return used; 737 728 } 729 + 730 + #endif /* CONFIG_DEBUG_FS */ 738 731 739 732 phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, 740 733 size_t mapping_size, size_t alloc_size,