Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'dma-mapping-5.18' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

- do not zero buffer in set_memory_decrypted (Kirill A. Shutemov)

- fix return value of dma-debug __setup handlers (Randy Dunlap)

- swiotlb cleanups (Robin Murphy)

- remove most remaining users of the pci-dma-compat.h API
(Christophe JAILLET)

- share the ABI header for the DMA map_benchmark with userspace
(Tian Tao)

- update the maintainer for DMA MAPPING BENCHMARK (Xiang Chen)

- remove CONFIG_DMA_REMAP (me)

* tag 'dma-mapping-5.18' of git://git.infradead.org/users/hch/dma-mapping:
dma-mapping: benchmark: extract a common header file for map_benchmark definition
dma-debug: fix return value of __setup handlers
dma-mapping: remove CONFIG_DMA_REMAP
media: v4l2-pci-skeleton: Remove usage of the deprecated "pci-dma-compat.h" API
rapidio/tsi721: Remove usage of the deprecated "pci-dma-compat.h" API
sparc: Remove usage of the deprecated "pci-dma-compat.h" API
agp/intel: Remove usage of the deprecated "pci-dma-compat.h" API
alpha: Remove usage of the deprecated "pci-dma-compat.h" API
MAINTAINERS: update maintainer list of DMA MAPPING BENCHMARK
swiotlb: simplify array allocation
swiotlb: tidy up includes
swiotlb: simplify debugfs setup
swiotlb: do not zero buffer in set_memory_decrypted()

+105 -160
+1 -1
MAINTAINERS
··· 5880 5880 F: kernel/dma/ 5881 5881 5882 5882 DMA MAPPING BENCHMARK 5883 - M: Barry Song <song.bao.hua@hisilicon.com> 5883 + M: Xiang Chen <chenxiang66@hisilicon.com> 5884 5884 L: iommu@lists.linux-foundation.org 5885 5885 F: kernel/dma/map_benchmark.c 5886 5886 F: tools/testing/selftests/dma/
+4 -3
arch/alpha/include/asm/floppy.h
··· 43 43 static int prev_dir; 44 44 int dir; 45 45 46 - dir = (mode != DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE; 46 + dir = (mode != DMA_MODE_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 47 47 48 48 if (bus_addr 49 49 && (addr != prev_addr || size != prev_size || dir != prev_dir)) { 50 50 /* different from last time -- unmap prev */ 51 - pci_unmap_single(isa_bridge, bus_addr, prev_size, prev_dir); 51 + dma_unmap_single(&isa_bridge->dev, bus_addr, prev_size, 52 + prev_dir); 52 53 bus_addr = 0; 53 54 } 54 55 55 56 if (!bus_addr) /* need to map it */ 56 - bus_addr = pci_map_single(isa_bridge, addr, size, dir); 57 + bus_addr = dma_map_single(&isa_bridge->dev, addr, size, dir); 57 58 58 59 /* remember this one as prev */ 59 60 prev_addr = addr;
+6 -6
arch/alpha/kernel/pci_iommu.c
··· 333 333 struct pci_dev *pdev = alpha_gendev_to_pci(dev); 334 334 int dac_allowed; 335 335 336 - BUG_ON(dir == PCI_DMA_NONE); 336 + BUG_ON(dir == DMA_NONE); 337 337 338 338 dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; 339 339 return pci_map_single_1(pdev, (char *)page_address(page) + offset, ··· 356 356 struct pci_iommu_arena *arena; 357 357 long dma_ofs, npages; 358 358 359 - BUG_ON(dir == PCI_DMA_NONE); 359 + BUG_ON(dir == DMA_NONE); 360 360 361 361 if (dma_addr >= __direct_map_base 362 362 && dma_addr < __direct_map_base + __direct_map_size) { ··· 460 460 unsigned long attrs) 461 461 { 462 462 struct pci_dev *pdev = alpha_gendev_to_pci(dev); 463 - pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); 463 + dma_unmap_single(&pdev->dev, dma_addr, size, DMA_BIDIRECTIONAL); 464 464 free_pages((unsigned long)cpu_addr, get_order(size)); 465 465 466 466 DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n", ··· 639 639 dma_addr_t max_dma; 640 640 int dac_allowed; 641 641 642 - BUG_ON(dir == PCI_DMA_NONE); 642 + BUG_ON(dir == DMA_NONE); 643 643 644 644 dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; 645 645 ··· 702 702 /* Some allocation failed while mapping the scatterlist 703 703 entries. Unmap them now. */ 704 704 if (out > start) 705 - pci_unmap_sg(pdev, start, out - start, dir); 705 + dma_unmap_sg(&pdev->dev, start, out - start, dir); 706 706 return -ENOMEM; 707 707 } 708 708 ··· 722 722 dma_addr_t max_dma; 723 723 dma_addr_t fbeg, fend; 724 724 725 - BUG_ON(dir == PCI_DMA_NONE); 725 + BUG_ON(dir == DMA_NONE); 726 726 727 727 if (! alpha_mv.mv_pci_tbi) 728 728 return;
+1 -1
arch/arm/Kconfig
··· 49 49 select DMA_DECLARE_COHERENT 50 50 select DMA_GLOBAL_POOL if !MMU 51 51 select DMA_OPS 52 - select DMA_REMAP if MMU 52 + select DMA_NONCOHERENT_MMAP if MMU 53 53 select EDAC_SUPPORT 54 54 select EDAC_ATOMIC_SCRUB 55 55 select GENERIC_ALLOCATOR
+1 -1
arch/sparc/kernel/ioport.c
··· 309 309 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, 310 310 enum dma_data_direction dir) 311 311 { 312 - if (dir != PCI_DMA_TODEVICE && 312 + if (dir != DMA_TO_DEVICE && 313 313 sparc_cpu_model == sparc_leon && 314 314 !sparc_leon3_snooping_enabled()) 315 315 leon_flush_dcache_all();
+1 -1
arch/xtensa/Kconfig
··· 18 18 select BUILDTIME_TABLE_SORT 19 19 select CLONE_BACKWARDS 20 20 select COMMON_CLK 21 - select DMA_REMAP if MMU 21 + select DMA_NONCOHERENT_MMAP if MMU 22 22 select GENERIC_ATOMIC64 23 23 select GENERIC_IRQ_SHOW 24 24 select GENERIC_LIB_CMPDI2
+13 -13
drivers/char/agp/intel-gtt.c
··· 111 111 for_each_sg(st->sgl, sg, num_entries, i) 112 112 sg_set_page(sg, pages[i], PAGE_SIZE, 0); 113 113 114 - if (!pci_map_sg(intel_private.pcidev, 115 - st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL)) 114 + if (!dma_map_sg(&intel_private.pcidev->dev, st->sgl, st->nents, 115 + DMA_BIDIRECTIONAL)) 116 116 goto err; 117 117 118 118 return 0; ··· 127 127 struct sg_table st; 128 128 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); 129 129 130 - pci_unmap_sg(intel_private.pcidev, sg_list, 131 - num_sg, PCI_DMA_BIDIRECTIONAL); 130 + dma_unmap_sg(&intel_private.pcidev->dev, sg_list, num_sg, 131 + DMA_BIDIRECTIONAL); 132 132 133 133 st.sgl = sg_list; 134 134 st.orig_nents = st.nents = num_sg; ··· 303 303 set_pages_uc(page, 1); 304 304 305 305 if (intel_private.needs_dmar) { 306 - dma_addr = pci_map_page(intel_private.pcidev, page, 0, 307 - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 308 - if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) { 306 + dma_addr = dma_map_page(&intel_private.pcidev->dev, page, 0, 307 + PAGE_SIZE, DMA_BIDIRECTIONAL); 308 + if (dma_mapping_error(&intel_private.pcidev->dev, dma_addr)) { 309 309 __free_page(page); 310 310 return -EINVAL; 311 311 } ··· 552 552 { 553 553 set_pages_wb(intel_private.scratch_page, 1); 554 554 if (intel_private.needs_dmar) 555 - pci_unmap_page(intel_private.pcidev, 556 - intel_private.scratch_page_dma, 557 - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 555 + dma_unmap_page(&intel_private.pcidev->dev, 556 + intel_private.scratch_page_dma, PAGE_SIZE, 557 + DMA_BIDIRECTIONAL); 558 558 __free_page(intel_private.scratch_page); 559 559 } ··· 1412 1412 1413 1413 if (bridge) { 1414 1414 mask = intel_private.driver->dma_mask_size; 1415 - if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) 1415 + if (dma_set_mask(&intel_private.pcidev->dev, DMA_BIT_MASK(mask))) 1416 1416 dev_err(&intel_private.pcidev->dev, 1417 1417 "set gfx device dma mask %d-bit failed!\n", 1418 1418 mask); 1419 1419 else 1420 - pci_set_consistent_dma_mask(intel_private.pcidev, 1421 - DMA_BIT_MASK(mask)); 1420 + dma_set_coherent_mask(&intel_private.pcidev->dev, 1421 + DMA_BIT_MASK(mask)); 1422 1422 } 1423 1423 1424 1424 if (intel_gtt_init() != 0) {
+5 -9
drivers/iommu/dma-iommu.c
··· 856 856 return NULL; 857 857 } 858 858 859 - #ifdef CONFIG_DMA_REMAP 860 859 static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, 861 860 size_t size, enum dma_data_direction dir, gfp_t gfp, 862 861 unsigned long attrs) ··· 885 886 sg_free_table(&sh->sgt); 886 887 kfree(sh); 887 888 } 888 - #endif /* CONFIG_DMA_REMAP */ 889 889 890 890 static void iommu_dma_sync_single_for_cpu(struct device *dev, 891 891 dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) ··· 1278 1280 dma_free_from_pool(dev, cpu_addr, alloc_size)) 1279 1281 return; 1280 1282 1281 - if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { 1283 + if (is_vmalloc_addr(cpu_addr)) { 1282 1284 /* 1283 1285 * If it the address is remapped, then it's either non-coherent 1284 1286 * or highmem CMA, or an iommu_dma_alloc_remap() construction. ··· 1320 1322 if (!page) 1321 1323 return NULL; 1322 1324 1323 - if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { 1325 + if (!coherent || PageHighMem(page)) { 1324 1326 pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); 1325 1327 1326 1328 cpu_addr = dma_common_contiguous_remap(page, alloc_size, ··· 1352 1354 1353 1355 gfp |= __GFP_ZERO; 1354 1356 1355 - if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) && 1357 + if (gfpflags_allow_blocking(gfp) && 1356 1358 !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) { 1357 1359 return iommu_dma_alloc_remap(dev, size, handle, gfp, 1358 1360 dma_pgprot(dev, PAGE_KERNEL, attrs), attrs); ··· 1393 1395 if (off >= nr_pages || vma_pages(vma) > nr_pages - off) 1394 1396 return -ENXIO; 1395 1397 1396 - if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { 1398 + if (is_vmalloc_addr(cpu_addr)) { 1397 1399 struct page **pages = dma_common_find_pages(cpu_addr); 1398 1400 1399 1401 if (pages) ··· 1415 1417 struct page *page; 1416 1418 int ret; 1417 1419 1418 - if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { 1420 + if (is_vmalloc_addr(cpu_addr)) { 1419 1421 struct page **pages = dma_common_find_pages(cpu_addr); 1420 1422 1421 1423 if (pages) { ··· 1447 1449 .free = iommu_dma_free, 1448 1450 .alloc_pages = dma_common_alloc_pages, 1449 1451 .free_pages = dma_common_free_pages, 1450 - #ifdef CONFIG_DMA_REMAP 1451 1452 .alloc_noncontiguous = iommu_dma_alloc_noncontiguous, 1452 1453 .free_noncontiguous = iommu_dma_free_noncontiguous, 1453 - #endif 1454 1454 .mmap = iommu_dma_mmap, 1455 1455 .get_sgtable = iommu_dma_get_sgtable, 1456 1456 .map_page = iommu_dma_map_page,
+4 -4
drivers/rapidio/devices/tsi721.c
··· 2836 2836 } 2837 2837 2838 2838 /* Configure DMA attributes. */ 2839 - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 2840 - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2839 + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 2840 + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2841 2841 if (err) { 2842 2842 tsi_err(&pdev->dev, "Unable to set DMA mask"); 2843 2843 goto err_unmap_bars; 2844 2844 } 2845 2845 2846 - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) 2846 + if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) 2847 2847 tsi_info(&pdev->dev, "Unable to set consistent DMA mask"); 2848 2848 } else { 2849 - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 2849 + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 2850 2850 if (err) 2851 2851 tsi_info(&pdev->dev, "Unable to set consistent DMA mask"); 2852 2852 }
+31
include/linux/map_benchmark.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2022 HiSilicon Limited. 4 + */ 5 + 6 + #ifndef _KERNEL_DMA_BENCHMARK_H 7 + #define _KERNEL_DMA_BENCHMARK_H 8 + 9 + #define DMA_MAP_BENCHMARK _IOWR('d', 1, struct map_benchmark) 10 + #define DMA_MAP_MAX_THREADS 1024 11 + #define DMA_MAP_MAX_SECONDS 300 12 + #define DMA_MAP_MAX_TRANS_DELAY (10 * NSEC_PER_MSEC) 13 + 14 + #define DMA_MAP_BIDIRECTIONAL 0 15 + #define DMA_MAP_TO_DEVICE 1 16 + #define DMA_MAP_FROM_DEVICE 2 17 + 18 + struct map_benchmark { 19 + __u64 avg_map_100ns; /* average map latency in 100ns */ 20 + __u64 map_stddev; /* standard deviation of map latency */ 21 + __u64 avg_unmap_100ns; /* as above */ 22 + __u64 unmap_stddev; 23 + __u32 threads; /* how many threads will do map/unmap in parallel */ 24 + __u32 seconds; /* how long the test will last */ 25 + __s32 node; /* which numa node this benchmark will run on */ 26 + __u32 dma_bits; /* DMA addressing capability */ 27 + __u32 dma_dir; /* DMA data direction */ 28 + __u32 dma_trans_ns; /* time for DMA transmission in ns */ 29 + __u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */ 30 + }; 31 + #endif /* _KERNEL_DMA_BENCHMARK_H */
+1 -6
kernel/dma/Kconfig
··· 110 110 select DMA_DECLARE_COHERENT 111 111 bool 112 112 113 - config DMA_REMAP 114 - bool 115 - depends on MMU 116 - select DMA_NONCOHERENT_MMAP 117 - 118 113 config DMA_DIRECT_REMAP 119 114 bool 120 - select DMA_REMAP 121 115 select DMA_COHERENT_POOL 116 + select DMA_NONCOHERENT_MMAP 122 117 123 118 config DMA_CMA 124 119 bool "DMA Contiguous Memory Allocator"
+1 -1
kernel/dma/Makefile
··· 8 8 obj-$(CONFIG_DMA_API_DEBUG) += debug.o 9 9 obj-$(CONFIG_SWIOTLB) += swiotlb.o 10 10 obj-$(CONFIG_DMA_COHERENT_POOL) += pool.o 11 - obj-$(CONFIG_DMA_REMAP) += remap.o 11 + obj-$(CONFIG_MMU) += remap.o 12 12 obj-$(CONFIG_DMA_MAP_BENCHMARK) += map_benchmark.o
+2 -2
kernel/dma/debug.c
··· 927 927 global_disable = true; 928 928 } 929 929 930 - return 0; 930 + return 1; 931 931 } 932 932 933 933 static __init int dma_debug_entries_cmdline(char *str) ··· 936 936 return -EINVAL; 937 937 if (!get_option(&str, &nr_prealloc_entries)) 938 938 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; 939 - return 0; 939 + return 1; 940 940 } 941 941 942 942 __setup("dma_debug=", dma_debug_cmdline);
+7 -11
kernel/dma/direct.c
··· 265 265 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO); 266 266 if (!page) 267 267 return NULL; 268 + 269 + /* 270 + * dma_alloc_contiguous can return highmem pages depending on a 271 + * combination the cma= arguments and per-arch setup. These need to be 272 + * remapped to return a kernel virtual address. 273 + */ 268 274 if (PageHighMem(page)) { 269 - /* 270 - * Depending on the cma= arguments and per-arch setup, 271 - * dma_alloc_contiguous could return highmem pages. 272 - * Without remapping there is no way to return them here, so 273 - * log an error and fail. 274 - */ 275 - if (!IS_ENABLED(CONFIG_DMA_REMAP)) { 276 - dev_info(dev, "Rejecting highmem page from CMA.\n"); 277 - goto out_free_pages; 278 - } 279 275 remap = true; 280 276 set_uncached = false; 281 277 } ··· 345 349 dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) 346 350 return; 347 351 348 - if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { 352 + if (is_vmalloc_addr(cpu_addr)) { 349 353 vunmap(cpu_addr); 350 354 } else { 351 355 if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
+1 -24
kernel/dma/map_benchmark.c
··· 11 11 #include <linux/dma-mapping.h> 12 12 #include <linux/kernel.h> 13 13 #include <linux/kthread.h> 14 + #include <linux/map_benchmark.h> 14 15 #include <linux/math64.h> 15 16 #include <linux/module.h> 16 17 #include <linux/pci.h> 17 18 #include <linux/platform_device.h> 18 19 #include <linux/slab.h> 19 20 #include <linux/timekeeping.h> 20 - 21 - #define DMA_MAP_BENCHMARK _IOWR('d', 1, struct map_benchmark) 22 - #define DMA_MAP_MAX_THREADS 1024 23 - #define DMA_MAP_MAX_SECONDS 300 24 - #define DMA_MAP_MAX_TRANS_DELAY (10 * NSEC_PER_MSEC) 25 - 26 - #define DMA_MAP_BIDIRECTIONAL 0 27 - #define DMA_MAP_TO_DEVICE 1 28 - #define DMA_MAP_FROM_DEVICE 2 29 - 30 - struct map_benchmark { 31 - __u64 avg_map_100ns; /* average map latency in 100ns */ 32 - __u64 map_stddev; /* standard deviation of map latency */ 33 - __u64 avg_unmap_100ns; /* as above */ 34 - __u64 unmap_stddev; 35 - __u32 threads; /* how many threads will do map/unmap in parallel */ 36 - __u32 seconds; /* how long the test will last */ 37 - __s32 node; /* which numa node this benchmark will run on */ 38 - __u32 dma_bits; /* DMA addressing capability */ 39 - __u32 dma_dir; /* DMA data direction */ 40 - __u32 dma_trans_ns; /* time for DMA transmission in ns */ 41 - __u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */ 42 - __u8 expansion[76]; /* For future use */ 43 - }; 44 21 45 22 struct map_benchmark_data { 46 23 struct map_benchmark bparam;
+24 -52
kernel/dma/swiotlb.c
··· 21 21 #define pr_fmt(fmt) "software IO TLB: " fmt 22 22 23 23 #include <linux/cache.h> 24 + #include <linux/cc_platform.h> 25 + #include <linux/ctype.h> 26 + #include <linux/debugfs.h> 24 27 #include <linux/dma-direct.h> 25 28 #include <linux/dma-map-ops.h> 26 - #include <linux/mm.h> 27 29 #include <linux/export.h> 30 + #include <linux/gfp.h> 31 + #include <linux/highmem.h> 32 + #include <linux/io.h> 33 + #include <linux/iommu-helper.h> 34 + #include <linux/init.h> 35 + #include <linux/memblock.h> 36 + #include <linux/mm.h> 37 + #include <linux/pfn.h> 38 + #include <linux/scatterlist.h> 39 + #include <linux/set_memory.h> 28 40 #include <linux/spinlock.h> 29 41 #include <linux/string.h> 30 42 #include <linux/swiotlb.h> 31 - #include <linux/pfn.h> 32 43 #include <linux/types.h> 33 - #include <linux/ctype.h> 34 - #include <linux/highmem.h> 35 - #include <linux/gfp.h> 36 - #include <linux/scatterlist.h> 37 - #include <linux/cc_platform.h> 38 - #include <linux/set_memory.h> 39 - #ifdef CONFIG_DEBUG_FS 40 - #include <linux/debugfs.h> 41 - #endif 42 44 #ifdef CONFIG_DMA_RESTRICTED_POOL 43 - #include <linux/io.h> 44 45 #include <linux/of.h> 45 46 #include <linux/of_fdt.h> 46 47 #include <linux/of_reserved_mem.h> 47 48 #include <linux/slab.h> 48 49 #endif 49 - 50 - #include <asm/io.h> 51 - #include <asm/dma.h> 52 - 53 - #include <linux/io.h> 54 - #include <linux/init.h> 55 - #include <linux/memblock.h> 56 - #include <linux/iommu-helper.h> 57 50 58 51 #define CREATE_TRACE_POINTS 59 52 #include <trace/events/swiotlb.h> ··· 200 207 mem->vaddr = swiotlb_mem_remap(mem, bytes); 201 208 if (!mem->vaddr) 202 209 mem->vaddr = vaddr; 203 - 204 - memset(mem->vaddr, 0, bytes); 205 210 } 206 211 207 212 static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, ··· 754 763 } 755 764 EXPORT_SYMBOL_GPL(is_swiotlb_active); 756 765 757 - #ifdef CONFIG_DEBUG_FS 758 - static struct dentry *debugfs_dir; 759 - 760 - static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem) 766 + static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem, 767 + const char *dirname) 761 768 { 769 + mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs); 770 + if (!mem->nslabs) 771 + return; 772 + 762 773 debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); 763 774 debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used); 764 775 } 765 776 766 - static int __init swiotlb_create_default_debugfs(void) 777 + static int __init __maybe_unused swiotlb_create_default_debugfs(void) 767 778 { 768 - struct io_tlb_mem *mem = &io_tlb_default_mem; 769 - 770 - debugfs_dir = debugfs_create_dir("swiotlb", NULL); 771 - if (mem->nslabs) { 772 - mem->debugfs = debugfs_dir; 773 - swiotlb_create_debugfs_files(mem); 774 - } 779 + swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb"); 775 780 return 0; 776 781 } 777 782 783 + #ifdef CONFIG_DEBUG_FS 778 784 late_initcall(swiotlb_create_default_debugfs); 779 - 780 785 #endif 781 786 782 787 #ifdef CONFIG_DMA_RESTRICTED_POOL 783 - 784 - #ifdef CONFIG_DEBUG_FS 785 - static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem) 786 - { 787 - struct io_tlb_mem *mem = rmem->priv; 788 - 789 - mem->debugfs = debugfs_create_dir(rmem->name, debugfs_dir); 790 - swiotlb_create_debugfs_files(mem); 791 - } 792 - #else 793 - static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem) 794 - { 795 - } 796 - #endif 797 788 798 789 struct page *swiotlb_alloc(struct device *dev, size_t size) 799 790 { ··· 823 850 if (!mem) 824 851 return -ENOMEM; 825 852 826 - mem->slots = kzalloc(array_size(sizeof(*mem->slots), nslabs), 827 - GFP_KERNEL); 853 + mem->slots = kcalloc(nslabs, sizeof(*mem->slots), GFP_KERNEL); 828 854 if (!mem->slots) { 829 855 kfree(mem); 830 856 return -ENOMEM; ··· 837 865 838 866 rmem->priv = mem; 839 867 840 - rmem_swiotlb_debugfs_init(rmem); 868 + swiotlb_create_debugfs_files(mem, rmem->name); 841 869 }
+1 -1
samples/v4l/v4l2-pci-skeleton.c
··· 766 766 ret = pci_enable_device(pdev); 767 767 if (ret) 768 768 return ret; 769 - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 769 + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 770 770 if (ret) { 771 771 dev_err(&pdev->dev, "no suitable DMA available.\n"); 772 772 goto disable_pci;
+1 -24
tools/testing/selftests/dma/dma_map_benchmark.c
··· 10 10 #include <unistd.h> 11 11 #include <sys/ioctl.h> 12 12 #include <sys/mman.h> 13 + #include <linux/map_benchmark.h> 13 14 #include <linux/types.h> 14 15 15 16 #define NSEC_PER_MSEC 1000000L 16 - 17 - #define DMA_MAP_BENCHMARK _IOWR('d', 1, struct map_benchmark) 18 - #define DMA_MAP_MAX_THREADS 1024 19 - #define DMA_MAP_MAX_SECONDS 300 20 - #define DMA_MAP_MAX_TRANS_DELAY (10 * NSEC_PER_MSEC) 21 - 22 - #define DMA_MAP_BIDIRECTIONAL 0 23 - #define DMA_MAP_TO_DEVICE 1 24 - #define DMA_MAP_FROM_DEVICE 2 25 17 26 18 static char *directions[] = { 27 19 "BIDIRECTIONAL", 28 20 "TO_DEVICE", 29 21 "FROM_DEVICE", 30 - }; 31 - 32 - struct map_benchmark { 33 - __u64 avg_map_100ns; /* average map latency in 100ns */ 34 - __u64 map_stddev; /* standard deviation of map latency */ 35 - __u64 avg_unmap_100ns; /* as above */ 36 - __u64 unmap_stddev; 37 - __u32 threads; /* how many threads will do map/unmap in parallel */ 38 - __u32 seconds; /* how long the test will last */ 39 - __s32 node; /* which numa node this benchmark will run on */ 40 - __u32 dma_bits; /* DMA addressing capability */ 41 - __u32 dma_dir; /* DMA data direction */ 42 - __u32 dma_trans_ns; /* time for DMA transmission in ns */ 43 - __u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */ 44 - __u8 expansion[76]; /* For future use */ 45 22 }; 46 23 47 24 int main(int argc, char **argv)