Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-debug: Dynamically expand the dma_debug_entry pool

Certain drivers such as large multi-queue network adapters can use pools
of mapped DMA buffers larger than the default dma_debug_entry pool of
65536 entries, with the result that merely probing such a device can
cause DMA debug to disable itself during boot unless explicitly given an
appropriate "dma_debug_entries=..." option.

Developers trying to debug some other driver on such a system may not be
immediately aware of this, and at worst it can hide bugs if they fail to
realise that dma-debug has already disabled itself unexpectedly by the
time their code of interest gets to run. Even once they do realise, it
can be a bit of a pain to empirically determine a suitable number of
preallocated entries to configure, short of massively over-allocating.

There's really no need for such a static limit, though, since we can
quite easily expand the pool at runtime in those rare cases that the
preallocated entries are insufficient, which is arguably the least
surprising and most useful behaviour. To that end, refactor the
prealloc_memory() logic a little bit to generalise it for runtime
reallocations as well.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Qian Cai <cai@lca.pw>
Signed-off-by: Christoph Hellwig <hch@lst.de>

authored by

Robin Murphy and committed by
Christoph Hellwig
2b9d9ac0 f737b095

+46 -44
+5 -6
Documentation/DMA-API.txt
··· 717 717 dma-api/min_free_entries This read-only file can be read to get the 718 718 minimum number of free dma_debug_entries the 719 719 allocator has ever seen. If this value goes 720 - down to zero the code will disable itself 721 - because it is not longer reliable. 720 + down to zero the code will attempt to increase 721 + nr_total_entries to compensate. 722 722 723 723 dma-api/num_free_entries The current number of free dma_debug_entries 724 724 in the allocator. ··· 745 745 driver afterwards. This filter can be disabled or changed later using debugfs. 746 746 747 747 When the code disables itself at runtime this is most likely because it ran 748 - out of dma_debug_entries. These entries are preallocated at boot. The number 749 - of preallocated entries is defined per architecture. If it is too low for you 750 - boot with 'dma_debug_entries=<your_desired_number>' to overwrite the 751 - architectural default. 748 + out of dma_debug_entries and was unable to allocate more on-demand. 65536 749 + entries are preallocated at boot - if this is too low for you boot with 750 + 'dma_debug_entries=<your_desired_number>' to overwrite the default. 752 751 753 752 :: 754 753
+41 -38
kernel/dma/debug.c
··· 47 47 #ifndef PREALLOC_DMA_DEBUG_ENTRIES 48 48 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) 49 49 #endif 50 + /* If the pool runs out, add this many new entries at once */ 51 + #define DMA_DEBUG_DYNAMIC_ENTRIES 256 50 52 51 53 enum { 52 54 dma_debug_single, ··· 648 646 */ 649 647 } 650 648 649 + static int dma_debug_create_entries(u32 num_entries, gfp_t gfp) 650 + { 651 + struct dma_debug_entry *entry, *next_entry; 652 + int i; 653 + 654 + for (i = 0; i < num_entries; ++i) { 655 + entry = kzalloc(sizeof(*entry), gfp); 656 + if (!entry) 657 + goto out_err; 658 + 659 + list_add_tail(&entry->list, &free_entries); 660 + } 661 + 662 + num_free_entries += num_entries; 663 + nr_total_entries += num_entries; 664 + 665 + return 0; 666 + 667 + out_err: 668 + 669 + list_for_each_entry_safe(entry, next_entry, &free_entries, list) { 670 + list_del(&entry->list); 671 + kfree(entry); 672 + } 673 + 674 + return -ENOMEM; 675 + } 676 + 651 677 static struct dma_debug_entry *__dma_entry_alloc(void) 652 678 { 653 679 struct dma_debug_entry *entry; ··· 702 672 unsigned long flags; 703 673 704 674 spin_lock_irqsave(&free_entries_lock, flags); 705 - 706 - if (list_empty(&free_entries)) { 707 - global_disable = true; 708 - spin_unlock_irqrestore(&free_entries_lock, flags); 709 - pr_err("debugging out of memory - disabling\n"); 710 - return NULL; 675 + if (num_free_entries == 0) { 676 + if (dma_debug_create_entries(DMA_DEBUG_DYNAMIC_ENTRIES, 677 + GFP_ATOMIC)) { 678 + global_disable = true; 679 + spin_unlock_irqrestore(&free_entries_lock, flags); 680 + pr_err("debugging out of memory - disabling\n"); 681 + return NULL; 682 + } 711 683 } 712 684 713 685 entry = __dma_entry_alloc(); ··· 795 763 * 1. Initialize core data structures 796 764 * 2. 
Preallocate a given number of dma_debug_entry structs 797 765 */ 798 - 799 - static int prealloc_memory(u32 num_entries) 800 - { 801 - struct dma_debug_entry *entry, *next_entry; 802 - int i; 803 - 804 - for (i = 0; i < num_entries; ++i) { 805 - entry = kzalloc(sizeof(*entry), GFP_KERNEL); 806 - if (!entry) 807 - goto out_err; 808 - 809 - list_add_tail(&entry->list, &free_entries); 810 - } 811 - 812 - num_free_entries = num_entries; 813 - min_free_entries = num_entries; 814 - 815 - pr_info("preallocated %d debug entries\n", num_entries); 816 - 817 - return 0; 818 - 819 - out_err: 820 - 821 - list_for_each_entry_safe(entry, next_entry, &free_entries, list) { 822 - list_del(&entry->list); 823 - kfree(entry); 824 - } 825 - 826 - return -ENOMEM; 827 - } 828 766 829 767 static ssize_t filter_read(struct file *file, char __user *user_buf, 830 768 size_t count, loff_t *ppos) ··· 1040 1038 return 0; 1041 1039 } 1042 1040 1043 - if (prealloc_memory(nr_prealloc_entries) != 0) { 1041 + if (dma_debug_create_entries(nr_prealloc_entries, GFP_KERNEL) != 0) { 1044 1042 pr_err("debugging out of memory error - disabled\n"); 1045 1043 global_disable = true; 1046 1044 1047 1045 return 0; 1048 1046 } 1049 1047 1050 - nr_total_entries = num_free_entries; 1048 + min_free_entries = num_free_entries; 1049 + pr_info("preallocated %d debug entries\n", nr_total_entries); 1051 1050 1052 1051 dma_debug_initialized = true; 1053 1052