Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'drm-i915-use-ref_tracker-library-for-tracking-wakerefs'

Andrzej Hajda says:

====================
drm/i915: use ref_tracker library for tracking wakerefs

This is a reviewed series of ref_tracker patches, ready to merge
via network tree, rebased on net-next/main.
i915 patches will be merged later via intel-gfx tree.
====================

Merge on top of an -rc tag in case it's needed in another tree.

Link: https://lore.kernel.org/r/20230224-track_gt-v9-0-5b47a33f55d1@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+176 -36
+23 -2
include/linux/ref_tracker.h
··· 17 17 bool dead; 18 18 struct list_head list; /* List of active trackers */ 19 19 struct list_head quarantine; /* List of dead trackers */ 20 + char name[32]; 20 21 #endif 21 22 }; 22 23 23 24 #ifdef CONFIG_REF_TRACKER 25 + 24 26 static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir, 25 - unsigned int quarantine_count) 27 + unsigned int quarantine_count, 28 + const char *name) 26 29 { 27 30 INIT_LIST_HEAD(&dir->list); 28 31 INIT_LIST_HEAD(&dir->quarantine); ··· 34 31 dir->dead = false; 35 32 refcount_set(&dir->untracked, 1); 36 33 refcount_set(&dir->no_tracker, 1); 34 + strscpy(dir->name, name, sizeof(dir->name)); 37 35 stack_depot_init(); 38 36 } 39 37 40 38 void ref_tracker_dir_exit(struct ref_tracker_dir *dir); 41 39 40 + void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir, 41 + unsigned int display_limit); 42 + 42 43 void ref_tracker_dir_print(struct ref_tracker_dir *dir, 43 44 unsigned int display_limit); 45 + 46 + int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size); 44 47 45 48 int ref_tracker_alloc(struct ref_tracker_dir *dir, 46 49 struct ref_tracker **trackerp, gfp_t gfp); ··· 57 48 #else /* CONFIG_REF_TRACKER */ 58 49 59 50 static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir, 60 - unsigned int quarantine_count) 51 + unsigned int quarantine_count, 52 + const char *name) 61 53 { 62 54 } 63 55 ··· 66 56 { 67 57 } 68 58 59 + static inline void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir, 60 + unsigned int display_limit) 61 + { 62 + } 63 + 69 64 static inline void ref_tracker_dir_print(struct ref_tracker_dir *dir, 70 65 unsigned int display_limit) 71 66 { 67 + } 68 + 69 + static inline int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, 70 + char *buf, size_t size) 71 + { 72 + return 0; 72 73 } 73 74 74 75 static inline int ref_tracker_alloc(struct ref_tracker_dir *dir,
+149 -30
lib/ref_tracker.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-or-later 2 + 3 + #define pr_fmt(fmt) "ref_tracker: " fmt 4 + 2 5 #include <linux/export.h> 6 + #include <linux/list_sort.h> 3 7 #include <linux/ref_tracker.h> 4 8 #include <linux/slab.h> 5 9 #include <linux/stacktrace.h> 6 10 #include <linux/stackdepot.h> 7 11 8 12 #define REF_TRACKER_STACK_ENTRIES 16 13 + #define STACK_BUF_SIZE 1024 9 14 10 15 struct ref_tracker { 11 16 struct list_head head; /* anchor into dir->list or dir->quarantine */ ··· 18 13 depot_stack_handle_t alloc_stack_handle; 19 14 depot_stack_handle_t free_stack_handle; 20 15 }; 16 + 17 + struct ref_tracker_dir_stats { 18 + int total; 19 + int count; 20 + struct { 21 + depot_stack_handle_t stack_handle; 22 + unsigned int count; 23 + } stacks[]; 24 + }; 25 + 26 + static struct ref_tracker_dir_stats * 27 + ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit) 28 + { 29 + struct ref_tracker_dir_stats *stats; 30 + struct ref_tracker *tracker; 31 + 32 + stats = kmalloc(struct_size(stats, stacks, limit), 33 + GFP_NOWAIT | __GFP_NOWARN); 34 + if (!stats) 35 + return ERR_PTR(-ENOMEM); 36 + stats->total = 0; 37 + stats->count = 0; 38 + 39 + list_for_each_entry(tracker, &dir->list, head) { 40 + depot_stack_handle_t stack = tracker->alloc_stack_handle; 41 + int i; 42 + 43 + ++stats->total; 44 + for (i = 0; i < stats->count; ++i) 45 + if (stats->stacks[i].stack_handle == stack) 46 + break; 47 + if (i >= limit) 48 + continue; 49 + if (i >= stats->count) { 50 + stats->stacks[i].stack_handle = stack; 51 + stats->stacks[i].count = 0; 52 + ++stats->count; 53 + } 54 + ++stats->stacks[i].count; 55 + } 56 + 57 + return stats; 58 + } 59 + 60 + struct ostream { 61 + char *buf; 62 + int size, used; 63 + }; 64 + 65 + #define pr_ostream(stream, fmt, args...) 
\ 66 + ({ \ 67 + struct ostream *_s = (stream); \ 68 + \ 69 + if (!_s->buf) { \ 70 + pr_err(fmt, ##args); \ 71 + } else { \ 72 + int ret, len = _s->size - _s->used; \ 73 + ret = snprintf(_s->buf + _s->used, len, pr_fmt(fmt), ##args); \ 74 + _s->used += min(ret, len); \ 75 + } \ 76 + }) 77 + 78 + static void 79 + __ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir, 80 + unsigned int display_limit, struct ostream *s) 81 + { 82 + struct ref_tracker_dir_stats *stats; 83 + unsigned int i = 0, skipped; 84 + depot_stack_handle_t stack; 85 + char *sbuf; 86 + 87 + lockdep_assert_held(&dir->lock); 88 + 89 + if (list_empty(&dir->list)) 90 + return; 91 + 92 + stats = ref_tracker_get_stats(dir, display_limit); 93 + if (IS_ERR(stats)) { 94 + pr_ostream(s, "%s@%pK: couldn't get stats, error %pe\n", 95 + dir->name, dir, stats); 96 + return; 97 + } 98 + 99 + sbuf = kmalloc(STACK_BUF_SIZE, GFP_NOWAIT | __GFP_NOWARN); 100 + 101 + for (i = 0, skipped = stats->total; i < stats->count; ++i) { 102 + stack = stats->stacks[i].stack_handle; 103 + if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4)) 104 + sbuf[0] = 0; 105 + pr_ostream(s, "%s@%pK has %d/%d users at\n%s\n", dir->name, dir, 106 + stats->stacks[i].count, stats->total, sbuf); 107 + skipped -= stats->stacks[i].count; 108 + } 109 + 110 + if (skipped) 111 + pr_ostream(s, "%s@%pK skipped reports about %d/%d users.\n", 112 + dir->name, dir, skipped, stats->total); 113 + 114 + kfree(sbuf); 115 + 116 + kfree(stats); 117 + } 118 + 119 + void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir, 120 + unsigned int display_limit) 121 + { 122 + struct ostream os = {}; 123 + 124 + __ref_tracker_dir_pr_ostream(dir, display_limit, &os); 125 + } 126 + EXPORT_SYMBOL(ref_tracker_dir_print_locked); 127 + 128 + void ref_tracker_dir_print(struct ref_tracker_dir *dir, 129 + unsigned int display_limit) 130 + { 131 + unsigned long flags; 132 + 133 + spin_lock_irqsave(&dir->lock, flags); 134 + ref_tracker_dir_print_locked(dir, 
display_limit); 135 + spin_unlock_irqrestore(&dir->lock, flags); 136 + } 137 + EXPORT_SYMBOL(ref_tracker_dir_print); 138 + 139 + int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size) 140 + { 141 + struct ostream os = { .buf = buf, .size = size }; 142 + unsigned long flags; 143 + 144 + spin_lock_irqsave(&dir->lock, flags); 145 + __ref_tracker_dir_pr_ostream(dir, 16, &os); 146 + spin_unlock_irqrestore(&dir->lock, flags); 147 + 148 + return os.used; 149 + } 150 + EXPORT_SYMBOL(ref_tracker_dir_snprint); 21 151 22 152 void ref_tracker_dir_exit(struct ref_tracker_dir *dir) 23 153 { ··· 167 27 kfree(tracker); 168 28 dir->quarantine_avail++; 169 29 } 170 - list_for_each_entry_safe(tracker, n, &dir->list, head) { 171 - pr_err("leaked reference.\n"); 172 - if (tracker->alloc_stack_handle) 173 - stack_depot_print(tracker->alloc_stack_handle); 30 + if (!list_empty(&dir->list)) { 31 + ref_tracker_dir_print_locked(dir, 16); 174 32 leak = true; 175 - list_del(&tracker->head); 176 - kfree(tracker); 33 + list_for_each_entry_safe(tracker, n, &dir->list, head) { 34 + list_del(&tracker->head); 35 + kfree(tracker); 36 + } 177 37 } 178 38 spin_unlock_irqrestore(&dir->lock, flags); 179 39 WARN_ON_ONCE(leak); ··· 182 42 } 183 43 EXPORT_SYMBOL(ref_tracker_dir_exit); 184 44 185 - void ref_tracker_dir_print(struct ref_tracker_dir *dir, 186 - unsigned int display_limit) 187 - { 188 - struct ref_tracker *tracker; 189 - unsigned long flags; 190 - unsigned int i = 0; 191 - 192 - spin_lock_irqsave(&dir->lock, flags); 193 - list_for_each_entry(tracker, &dir->list, head) { 194 - if (i < display_limit) { 195 - pr_err("leaked reference.\n"); 196 - if (tracker->alloc_stack_handle) 197 - stack_depot_print(tracker->alloc_stack_handle); 198 - i++; 199 - } else { 200 - break; 201 - } 202 - } 203 - spin_unlock_irqrestore(&dir->lock, flags); 204 - } 205 - EXPORT_SYMBOL(ref_tracker_dir_print); 206 - 207 45 int ref_tracker_alloc(struct ref_tracker_dir *dir, 208 46 struct ref_tracker 
**trackerp, 209 47 gfp_t gfp) ··· 189 71 unsigned long entries[REF_TRACKER_STACK_ENTRIES]; 190 72 struct ref_tracker *tracker; 191 73 unsigned int nr_entries; 192 - gfp_t gfp_mask = gfp; 74 + gfp_t gfp_mask = gfp | __GFP_NOWARN; 193 75 unsigned long flags; 194 76 195 77 WARN_ON_ONCE(dir->dead); ··· 237 119 return -EEXIST; 238 120 } 239 121 nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1); 240 - stack_handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC); 122 + stack_handle = stack_depot_save(entries, nr_entries, 123 + GFP_NOWAIT | __GFP_NOWARN); 241 124 242 125 spin_lock_irqsave(&dir->lock, flags); 243 126 if (tracker->dead) {
+1 -1
lib/test_ref_tracker.c
··· 64 64 { 65 65 int i; 66 66 67 - ref_tracker_dir_init(&ref_dir, 100); 67 + ref_tracker_dir_init(&ref_dir, 100, "selftest"); 68 68 69 69 timer_setup(&test_ref_tracker_timer, test_ref_tracker_timer_func, 0); 70 70 mod_timer(&test_ref_tracker_timer, jiffies + 1);
+1 -1
net/core/dev.c
··· 10635 10635 dev = PTR_ALIGN(p, NETDEV_ALIGN); 10636 10636 dev->padded = (char *)dev - (char *)p; 10637 10637 10638 - ref_tracker_dir_init(&dev->refcnt_tracker, 128); 10638 + ref_tracker_dir_init(&dev->refcnt_tracker, 128, name); 10639 10639 #ifdef CONFIG_PCPU_DEV_REFCNT 10640 10640 dev->pcpu_refcnt = alloc_percpu(int); 10641 10641 if (!dev->pcpu_refcnt)
+2 -2
net/core/net_namespace.c
··· 308 308 /* init code that must occur even if setup_net() is not called. */ 309 309 static __net_init void preinit_net(struct net *net) 310 310 { 311 - ref_tracker_dir_init(&net->notrefcnt_tracker, 128); 311 + ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt"); 312 312 } 313 313 314 314 /* ··· 322 322 LIST_HEAD(net_exit_list); 323 323 324 324 refcount_set(&net->ns.count, 1); 325 - ref_tracker_dir_init(&net->refcnt_tracker, 128); 325 + ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt"); 326 326 327 327 refcount_set(&net->passive, 1); 328 328 get_random_bytes(&net->hash_mix, sizeof(u32));