/* SPDX-License-Identifier: GPL-2.0 */
/*
 * allocation tagging
 */
#ifndef _LINUX_ALLOC_TAG_H
#define _LINUX_ALLOC_TAG_H

#include <linux/bug.h>
#include <linux/codetag.h>
#include <linux/container_of.h>
#include <linux/preempt.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/static_key.h>
#include <linux/irqflags.h>

struct alloc_tag_counters {
	u64 bytes;
	u64 calls;
};

/*
 * An instance of this structure is created in a special ELF section at every
 * allocation callsite. At runtime, the special section is treated as an
 * array of these. The embedded codetag ties each tag into the codetag
 * framework.
 */
struct alloc_tag {
	struct codetag ct;
	struct alloc_tag_counters __percpu *counters;
} __aligned(8);
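/*
 * Illustration (a sketch, not part of this header): because the section is
 * laid out as a plain array, it can be walked end to end. The bounds below
 * assume the linker's default __start_<section>/__stop_<section> symbols for
 * the "alloc_tags" section named later in this file:
 *
 *	extern struct alloc_tag __start_alloc_tags[];
 *	extern struct alloc_tag __stop_alloc_tags[];
 *	struct alloc_tag *tag;
 *
 *	for (tag = __start_alloc_tags; tag < __stop_alloc_tags; tag++)
 *		inspect(tag);	// hypothetical per-tag helper
 */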

struct alloc_tag_kernel_section {
	struct alloc_tag *first_tag;
	unsigned long count;
};

struct alloc_tag_module_section {
	union {
		unsigned long start_addr;
		struct alloc_tag *first_tag;
	};
	unsigned long end_addr;
	/* used size */
	unsigned long size;
};
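/*
 * For illustration (an assumption based on the fields above, not code from
 * this header): since "size" records the used size in bytes, the number of
 * tags occupying a module section could be derived as
 *
 *	nr_tags = mod_sec->size / sizeof(struct alloc_tag);
 */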

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG

#define CODETAG_EMPTY	((void *)1)

static inline bool is_codetag_empty(union codetag_ref *ref)
{
	return ref->ct == CODETAG_EMPTY;
}

static inline void set_codetag_empty(union codetag_ref *ref)
{
	if (ref)
		ref->ct = CODETAG_EMPTY;
}

#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }
static inline void set_codetag_empty(union codetag_ref *ref) {}

#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
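/*
 * Usage sketch (illustrative): CODETAG_EMPTY marks a reference as
 * deliberately untagged so the debug checks below don't flag it:
 *
 *	union codetag_ref ref = {};
 *
 *	set_codetag_empty(&ref);
 *	if (is_codetag_empty(&ref))
 *		return;		// intentionally untracked, skip accounting
 */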

#ifdef CONFIG_MEM_ALLOC_PROFILING

#define ALLOC_TAG_SECTION_NAME	"alloc_tags"

struct codetag_bytes {
	struct codetag *ct;
	s64 bytes;
};

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep);

static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
{
	return container_of(ct, struct alloc_tag, ct);
}
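/*
 * Example sketch (a hypothetical caller, not part of this header): list the
 * top allocation sites by outstanding bytes using the API declared above:
 *
 *	struct codetag_bytes top[10];
 *	size_t i, nr = alloc_tag_top_users(top, ARRAY_SIZE(top), true);
 *
 *	for (i = 0; i < nr; i++) {
 *		struct alloc_tag *tag = ct_to_alloc_tag(top[i].ct);
 *
 *		pr_info("%s:%u %lld bytes\n", tag->ct.filename,
 *			tag->ct.lineno, top[i].bytes);
 *	}
 */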

#ifdef ARCH_NEEDS_WEAK_PER_CPU
/*
 * When percpu variables are required to be defined as weak, static percpu
 * variables can't be used inside a function (see comments for
 * DECLARE_PER_CPU_SECTION). Instead, all module allocations are accounted
 * to a single shared counter.
 */
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);

#define DEFINE_ALLOC_TAG(_alloc_tag)						\
	static struct alloc_tag _alloc_tag __used __aligned(8)			\
	__section(ALLOC_TAG_SECTION_NAME) = {					\
		.ct = CODE_TAG_INIT,						\
		.counters = &_shared_alloc_tag };

#else /* ARCH_NEEDS_WEAK_PER_CPU */

#define DEFINE_ALLOC_TAG(_alloc_tag)						\
	static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr);	\
	static struct alloc_tag _alloc_tag __used __aligned(8)			\
	__section(ALLOC_TAG_SECTION_NAME) = {					\
		.ct = CODE_TAG_INIT,						\
		.counters = &_alloc_tag_cntr };

#endif /* ARCH_NEEDS_WEAK_PER_CPU */

DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			 mem_alloc_profiling_key);

static inline bool mem_alloc_profiling_enabled(void)
{
	return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
				   &mem_alloc_profiling_key);
}
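/*
 * Typical guard (illustrative): the static key reduces the disabled case to
 * a patched branch, so callers can bail out before doing any accounting:
 *
 *	if (!mem_alloc_profiling_enabled())
 *		return;
 *	do_accounting();	// hypothetical accounting work
 */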

static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
{
	struct alloc_tag_counters v = { 0, 0 };
	struct alloc_tag_counters *counter;
	int cpu;

	for_each_possible_cpu(cpu) {
		counter = per_cpu_ptr(tag->counters, cpu);
		v.bytes += counter->bytes;
		v.calls += counter->calls;
	}

	return v;
}
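/*
 * Usage sketch (hypothetical caller): fold a tag's per-CPU counters into a
 * single snapshot and report it:
 *
 *	struct alloc_tag_counters c = alloc_tag_read(tag);
 *
 *	pr_info("%s:%u bytes=%llu calls=%llu\n",
 *		tag->ct.filename, tag->ct.lineno, c.bytes, c.calls);
 */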

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
{
	WARN_ONCE(ref && ref->ct,
		  "alloc_tag was not cleared (got tag for %s:%u)\n",
		  ref->ct->filename, ref->ct->lineno);

	WARN_ONCE(!tag, "current->alloc_tag not set\n");
}

static inline void alloc_tag_sub_check(union codetag_ref *ref)
{
	WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
}
#else
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {}
static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
#endif

/* Callers must ensure both ref and tag are valid */
static inline bool __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	alloc_tag_add_check(ref, tag);
	if (!ref || !tag)
		return false;

	ref->ct = &tag->ct;
	return true;
}

static inline bool alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	if (unlikely(!__alloc_tag_ref_set(ref, tag)))
		return false;

	/*
	 * We need to increment the call counter every time we have a new
	 * allocation or when we split a large allocation into smaller ones.
	 * Each new reference for every sub-allocation needs to increment the
	 * call counter because when we free each part the counter will be
	 * decremented.
	 */
	this_cpu_inc(tag->counters->calls);
	return true;
}

static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
	if (likely(alloc_tag_ref_set(ref, tag)))
		this_cpu_add(tag->counters->bytes, bytes);
}

static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
{
	struct alloc_tag *tag;

	alloc_tag_sub_check(ref);
	if (!ref || !ref->ct)
		return;

	if (is_codetag_empty(ref)) {
		ref->ct = NULL;
		return;
	}

	tag = ct_to_alloc_tag(ref->ct);

	this_cpu_sub(tag->counters->bytes, bytes);
	this_cpu_dec(tag->counters->calls);

	ref->ct = NULL;
}
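/*
 * Lifecycle sketch (illustrative, not code from this header): a ref stored
 * alongside an allocation is charged on allocation and released on free,
 * which is why add and sub must always be paired with matching sizes:
 *
 *	alloc_tag_add(ref, current->alloc_tag, size);	// at allocation
 *	...
 *	alloc_tag_sub(ref, size);			// at free
 */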

#define alloc_tag_record(p)	((p) = current->alloc_tag)

#else /* CONFIG_MEM_ALLOC_PROFILING */

#define DEFINE_ALLOC_TAG(_alloc_tag)
static inline bool mem_alloc_profiling_enabled(void) { return false; }
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
				 size_t bytes) {}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
#define alloc_tag_record(p)	do {} while (0)

#endif /* CONFIG_MEM_ALLOC_PROFILING */

#define alloc_hooks_tag(_tag, _do_alloc)				\
({									\
	struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag);	\
	typeof(_do_alloc) _res = _do_alloc;				\
	alloc_tag_restore(_tag, _old);					\
	_res;								\
})

#define alloc_hooks(_do_alloc)						\
({									\
	DEFINE_ALLOC_TAG(_alloc_tag);					\
	alloc_hooks_tag(&_alloc_tag, _do_alloc);			\
})
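/*
 * Usage sketch (the wrapper name is hypothetical; the pattern mirrors how
 * allocator entry points are typically wrapped): each expansion of
 * alloc_hooks() defines one struct alloc_tag for its callsite and makes it
 * current around the call via alloc_tag_save()/alloc_tag_restore(), which
 * are declared elsewhere:
 *
 *	#define my_alloc(size, gfp)	alloc_hooks(my_alloc_noprof(size, gfp))
 */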

#endif /* _LINUX_ALLOC_TAG_H */