Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

alloc_tag: load module tags into separate contiguous memory

When a module gets unloaded there is a possibility that some of the
allocations it made are still used and therefore the allocation tags
corresponding to these allocations are still referenced. As such, the
memory for these tags can't be freed. This is currently handled as an
abnormal situation and module's data section is not being unloaded. To
handle this situation without keeping module's data in memory, allow
codetags with longer lifespan than the module to be loaded into their own
separate memory. Maple trees are used to track both the in-use areas of
this separate memory and the gaps left behind after module unloading.
Allocation tags arrange their separate memory so that it is virtually
contiguous, which allows simple allocation tag indexing later in this
patchset. The
size of this virtually contiguous memory is set to store up to 100000
allocation tags.

[surenb@google.com: fix empty codetag module section handling]
Link: https://lkml.kernel.org/r/20241101000017.3856204-1-surenb@google.com
[akpm@linux-foundation.org: update comment, per Dan]
Link: https://lkml.kernel.org/r/20241023170759.999909-4-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov (AMD) <bp@alien8.de>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Daniel Gomez <da.gomez@samsung.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: David Rientjes <rientjes@google.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Liam R. Howlett <Liam.Howlett@Oracle.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Minchan Kim <minchan@google.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Petr Pavlu <petr.pavlu@suse.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Sami Tolvanen <samitolvanen@google.com>
Cc: Sourav Panda <souravpanda@google.com>
Cc: Steven Rostedt (Google) <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Huth <thuth@redhat.com>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xiongwei Song <xiongwei.song@windriver.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Suren Baghdasaryan; committed by Andrew Morton.
0db6f8d7 3e09c500

+447 -64
+19
include/asm-generic/codetag.lds.h
··· 11 11 #define CODETAG_SECTIONS() \ 12 12 SECTION_WITH_BOUNDARIES(alloc_tags) 13 13 14 + /* 15 + * Module codetags which aren't used after module unload, therefore have the 16 + * same lifespan as the module and can be safely unloaded with the module. 17 + */ 18 + #define MOD_CODETAG_SECTIONS() 19 + 20 + #define MOD_SEPARATE_CODETAG_SECTION(_name) \ 21 + .codetag.##_name : { \ 22 + SECTION_WITH_BOUNDARIES(_name) \ 23 + } 24 + 25 + /* 26 + * For codetags which might be used after module unload, therefore might stay 27 + * longer in memory. Each such codetag type has its own section so that we can 28 + * unload them individually once unused. 29 + */ 30 + #define MOD_SEPARATE_CODETAG_SECTIONS() \ 31 + MOD_SEPARATE_CODETAG_SECTION(alloc_tags) 32 + 14 33 #endif /* __ASM_GENERIC_CODETAG_LDS_H */
+11 -2
include/linux/alloc_tag.h
··· 30 30 struct alloc_tag_counters __percpu *counters; 31 31 } __aligned(8); 32 32 33 + struct alloc_tag_module_section { 34 + unsigned long start_addr; 35 + unsigned long end_addr; 36 + /* used size */ 37 + unsigned long size; 38 + }; 39 + 33 40 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG 34 41 35 42 #define CODETAG_EMPTY ((void *)1) ··· 61 54 62 55 #ifdef CONFIG_MEM_ALLOC_PROFILING 63 56 57 + #define ALLOC_TAG_SECTION_NAME "alloc_tags" 58 + 64 59 struct codetag_bytes { 65 60 struct codetag *ct; 66 61 s64 bytes; ··· 85 76 86 77 #define DEFINE_ALLOC_TAG(_alloc_tag) \ 87 78 static struct alloc_tag _alloc_tag __used __aligned(8) \ 88 - __section("alloc_tags") = { \ 79 + __section(ALLOC_TAG_SECTION_NAME) = { \ 89 80 .ct = CODE_TAG_INIT, \ 90 81 .counters = &_shared_alloc_tag }; 91 82 ··· 94 85 #define DEFINE_ALLOC_TAG(_alloc_tag) \ 95 86 static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \ 96 87 static struct alloc_tag _alloc_tag __used __aligned(8) \ 97 - __section("alloc_tags") = { \ 88 + __section(ALLOC_TAG_SECTION_NAME) = { \ 98 89 .ct = CODE_TAG_INIT, \ 99 90 .counters = &_alloc_tag_cntr }; 100 91
+32 -5
include/linux/codetag.h
··· 35 35 size_t tag_size; 36 36 void (*module_load)(struct codetag_type *cttype, 37 37 struct codetag_module *cmod); 38 - bool (*module_unload)(struct codetag_type *cttype, 38 + void (*module_unload)(struct codetag_type *cttype, 39 39 struct codetag_module *cmod); 40 + #ifdef CONFIG_MODULES 41 + void (*module_replaced)(struct module *mod, struct module *new_mod); 42 + bool (*needs_section_mem)(struct module *mod, unsigned long size); 43 + void *(*alloc_section_mem)(struct module *mod, unsigned long size, 44 + unsigned int prepend, unsigned long align); 45 + void (*free_section_mem)(struct module *mod, bool used); 46 + #endif 40 47 }; 41 48 42 49 struct codetag_iterator { ··· 78 71 codetag_register_type(const struct codetag_type_desc *desc); 79 72 80 73 #if defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) 74 + 75 + bool codetag_needs_module_section(struct module *mod, const char *name, 76 + unsigned long size); 77 + void *codetag_alloc_module_section(struct module *mod, const char *name, 78 + unsigned long size, unsigned int prepend, 79 + unsigned long align); 80 + void codetag_free_module_sections(struct module *mod); 81 + void codetag_module_replaced(struct module *mod, struct module *new_mod); 81 82 void codetag_load_module(struct module *mod); 82 - bool codetag_unload_module(struct module *mod); 83 - #else 83 + void codetag_unload_module(struct module *mod); 84 + 85 + #else /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */ 86 + 87 + static inline bool 88 + codetag_needs_module_section(struct module *mod, const char *name, 89 + unsigned long size) { return false; } 90 + static inline void * 91 + codetag_alloc_module_section(struct module *mod, const char *name, 92 + unsigned long size, unsigned int prepend, 93 + unsigned long align) { return NULL; } 94 + static inline void codetag_free_module_sections(struct module *mod) {} 95 + static inline void codetag_module_replaced(struct module *mod, struct module *new_mod) {} 84 96 static inline void 
codetag_load_module(struct module *mod) {} 85 - static inline bool codetag_unload_module(struct module *mod) { return true; } 86 - #endif 97 + static inline void codetag_unload_module(struct module *mod) {} 98 + 99 + #endif /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */ 87 100 88 101 #endif /* _LINUX_CODETAG_H */
+58 -26
kernel/module/main.c
··· 1251 1251 return 0; 1252 1252 } 1253 1253 1254 - static void module_memory_free(struct module *mod, enum mod_mem_type type, 1255 - bool unload_codetags) 1254 + static void module_memory_free(struct module *mod, enum mod_mem_type type) 1256 1255 { 1257 1256 struct module_memory *mem = &mod->mem[type]; 1258 - void *ptr = mem->base; 1259 1257 1260 1258 if (mem->is_rox) 1261 1259 vfree(mem->rw_copy); 1262 1260 1263 - if (!unload_codetags && mod_mem_type_is_core_data(type)) 1264 - return; 1265 - 1266 - execmem_free(ptr); 1261 + execmem_free(mem->base); 1267 1262 } 1268 1263 1269 - static void free_mod_mem(struct module *mod, bool unload_codetags) 1264 + static void free_mod_mem(struct module *mod) 1270 1265 { 1271 1266 for_each_mod_mem_type(type) { 1272 1267 struct module_memory *mod_mem = &mod->mem[type]; ··· 1272 1277 /* Free lock-classes; relies on the preceding sync_rcu(). */ 1273 1278 lockdep_free_key_range(mod_mem->base, mod_mem->size); 1274 1279 if (mod_mem->size) 1275 - module_memory_free(mod, type, unload_codetags); 1280 + module_memory_free(mod, type); 1276 1281 } 1277 1282 1278 1283 /* MOD_DATA hosts mod, so free it at last */ 1279 1284 lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size); 1280 - module_memory_free(mod, MOD_DATA, unload_codetags); 1285 + module_memory_free(mod, MOD_DATA); 1281 1286 } 1282 1287 1283 1288 /* Free a module, remove from lists, etc. 
*/ 1284 1289 static void free_module(struct module *mod) 1285 1290 { 1286 - bool unload_codetags; 1287 - 1288 1291 trace_module_free(mod); 1289 1292 1290 - unload_codetags = codetag_unload_module(mod); 1291 - if (!unload_codetags) 1292 - pr_warn("%s: memory allocation(s) from the module still alive, cannot unload cleanly\n", 1293 - mod->name); 1293 + codetag_unload_module(mod); 1294 1294 1295 1295 mod_sysfs_teardown(mod); 1296 1296 ··· 1328 1338 kfree(mod->args); 1329 1339 percpu_modfree(mod); 1330 1340 1331 - free_mod_mem(mod, unload_codetags); 1341 + free_mod_mem(mod); 1332 1342 } 1333 1343 1334 1344 void *__symbol_get(const char *symbol) ··· 1592 1602 1593 1603 if (WARN_ON_ONCE(type == MOD_INVALID)) 1594 1604 continue; 1605 + 1606 + /* 1607 + * Do not allocate codetag memory as we load it into 1608 + * preallocated contiguous memory. 1609 + */ 1610 + if (codetag_needs_module_section(mod, sname, s->sh_size)) { 1611 + /* 1612 + * s->sh_entsize won't be used but populate the 1613 + * type field to avoid confusion. 
1614 + */ 1615 + s->sh_entsize = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK) 1616 + << SH_ENTSIZE_TYPE_SHIFT; 1617 + continue; 1618 + } 1595 1619 1596 1620 s->sh_entsize = module_get_offset_and_type(mod, type, s, i); 1597 1621 pr_debug("\t%s\n", sname); ··· 2281 2277 int i; 2282 2278 enum mod_mem_type t = 0; 2283 2279 int ret = -ENOMEM; 2280 + bool codetag_section_found = false; 2284 2281 2285 2282 for_each_mod_mem_type(type) { 2286 2283 if (!mod->mem[type].size) { ··· 2293 2288 ret = module_memory_alloc(mod, type); 2294 2289 if (ret) { 2295 2290 t = type; 2296 - goto out_enomem; 2291 + goto out_err; 2297 2292 } 2298 2293 } 2299 2294 ··· 2302 2297 for (i = 0; i < info->hdr->e_shnum; i++) { 2303 2298 void *dest; 2304 2299 Elf_Shdr *shdr = &info->sechdrs[i]; 2305 - enum mod_mem_type type = shdr->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT; 2306 - unsigned long offset = shdr->sh_entsize & SH_ENTSIZE_OFFSET_MASK; 2300 + const char *sname; 2307 2301 unsigned long addr; 2308 2302 2309 2303 if (!(shdr->sh_flags & SHF_ALLOC)) 2310 2304 continue; 2311 2305 2312 - addr = (unsigned long)mod->mem[type].base + offset; 2313 - dest = mod->mem[type].rw_copy + offset; 2306 + sname = info->secstrings + shdr->sh_name; 2307 + /* 2308 + * Load codetag sections separately as they might still be used 2309 + * after module unload. 
2310 + */ 2311 + if (codetag_needs_module_section(mod, sname, shdr->sh_size)) { 2312 + dest = codetag_alloc_module_section(mod, sname, shdr->sh_size, 2313 + arch_mod_section_prepend(mod, i), shdr->sh_addralign); 2314 + if (WARN_ON(!dest)) { 2315 + ret = -EINVAL; 2316 + goto out_err; 2317 + } 2318 + if (IS_ERR(dest)) { 2319 + ret = PTR_ERR(dest); 2320 + goto out_err; 2321 + } 2322 + addr = (unsigned long)dest; 2323 + codetag_section_found = true; 2324 + } else { 2325 + enum mod_mem_type type = shdr->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT; 2326 + unsigned long offset = shdr->sh_entsize & SH_ENTSIZE_OFFSET_MASK; 2327 + 2328 + addr = (unsigned long)mod->mem[type].base + offset; 2329 + dest = mod->mem[type].rw_copy + offset; 2330 + } 2314 2331 2315 2332 if (shdr->sh_type != SHT_NOBITS) { 2316 2333 /* ··· 2344 2317 if (i == info->index.mod && 2345 2318 (WARN_ON_ONCE(shdr->sh_size != sizeof(struct module)))) { 2346 2319 ret = -ENOEXEC; 2347 - goto out_enomem; 2320 + goto out_err; 2348 2321 } 2349 2322 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); 2350 2323 } ··· 2360 2333 } 2361 2334 2362 2335 return 0; 2363 - out_enomem: 2336 + out_err: 2364 2337 for (t--; t >= 0; t--) 2365 - module_memory_free(mod, t, true); 2338 + module_memory_free(mod, t); 2339 + if (codetag_section_found) 2340 + codetag_free_module_sections(mod); 2341 + 2366 2342 return ret; 2367 2343 } 2368 2344 ··· 2486 2456 /* Module has been copied to its final place now: return it. */ 2487 2457 mod = (void *)info->sechdrs[info->index.mod].sh_addr; 2488 2458 kmemleak_load_module(mod, info); 2459 + codetag_module_replaced(info->mod, mod); 2460 + 2489 2461 return mod; 2490 2462 } 2491 2463 ··· 2497 2465 percpu_modfree(mod); 2498 2466 module_arch_freeing_init(mod); 2499 2467 2500 - free_mod_mem(mod, true); 2468 + free_mod_mem(mod); 2501 2469 } 2502 2470 2503 2471 int __weak module_finalize(const Elf_Ehdr *hdr,
+231 -22
lib/alloc_tag.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 #include <linux/alloc_tag.h> 3 + #include <linux/execmem.h> 3 4 #include <linux/fs.h> 4 5 #include <linux/gfp.h> 5 6 #include <linux/module.h> ··· 10 9 #include <linux/seq_file.h> 11 10 12 11 #define ALLOCINFO_FILE_NAME "allocinfo" 12 + #define MODULE_ALLOC_TAG_VMAP_SIZE (100000UL * sizeof(struct alloc_tag)) 13 13 14 14 #ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT 15 15 static bool mem_profiling_support __meminitdata = true; ··· 176 174 } 177 175 } 178 176 179 - static bool alloc_tag_module_unload(struct codetag_type *cttype, 180 - struct codetag_module *cmod) 177 + #ifdef CONFIG_MODULES 178 + 179 + static struct maple_tree mod_area_mt = MTREE_INIT(mod_area_mt, MT_FLAGS_ALLOC_RANGE); 180 + /* A dummy object used to indicate an unloaded module */ 181 + static struct module unloaded_mod; 182 + /* A dummy object used to indicate a module prepended area */ 183 + static struct module prepend_mod; 184 + 185 + static struct alloc_tag_module_section module_tags; 186 + 187 + static bool needs_section_mem(struct module *mod, unsigned long size) 181 188 { 182 - struct codetag_iterator iter = codetag_get_ct_iter(cttype); 183 - struct alloc_tag_counters counter; 184 - bool module_unused = true; 185 - struct alloc_tag *tag; 186 - struct codetag *ct; 189 + return size >= sizeof(struct alloc_tag); 190 + } 187 191 188 - for (ct = codetag_next_ct(&iter); ct; ct = codetag_next_ct(&iter)) { 189 - if (iter.cmod != cmod) 190 - continue; 192 + static struct alloc_tag *find_used_tag(struct alloc_tag *from, struct alloc_tag *to) 193 + { 194 + while (from <= to) { 195 + struct alloc_tag_counters counter; 191 196 192 - tag = ct_to_alloc_tag(ct); 193 - counter = alloc_tag_read(tag); 194 - 195 - if (WARN(counter.bytes, 196 - "%s:%u module %s func:%s has %llu allocated at module unload", 197 - ct->filename, ct->lineno, ct->modname, ct->function, counter.bytes)) 198 - module_unused = false; 197 + counter = alloc_tag_read(from); 198 + 
if (counter.bytes) 199 + return from; 200 + from++; 199 201 } 200 202 201 - return module_unused; 203 + return NULL; 202 204 } 205 + 206 + /* Called with mod_area_mt locked */ 207 + static void clean_unused_module_areas_locked(void) 208 + { 209 + MA_STATE(mas, &mod_area_mt, 0, module_tags.size); 210 + struct module *val; 211 + 212 + mas_for_each(&mas, val, module_tags.size) { 213 + if (val != &unloaded_mod) 214 + continue; 215 + 216 + /* Release area if all tags are unused */ 217 + if (!find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index), 218 + (struct alloc_tag *)(module_tags.start_addr + mas.last))) 219 + mas_erase(&mas); 220 + } 221 + } 222 + 223 + /* Called with mod_area_mt locked */ 224 + static bool find_aligned_area(struct ma_state *mas, unsigned long section_size, 225 + unsigned long size, unsigned int prepend, unsigned long align) 226 + { 227 + bool cleanup_done = false; 228 + 229 + repeat: 230 + /* Try finding exact size and hope the start is aligned */ 231 + if (!mas_empty_area(mas, 0, section_size - 1, prepend + size)) { 232 + if (IS_ALIGNED(mas->index + prepend, align)) 233 + return true; 234 + 235 + /* Try finding larger area to align later */ 236 + mas_reset(mas); 237 + if (!mas_empty_area(mas, 0, section_size - 1, 238 + size + prepend + align - 1)) 239 + return true; 240 + } 241 + 242 + /* No free area, try cleanup stale data and repeat the search once */ 243 + if (!cleanup_done) { 244 + clean_unused_module_areas_locked(); 245 + cleanup_done = true; 246 + mas_reset(mas); 247 + goto repeat; 248 + } 249 + 250 + return false; 251 + } 252 + 253 + static void *reserve_module_tags(struct module *mod, unsigned long size, 254 + unsigned int prepend, unsigned long align) 255 + { 256 + unsigned long section_size = module_tags.end_addr - module_tags.start_addr; 257 + MA_STATE(mas, &mod_area_mt, 0, section_size - 1); 258 + unsigned long offset; 259 + void *ret = NULL; 260 + 261 + /* If no tags return error */ 262 + if (size < sizeof(struct 
alloc_tag)) 263 + return ERR_PTR(-EINVAL); 264 + 265 + /* 266 + * align is always power of 2, so we can use IS_ALIGNED and ALIGN. 267 + * align 0 or 1 means no alignment, to simplify set to 1. 268 + */ 269 + if (!align) 270 + align = 1; 271 + 272 + mas_lock(&mas); 273 + if (!find_aligned_area(&mas, section_size, size, prepend, align)) { 274 + ret = ERR_PTR(-ENOMEM); 275 + goto unlock; 276 + } 277 + 278 + /* Mark found area as reserved */ 279 + offset = mas.index; 280 + offset += prepend; 281 + offset = ALIGN(offset, align); 282 + if (offset != mas.index) { 283 + unsigned long pad_start = mas.index; 284 + 285 + mas.last = offset - 1; 286 + mas_store(&mas, &prepend_mod); 287 + if (mas_is_err(&mas)) { 288 + ret = ERR_PTR(xa_err(mas.node)); 289 + goto unlock; 290 + } 291 + mas.index = offset; 292 + mas.last = offset + size - 1; 293 + mas_store(&mas, mod); 294 + if (mas_is_err(&mas)) { 295 + mas.index = pad_start; 296 + mas_erase(&mas); 297 + ret = ERR_PTR(xa_err(mas.node)); 298 + } 299 + } else { 300 + mas.last = offset + size - 1; 301 + mas_store(&mas, mod); 302 + if (mas_is_err(&mas)) 303 + ret = ERR_PTR(xa_err(mas.node)); 304 + } 305 + unlock: 306 + mas_unlock(&mas); 307 + 308 + if (IS_ERR(ret)) 309 + return ret; 310 + 311 + if (module_tags.size < offset + size) 312 + module_tags.size = offset + size; 313 + 314 + return (struct alloc_tag *)(module_tags.start_addr + offset); 315 + } 316 + 317 + static void release_module_tags(struct module *mod, bool used) 318 + { 319 + MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size); 320 + struct alloc_tag *tag; 321 + struct module *val; 322 + 323 + mas_lock(&mas); 324 + mas_for_each_rev(&mas, val, 0) 325 + if (val == mod) 326 + break; 327 + 328 + if (!val) /* module not found */ 329 + goto out; 330 + 331 + if (!used) 332 + goto release_area; 333 + 334 + /* Find out if the area is used */ 335 + tag = find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index), 336 + (struct alloc_tag 
*)(module_tags.start_addr + mas.last)); 337 + if (tag) { 338 + struct alloc_tag_counters counter = alloc_tag_read(tag); 339 + 340 + pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n", 341 + tag->ct.filename, tag->ct.lineno, tag->ct.modname, 342 + tag->ct.function, counter.bytes); 343 + } else { 344 + used = false; 345 + } 346 + release_area: 347 + mas_store(&mas, used ? &unloaded_mod : NULL); 348 + val = mas_prev_range(&mas, 0); 349 + if (val == &prepend_mod) 350 + mas_store(&mas, NULL); 351 + out: 352 + mas_unlock(&mas); 353 + } 354 + 355 + static void replace_module(struct module *mod, struct module *new_mod) 356 + { 357 + MA_STATE(mas, &mod_area_mt, 0, module_tags.size); 358 + struct module *val; 359 + 360 + mas_lock(&mas); 361 + mas_for_each(&mas, val, module_tags.size) { 362 + if (val != mod) 363 + continue; 364 + 365 + mas_store_gfp(&mas, new_mod, GFP_KERNEL); 366 + break; 367 + } 368 + mas_unlock(&mas); 369 + } 370 + 371 + static int __init alloc_mod_tags_mem(void) 372 + { 373 + /* Allocate space to copy allocation tags */ 374 + module_tags.start_addr = (unsigned long)execmem_alloc(EXECMEM_MODULE_DATA, 375 + MODULE_ALLOC_TAG_VMAP_SIZE); 376 + if (!module_tags.start_addr) 377 + return -ENOMEM; 378 + 379 + module_tags.end_addr = module_tags.start_addr + MODULE_ALLOC_TAG_VMAP_SIZE; 380 + 381 + return 0; 382 + } 383 + 384 + static void __init free_mod_tags_mem(void) 385 + { 386 + execmem_free((void *)module_tags.start_addr); 387 + module_tags.start_addr = 0; 388 + } 389 + 390 + #else /* CONFIG_MODULES */ 391 + 392 + static inline int alloc_mod_tags_mem(void) { return 0; } 393 + static inline void free_mod_tags_mem(void) {} 394 + 395 + #endif /* CONFIG_MODULES */ 203 396 204 397 static int __init setup_early_mem_profiling(char *str) 205 398 { ··· 471 274 static int __init alloc_tag_init(void) 472 275 { 473 276 const struct codetag_type_desc desc = { 474 - .section = "alloc_tags", 475 - .tag_size = sizeof(struct alloc_tag), 476 - 
.module_unload = alloc_tag_module_unload, 277 + .section = ALLOC_TAG_SECTION_NAME, 278 + .tag_size = sizeof(struct alloc_tag), 279 + #ifdef CONFIG_MODULES 280 + .needs_section_mem = needs_section_mem, 281 + .alloc_section_mem = reserve_module_tags, 282 + .free_section_mem = release_module_tags, 283 + .module_replaced = replace_module, 284 + #endif 477 285 }; 286 + int res; 287 + 288 + res = alloc_mod_tags_mem(); 289 + if (res) 290 + return res; 478 291 479 292 alloc_tag_cttype = codetag_register_type(&desc); 480 - if (IS_ERR(alloc_tag_cttype)) 293 + if (IS_ERR(alloc_tag_cttype)) { 294 + free_mod_tags_mem(); 481 295 return PTR_ERR(alloc_tag_cttype); 296 + } 482 297 483 298 sysctl_init(); 484 299 procfs_init();
+93 -7
lib/codetag.c
··· 207 207 } 208 208 209 209 #ifdef CONFIG_MODULES 210 + #define CODETAG_SECTION_PREFIX ".codetag." 211 + 212 + /* Some codetag types need a separate module section */ 213 + bool codetag_needs_module_section(struct module *mod, const char *name, 214 + unsigned long size) 215 + { 216 + const char *type_name; 217 + struct codetag_type *cttype; 218 + bool ret = false; 219 + 220 + if (strncmp(name, CODETAG_SECTION_PREFIX, strlen(CODETAG_SECTION_PREFIX))) 221 + return false; 222 + 223 + type_name = name + strlen(CODETAG_SECTION_PREFIX); 224 + mutex_lock(&codetag_lock); 225 + list_for_each_entry(cttype, &codetag_types, link) { 226 + if (strcmp(type_name, cttype->desc.section) == 0) { 227 + if (!cttype->desc.needs_section_mem) 228 + break; 229 + 230 + down_write(&cttype->mod_lock); 231 + ret = cttype->desc.needs_section_mem(mod, size); 232 + up_write(&cttype->mod_lock); 233 + break; 234 + } 235 + } 236 + mutex_unlock(&codetag_lock); 237 + 238 + return ret; 239 + } 240 + 241 + void *codetag_alloc_module_section(struct module *mod, const char *name, 242 + unsigned long size, unsigned int prepend, 243 + unsigned long align) 244 + { 245 + const char *type_name = name + strlen(CODETAG_SECTION_PREFIX); 246 + struct codetag_type *cttype; 247 + void *ret = ERR_PTR(-EINVAL); 248 + 249 + mutex_lock(&codetag_lock); 250 + list_for_each_entry(cttype, &codetag_types, link) { 251 + if (strcmp(type_name, cttype->desc.section) == 0) { 252 + if (WARN_ON(!cttype->desc.alloc_section_mem)) 253 + break; 254 + 255 + down_write(&cttype->mod_lock); 256 + ret = cttype->desc.alloc_section_mem(mod, size, prepend, align); 257 + up_write(&cttype->mod_lock); 258 + break; 259 + } 260 + } 261 + mutex_unlock(&codetag_lock); 262 + 263 + return ret; 264 + } 265 + 266 + void codetag_free_module_sections(struct module *mod) 267 + { 268 + struct codetag_type *cttype; 269 + 270 + mutex_lock(&codetag_lock); 271 + list_for_each_entry(cttype, &codetag_types, link) { 272 + if (!cttype->desc.free_section_mem) 273 + 
continue; 274 + 275 + down_write(&cttype->mod_lock); 276 + cttype->desc.free_section_mem(mod, false); 277 + up_write(&cttype->mod_lock); 278 + } 279 + mutex_unlock(&codetag_lock); 280 + } 281 + 282 + void codetag_module_replaced(struct module *mod, struct module *new_mod) 283 + { 284 + struct codetag_type *cttype; 285 + 286 + mutex_lock(&codetag_lock); 287 + list_for_each_entry(cttype, &codetag_types, link) { 288 + if (!cttype->desc.module_replaced) 289 + continue; 290 + 291 + down_write(&cttype->mod_lock); 292 + cttype->desc.module_replaced(mod, new_mod); 293 + up_write(&cttype->mod_lock); 294 + } 295 + mutex_unlock(&codetag_lock); 296 + } 297 + 210 298 void codetag_load_module(struct module *mod) 211 299 { 212 300 struct codetag_type *cttype; ··· 308 220 mutex_unlock(&codetag_lock); 309 221 } 310 222 311 - bool codetag_unload_module(struct module *mod) 223 + void codetag_unload_module(struct module *mod) 312 224 { 313 225 struct codetag_type *cttype; 314 - bool unload_ok = true; 315 226 316 227 if (!mod) 317 - return true; 228 + return; 318 229 319 230 /* await any module's kfree_rcu() operations to complete */ 320 231 kvfree_rcu_barrier(); ··· 333 246 } 334 247 if (found) { 335 248 if (cttype->desc.module_unload) 336 - if (!cttype->desc.module_unload(cttype, cmod)) 337 - unload_ok = false; 249 + cttype->desc.module_unload(cttype, cmod); 338 250 339 251 cttype->count -= range_size(cttype, &cmod->range); 340 252 idr_remove(&cttype->mod_idr, mod_id); 341 253 kfree(cmod); 342 254 } 343 255 up_write(&cttype->mod_lock); 256 + if (found && cttype->desc.free_section_mem) 257 + cttype->desc.free_section_mem(mod, true); 344 258 } 345 259 mutex_unlock(&codetag_lock); 346 - 347 - return unload_ok; 348 260 } 349 261 #endif /* CONFIG_MODULES */ 350 262
+3 -2
scripts/module.lds.S
··· 50 50 .data : { 51 51 *(.data .data.[0-9a-zA-Z_]*) 52 52 *(.data..L*) 53 - CODETAG_SECTIONS() 53 + MOD_CODETAG_SECTIONS() 54 54 } 55 55 56 56 .rodata : { ··· 59 59 } 60 60 #else 61 61 .data : { 62 - CODETAG_SECTIONS() 62 + MOD_CODETAG_SECTIONS() 63 63 } 64 64 #endif 65 + MOD_SEPARATE_CODETAG_SECTIONS() 65 66 } 66 67 67 68 /* bring in arch-specific sections */