Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc64/modules: replace stub allocation sentinel with an explicit counter

The logic for allocating ppc64_stub_entry trampolines in the .stubs
section relies on an inline sentinel, where a NULL .funcdata member
indicates an available slot.

While preceding commits fixed the initialization bugs that led to ftrace
stub corruption, the sentinel-based approach remains fragile: it depends
on an implicit convention between subsystems modifying different
struct types in the same memory area.

Replace the sentinel with an explicit counter, module->arch.num_stubs.
Instead of iterating through memory to find a NULL marker, the module
loader uses this counter as the boundary for the next free slot.

This simplifies the allocation code, hardens it against future changes
to stub structures, and removes the need for an extra relocation slot
previously reserved to terminate the sentinel search.

Signed-off-by: Joe Lawrence <joe.lawrence@redhat.com>
Acked-by: Naveen N Rao (AMD) <naveen@kernel.org>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20250912142740.3581368-4-joe.lawrence@redhat.com

authored by

Joe Lawrence and committed by
Madhavan Srinivasan
b137312f f6b4df37

+9 -18
+1
arch/powerpc/include/asm/module.h
···   27   27   struct mod_arch_specific {
      28   28   #ifdef __powerpc64__
      29   29           unsigned int stubs_section;     /* Index of stubs section in module */
           30  +        unsigned int stub_count;        /* Number of stubs used */
      30   31   #ifdef CONFIG_PPC_KERNEL_PCREL
      31   32           unsigned int got_section;       /* What section is the GOT? */
      32   33           unsigned int pcpu_section;      /* .data..percpu section */
+8 -18
arch/powerpc/kernel/module_64.c
···  209  209                            char *secstrings,
     210  210                            struct module *me)
     211  211   {
     212       -        /* One extra reloc so it's always 0-addr terminated */
     213       -        unsigned long relocs = 1;
          212  +        unsigned long relocs = 0;
     214  213           unsigned i;
     215  214
     216  215           /* Every relocated section... */
···  704  705
     705  706           /* Find this stub, or if that fails, the next avail. entry */
     706  707           stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
     707       -        for (i = 0; stub_func_addr(stubs[i].funcdata); i++) {
          708  +        for (i = 0; i < me->arch.stub_count; i++) {
     708  709                   if (WARN_ON(i >= num_stubs))
     709  710                           return 0;
···  715  716           if (!create_stub(sechdrs, &stubs[i], addr, me, name))
     716  717                   return 0;
          719  +        me->arch.stub_count++;
     718  720           return (unsigned long)&stubs[i];
     719  721   }
··· 1118 1118   static int setup_ftrace_ool_stubs(const Elf64_Shdr *sechdrs, unsigned long addr, struct module *me)
    1119 1119   {
    1120 1120   #ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
    1121        -        unsigned int i, total_stubs, num_stubs;
         1121  +        unsigned int total_stubs, num_stubs;
    1122 1122           struct ppc64_stub_entry *stub;
    1123 1123
    1124 1124           total_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stub);
    1125 1125           num_stubs = roundup(me->arch.ool_stub_count * sizeof(struct ftrace_ool_stub),
    1126 1126                               sizeof(struct ppc64_stub_entry)) / sizeof(struct ppc64_stub_entry);
    1127 1127
    1128        -        /* Find the next available entry */
    1129        -        stub = (void *)sechdrs[me->arch.stubs_section].sh_addr;
    1130        -        for (i = 0; stub_func_addr(stub[i].funcdata); i++)
    1131        -                if (WARN_ON(i >= total_stubs))
    1132        -                        return -1;
    1133        -
    1134        -        if (WARN_ON(i + num_stubs > total_stubs))
         1128  +        if (WARN_ON(me->arch.stub_count + num_stubs > total_stubs))
    1135 1129                   return -1;
    1136 1130
    1137        -        stub += i;
    1138        -        me->arch.ool_stubs = (struct ftrace_ool_stub *)stub;
    1139        -
    1140        -        /* reserve stubs */
    1141        -        for (i = 0; i < num_stubs; i++)
    1142        -                if (patch_u32((void *)&stub[i].funcdata, PPC_RAW_NOP()))
    1143        -                        return -1;
         1131  +        stub = (void *)sechdrs[me->arch.stubs_section].sh_addr;
         1132  +        me->arch.ool_stubs = (struct ftrace_ool_stub *)(stub + me->arch.stub_count);
         1133  +        me->arch.stub_count += num_stubs;
    1144 1134   #endif
    1145 1135
    1146 1136           return 0;