// SPDX-License-Identifier: GPL-2.0

#include <linux/buildid.h>
#include <linux/cache.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/secretmem.h>

#define BUILD_ID 3	/* ELF note type of the NT_GNU_BUILD_ID note */

#define MAX_PHDR_CNT 256	/* upper bound on the number of program headers we scan */

/*
 * State for the freader helpers below, which fetch ELF data either through
 * the page cache of a backing file or straight from a memory buffer.
 */
struct freader {
	void *buf;
	u32 buf_sz;
	int err;
	union {
		struct {
			struct file *file;
			struct folio *folio;
			void *addr;
			loff_t folio_off;
			bool may_fault;
		};
		struct {
			const char *data;
			u64 data_sz;
		};
	};
};

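/* Initialize a file-backed reader; @buf is scratch space for fetches that span folios */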
void freader_init_from_file(struct freader *r, void *buf, u32 buf_sz,
			    struct file *file, bool may_fault)
{
	memset(r, 0, sizeof(*r));
	r->buf = buf;
	r->buf_sz = buf_sz;
	r->file = file;
	r->may_fault = may_fault;
}

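/* Initialize a reader that serves fetches directly from an in-memory buffer */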
void freader_init_from_mem(struct freader *r, const char *data, u64 data_sz)
{
	memset(r, 0, sizeof(*r));
	r->data = data;
	r->data_sz = data_sz;
}

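/* Unmap and drop the reference on the currently held folio, if any */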
static void freader_put_folio(struct freader *r)
{
	if (!r->folio)
		return;
	kunmap_local(r->addr);
	folio_put(r->folio);
	r->folio = NULL;
}

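/* Map the folio covering @file_off, reusing the currently mapped folio when possible */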
static int freader_get_folio(struct freader *r, loff_t file_off)
{
	/* check if we can just reuse current folio */
	if (r->folio && file_off >= r->folio_off &&
	    file_off < r->folio_off + folio_size(r->folio))
		return 0;

	freader_put_folio(r);

	/* reject secretmem folios created with memfd_secret() */
	if (secretmem_mapping(r->file->f_mapping))
		return -EFAULT;

	r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT);

	/* if sleeping is allowed, wait for the page, if necessary */
	if (r->may_fault && (IS_ERR(r->folio) || !folio_test_uptodate(r->folio))) {
		filemap_invalidate_lock_shared(r->file->f_mapping);
		r->folio = read_cache_folio(r->file->f_mapping, file_off >> PAGE_SHIFT,
					    NULL, r->file);
		filemap_invalidate_unlock_shared(r->file->f_mapping);
	}

	if (IS_ERR(r->folio) || !folio_test_uptodate(r->folio)) {
		if (!IS_ERR(r->folio))
			folio_put(r->folio);
		r->folio = NULL;
		return -EFAULT;
	}

	r->folio_off = folio_pos(r->folio);
	r->addr = kmap_local_folio(r->folio, 0);

	return 0;
}

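/*
 * Return a pointer to @sz bytes at offset @file_off. In memory mode the
 * pointer refers directly to the backing buffer; in file mode, data within a
 * single folio is returned from the mapped folio, while data crossing folio
 * boundaries is assembled in the caller-provided scratch buffer. Returns
 * NULL and records the error in r->err on failure.
 */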
const void *freader_fetch(struct freader *r, loff_t file_off, size_t sz)
{
	size_t folio_sz;

	/* provided internal temporary buffer should be sized correctly */
	if (WARN_ON(r->buf && sz > r->buf_sz)) {
		r->err = -E2BIG;
		return NULL;
	}

	if (unlikely(file_off + sz < file_off)) {
		r->err = -EOVERFLOW;
		return NULL;
	}

	/* working with memory buffer is much more straightforward */
	if (!r->buf) {
		if (file_off + sz > r->data_sz) {
			r->err = -ERANGE;
			return NULL;
		}
		return r->data + file_off;
	}

	/* fetch or reuse folio for given file offset */
	r->err = freader_get_folio(r, file_off);
	if (r->err)
		return NULL;

	/* if requested data is crossing folio boundaries, we have to copy
	 * everything into our local buffer to keep a simple linear memory
	 * access interface
	 */
	folio_sz = folio_size(r->folio);
	if (file_off + sz > r->folio_off + folio_sz) {
		u64 part_sz = r->folio_off + folio_sz - file_off, off;

		memcpy(r->buf, r->addr + file_off - r->folio_off, part_sz);
		off = part_sz;

		while (off < sz) {
			/* fetch next folio */
			r->err = freader_get_folio(r, r->folio_off + folio_sz);
			if (r->err)
				return NULL;
			folio_sz = folio_size(r->folio);
			part_sz = min_t(u64, sz - off, folio_sz);
			memcpy(r->buf + off, r->addr, part_sz);
			off += part_sz;
		}

		return r->buf;
	}

	/* if data fits in a single folio, just return direct pointer */
	return r->addr + (file_off - r->folio_off);
}

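/* Release any folio still held by a file-backed reader */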
void freader_cleanup(struct freader *r)
{
	if (!r->buf)
		return; /* non-file-backed mode */

	freader_put_folio(r);
}

/*
 * Parse build id from the note segment. This logic can be shared between
 * 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
 * identical.
 */
static int parse_build_id(struct freader *r, unsigned char *build_id, __u32 *size,
			  loff_t note_off, Elf32_Word note_size)
{
	const char note_name[] = "GNU";
	const size_t note_name_sz = sizeof(note_name);
	u32 build_id_off, new_off, note_end, name_sz, desc_sz;
	const Elf32_Nhdr *nhdr;
	const char *data;

	if (check_add_overflow(note_off, note_size, &note_end))
		return -EINVAL;

	while (note_end - note_off > sizeof(Elf32_Nhdr) + note_name_sz) {
		nhdr = freader_fetch(r, note_off, sizeof(Elf32_Nhdr) + note_name_sz);
		if (!nhdr)
			return r->err;

		name_sz = READ_ONCE(nhdr->n_namesz);
		desc_sz = READ_ONCE(nhdr->n_descsz);

		new_off = note_off + sizeof(Elf32_Nhdr);
		if (check_add_overflow(new_off, ALIGN(name_sz, 4), &new_off) ||
		    check_add_overflow(new_off, ALIGN(desc_sz, 4), &new_off) ||
		    new_off > note_end)
			break;

		if (nhdr->n_type == BUILD_ID &&
		    name_sz == note_name_sz &&
		    memcmp(nhdr + 1, note_name, note_name_sz) == 0 &&
		    desc_sz > 0 && desc_sz <= BUILD_ID_SIZE_MAX) {
			build_id_off = note_off + sizeof(Elf32_Nhdr) + ALIGN(note_name_sz, 4);

			/* freader_fetch() will invalidate nhdr pointer */
			data = freader_fetch(r, build_id_off, desc_sz);
			if (!data)
				return r->err;

			memcpy(build_id, data, desc_sz);
			memset(build_id + desc_sz, 0, BUILD_ID_SIZE_MAX - desc_sz);
			if (size)
				*size = desc_sz;
			return 0;
		}

		note_off = new_off;
	}

	return -EINVAL;
}

/* Parse build ID from 32-bit ELF */
static int get_build_id_32(struct freader *r, unsigned char *build_id, __u32 *size)
{
	const Elf32_Ehdr *ehdr;
	const Elf32_Phdr *phdr;
	__u32 phnum, phoff, i;

	ehdr = freader_fetch(r, 0, sizeof(Elf32_Ehdr));
	if (!ehdr)
		return r->err;

	/* subsequent freader_fetch() calls invalidate pointers, so remember locally */
	phnum = READ_ONCE(ehdr->e_phnum);
	phoff = READ_ONCE(ehdr->e_phoff);

	/* set upper bound on the number of segments (phdrs) we iterate */
	if (phnum > MAX_PHDR_CNT)
		phnum = MAX_PHDR_CNT;

	/* check that phoff is not large enough to cause an overflow */
	if (phoff + phnum * sizeof(Elf32_Phdr) < phoff)
		return -EINVAL;

	for (i = 0; i < phnum; ++i) {
		phdr = freader_fetch(r, phoff + i * sizeof(Elf32_Phdr), sizeof(Elf32_Phdr));
		if (!phdr)
			return r->err;

		if (phdr->p_type == PT_NOTE &&
		    !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset),
				    READ_ONCE(phdr->p_filesz)))
			return 0;
	}
	return -EINVAL;
}

/* Parse build ID from 64-bit ELF */
static int get_build_id_64(struct freader *r, unsigned char *build_id, __u32 *size)
{
	const Elf64_Ehdr *ehdr;
	const Elf64_Phdr *phdr;
	__u32 phnum, i;
	__u64 phoff;

	ehdr = freader_fetch(r, 0, sizeof(Elf64_Ehdr));
	if (!ehdr)
		return r->err;

	/* subsequent freader_fetch() calls invalidate pointers, so remember locally */
	phnum = READ_ONCE(ehdr->e_phnum);
	phoff = READ_ONCE(ehdr->e_phoff);

	/* set upper bound on the number of segments (phdrs) we iterate */
	if (phnum > MAX_PHDR_CNT)
		phnum = MAX_PHDR_CNT;

	/* check that phoff is not large enough to cause an overflow */
	if (phoff + phnum * sizeof(Elf64_Phdr) < phoff)
		return -EINVAL;

	for (i = 0; i < phnum; ++i) {
		phdr = freader_fetch(r, phoff + i * sizeof(Elf64_Phdr), sizeof(Elf64_Phdr));
		if (!phdr)
			return r->err;

		if (phdr->p_type == PT_NOTE &&
		    !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset),
				    READ_ONCE(phdr->p_filesz)))
			return 0;
	}

	return -EINVAL;
}

/* enough for Elf64_Ehdr, Elf64_Phdr, and all the smaller requests */
#define MAX_FREADER_BUF_SZ 64

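/* Common helper for the faultable and non-faultable build ID parsers below */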
static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
			    __u32 *size, bool may_fault)
{
	const Elf32_Ehdr *ehdr;
	struct freader r;
	char buf[MAX_FREADER_BUF_SZ];
	int ret;

	/* only works for page backed storage */
	if (!vma->vm_file)
		return -EINVAL;

	freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file, may_fault);

	/* fetch first 18 bytes of ELF header for checks */
	ehdr = freader_fetch(&r, 0, offsetofend(Elf32_Ehdr, e_type));
	if (!ehdr) {
		ret = r.err;
		goto out;
	}

	ret = -EINVAL;

	/* compare magic x7f "ELF" */
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* only support executable and shared object files */
	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
		goto out;

	if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
		ret = get_build_id_32(&r, build_id, size);
	else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
		ret = get_build_id_64(&r, build_id, size);
out:
	freader_cleanup(&r);
	return ret;
}

/*
 * Parse build ID of ELF file mapped to vma
 * @vma: vma object
 * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
 * @size: returns actual build id size in case of success
 *
 * Assumes no page fault can be taken, so if relevant portions of ELF file are
 * not already paged in, fetching of build ID fails.
 *
 * Return: 0 on success; negative error, otherwise
 */
int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
{
	return __build_id_parse(vma, build_id, size, false /* !may_fault */);
}

/*
 * Parse build ID of ELF file mapped to vma
 * @vma: vma object
 * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
 * @size: returns actual build id size in case of success
 *
 * Assumes faultable context and can cause page faults to bring file data
 * into the page cache.
 *
 * Return: 0 on success; negative error, otherwise
 */
int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
{
	return __build_id_parse(vma, build_id, size, true /* may_fault */);
}

/**
 * build_id_parse_buf - Get build ID from a buffer
 * @buf: ELF note section(s) to parse
 * @buf_size: Size of @buf in bytes
 * @build_id: Build ID parsed from @buf, at least BUILD_ID_SIZE_MAX long
 *
 * Return: 0 on success, -EINVAL otherwise
 */
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size)
{
	struct freader r;
	int err;

	freader_init_from_mem(&r, buf, buf_size);

	err = parse_build_id(&r, build_id, NULL, 0, buf_size);

	freader_cleanup(&r);
	return err;
}

#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init;

/**
 * init_vmlinux_build_id - Compute and stash the running kernel's build ID
 */
void __init init_vmlinux_build_id(void)
{
	extern const void __start_notes;
	extern const void __stop_notes;
	unsigned int size = &__stop_notes - &__start_notes;

	build_id_parse_buf(&__start_notes, vmlinux_build_id, size);
}
#endif