at v6.19-rc8
// SPDX-License-Identifier: GPL-2.0

#include <linux/buildid.h>
#include <linux/cache.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/secretmem.h>

#define BUILD_ID 3

#define MAX_PHDR_CNT 256

void freader_init_from_file(struct freader *r, void *buf, u32 buf_sz,
			    struct file *file, bool may_fault)
{
	memset(r, 0, sizeof(*r));
	r->buf = buf;
	r->buf_sz = buf_sz;
	r->file = file;
	r->may_fault = may_fault;
}

void freader_init_from_mem(struct freader *r, const char *data, u64 data_sz)
{
	memset(r, 0, sizeof(*r));
	r->data = data;
	r->data_sz = data_sz;
}

static void freader_put_folio(struct freader *r)
{
	if (!r->folio)
		return;
	kunmap_local(r->addr);
	folio_put(r->folio);
	r->folio = NULL;
}

static int freader_get_folio(struct freader *r, loff_t file_off)
{
	/* check if we can just reuse current folio */
	if (r->folio && file_off >= r->folio_off &&
	    file_off < r->folio_off + folio_size(r->folio))
		return 0;

	freader_put_folio(r);

	/* only use page cache lookup - fail if not already cached */
	r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT);

	if (IS_ERR(r->folio) || !folio_test_uptodate(r->folio)) {
		if (!IS_ERR(r->folio))
			folio_put(r->folio);
		r->folio = NULL;
		return -EFAULT;
	}

	r->folio_off = folio_pos(r->folio);
	r->addr = kmap_local_folio(r->folio, 0);

	return 0;
}

const void *freader_fetch(struct freader *r, loff_t file_off, size_t sz)
{
	size_t folio_sz;

	/* provided internal temporary buffer should be sized correctly */
	if (WARN_ON(r->buf && sz > r->buf_sz)) {
		r->err = -E2BIG;
		return NULL;
	}

	if (unlikely(file_off + sz < file_off)) {
		r->err = -EOVERFLOW;
		return NULL;
	}

	/* working with memory buffer is much more straightforward */
	if (!r->buf) {
		if (file_off + sz > r->data_sz) {
			r->err = -ERANGE;
			return NULL;
		}
		return r->data + file_off;
	}

	/* reject secretmem folios created with memfd_secret() */
	if (secretmem_mapping(r->file->f_mapping)) {
		r->err = -EFAULT;
		return NULL;
	}

	/* use __kernel_read() for sleepable context */
	if (r->may_fault) {
		ssize_t ret;

		ret = __kernel_read(r->file, r->buf, sz, &file_off);
		if (ret != sz) {
			r->err = (ret < 0) ? ret : -EIO;
			return NULL;
		}
		return r->buf;
	}
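
/*
 * Illustrative sketch, not part of the upstream file: how the freader API
 * above is meant to be used. A reader is pointed either at an in-memory
 * buffer or at a struct file, data is fetched by absolute file offset, and
 * every freader_fetch() may invalidate pointers returned by earlier fetches.
 * The function name and the offset below are hypothetical.
 */
static int __maybe_unused freader_example_read_half(const void *mem, u64 mem_sz,
						    u16 *out)
{
	struct freader r;
	const void *p;
	int err = 0;

	freader_init_from_mem(&r, mem, mem_sz);

	/* read two bytes at offset 8; on failure r.err holds the reason */
	p = freader_fetch(&r, 8, sizeof(*out));
	if (!p)
		err = r.err;
	else
		memcpy(out, p, sizeof(*out));

	freader_cleanup(&r);
	return err;
}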

	/* fetch or reuse folio for given file offset */
	r->err = freader_get_folio(r, file_off);
	if (r->err)
		return NULL;

	/* if requested data is crossing folio boundaries, we have to copy
	 * everything into our local buffer to keep a simple linear memory
	 * access interface
	 */
	folio_sz = folio_size(r->folio);
	if (file_off + sz > r->folio_off + folio_sz) {
		u64 part_sz = r->folio_off + folio_sz - file_off, off;

		memcpy(r->buf, r->addr + file_off - r->folio_off, part_sz);
		off = part_sz;

		while (off < sz) {
			/* fetch next folio */
			r->err = freader_get_folio(r, r->folio_off + folio_sz);
			if (r->err)
				return NULL;
			folio_sz = folio_size(r->folio);
			part_sz = min_t(u64, sz - off, folio_sz);
			memcpy(r->buf + off, r->addr, part_sz);
			off += part_sz;
		}

		return r->buf;
	}

	/* if data fits in a single folio, just return direct pointer */
	return r->addr + (file_off - r->folio_off);
}

void freader_cleanup(struct freader *r)
{
	if (!r->buf)
		return; /* non-file-backed mode */

	freader_put_folio(r);
}

/*
 * Parse build id from the note segment. This logic can be shared between
 * 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
 * identical.
 */
static int parse_build_id(struct freader *r, unsigned char *build_id, __u32 *size,
			  loff_t note_off, Elf32_Word note_size)
{
	const char note_name[] = "GNU";
	const size_t note_name_sz = sizeof(note_name);
	u32 build_id_off, new_off, note_end, name_sz, desc_sz;
	const Elf32_Nhdr *nhdr;
	const char *data;

	if (check_add_overflow(note_off, note_size, &note_end))
		return -EINVAL;

	while (note_end - note_off > sizeof(Elf32_Nhdr) + note_name_sz) {
		nhdr = freader_fetch(r, note_off, sizeof(Elf32_Nhdr) + note_name_sz);
		if (!nhdr)
			return r->err;

		name_sz = READ_ONCE(nhdr->n_namesz);
		desc_sz = READ_ONCE(nhdr->n_descsz);

		new_off = note_off + sizeof(Elf32_Nhdr);
		if (check_add_overflow(new_off, ALIGN(name_sz, 4), &new_off) ||
		    check_add_overflow(new_off, ALIGN(desc_sz, 4), &new_off) ||
		    new_off > note_end)
			break;

		if (nhdr->n_type == BUILD_ID &&
		    name_sz == note_name_sz &&
		    memcmp(nhdr + 1, note_name, note_name_sz) == 0 &&
		    desc_sz > 0 && desc_sz <= BUILD_ID_SIZE_MAX) {
			build_id_off = note_off + sizeof(Elf32_Nhdr) + ALIGN(note_name_sz, 4);

			/* freader_fetch() will invalidate nhdr pointer */
			data = freader_fetch(r, build_id_off, desc_sz);
			if (!data)
				return r->err;

			memcpy(build_id, data, desc_sz);
			memset(build_id + desc_sz, 0, BUILD_ID_SIZE_MAX - desc_sz);
			if (size)
				*size = desc_sz;
			return 0;
		}

		note_off = new_off;
	}

	return -EINVAL;
}
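
/*
 * Illustrative note, not part of the upstream file: the on-disk layout that
 * parse_build_id() above walks. A GNU build ID is an ELF note with owner
 * "GNU" and type NT_GNU_BUILD_ID (3, i.e. BUILD_ID above); for the common
 * SHA-1 case the descriptor is 20 bytes:
 *
 *	Elf32_Nhdr {			offset  0 (12 bytes total)
 *		n_namesz = 4;		strlen("GNU") + NUL
 *		n_descsz = 20;		build ID length, <= BUILD_ID_SIZE_MAX
 *		n_type   = 3;		NT_GNU_BUILD_ID
 *	}
 *	"GNU\0"				offset 12, padded to a 4-byte boundary
 *	<20-byte build ID>		offset 16
 *
 * Name and descriptor are each padded to 4 bytes, which is why the loop
 * advances by sizeof(Elf32_Nhdr) + ALIGN(name_sz, 4) + ALIGN(desc_sz, 4).
 */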

/* Parse build ID from 32-bit ELF */
static int get_build_id_32(struct freader *r, unsigned char *build_id, __u32 *size)
{
	const Elf32_Ehdr *ehdr;
	const Elf32_Phdr *phdr;
	__u32 phnum, phoff, i;

	ehdr = freader_fetch(r, 0, sizeof(Elf32_Ehdr));
	if (!ehdr)
		return r->err;

	/* subsequent freader_fetch() calls invalidate pointers, so remember locally */
	phnum = READ_ONCE(ehdr->e_phnum);
	phoff = READ_ONCE(ehdr->e_phoff);

	/* set upper bound on number of segments (phdrs) we iterate */
	if (phnum > MAX_PHDR_CNT)
		phnum = MAX_PHDR_CNT;

	/* check that phoff is not large enough to cause an overflow */
	if (phoff + phnum * sizeof(Elf32_Phdr) < phoff)
		return -EINVAL;

	for (i = 0; i < phnum; ++i) {
		phdr = freader_fetch(r, phoff + i * sizeof(Elf32_Phdr), sizeof(Elf32_Phdr));
		if (!phdr)
			return r->err;

		if (phdr->p_type == PT_NOTE &&
		    !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset),
				    READ_ONCE(phdr->p_filesz)))
			return 0;
	}
	return -EINVAL;
}

/* Parse build ID from 64-bit ELF */
static int get_build_id_64(struct freader *r, unsigned char *build_id, __u32 *size)
{
	const Elf64_Ehdr *ehdr;
	const Elf64_Phdr *phdr;
	__u32 phnum, i;
	__u64 phoff;

	ehdr = freader_fetch(r, 0, sizeof(Elf64_Ehdr));
	if (!ehdr)
		return r->err;

	/* subsequent freader_fetch() calls invalidate pointers, so remember locally */
	phnum = READ_ONCE(ehdr->e_phnum);
	phoff = READ_ONCE(ehdr->e_phoff);

	/* set upper bound on number of segments (phdrs) we iterate */
	if (phnum > MAX_PHDR_CNT)
		phnum = MAX_PHDR_CNT;

	/* check that phoff is not large enough to cause an overflow */
	if (phoff + phnum * sizeof(Elf64_Phdr) < phoff)
		return -EINVAL;

	for (i = 0; i < phnum; ++i) {
		phdr = freader_fetch(r, phoff + i * sizeof(Elf64_Phdr), sizeof(Elf64_Phdr));
		if (!phdr)
			return r->err;

		if (phdr->p_type == PT_NOTE &&
		    !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset),
				    READ_ONCE(phdr->p_filesz)))
			return 0;
	}

	return -EINVAL;
}

/* enough for Elf64_Ehdr, Elf64_Phdr, and all the smaller requests */
#define MAX_FREADER_BUF_SZ 64

static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
			    __u32 *size, bool may_fault)
{
	const Elf32_Ehdr *ehdr;
	struct freader r;
	char buf[MAX_FREADER_BUF_SZ];
	int ret;

	/* only works for page-backed storage */
	if (!vma->vm_file)
		return -EINVAL;

	freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file, may_fault);

	/* fetch first 18 bytes of ELF header for checks */
	ehdr = freader_fetch(&r, 0, offsetofend(Elf32_Ehdr, e_type));
	if (!ehdr) {
		ret = r.err;
		goto out;
	}

	ret = -EINVAL;

	/* compare magic \x7f "ELF" */
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* only support executable files and shared object files */
	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
		goto out;

	if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
		ret = get_build_id_32(&r, build_id, size);
	else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
		ret = get_build_id_64(&r, build_id, size);
out:
	freader_cleanup(&r);
	return ret;
}

/*
 * Parse build ID of ELF file mapped to VMA
 * @vma: vma object
 * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
 * @size: returns actual build id size in case of success
 *
 * Assumes no page fault can be taken, so if relevant portions of the ELF file
 * are not already paged in, fetching the build ID fails.
 *
 * Return: 0 on success; negative error, otherwise
 */
int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
{
	return __build_id_parse(vma, build_id, size, false /* !may_fault */);
}

/*
 * Parse build ID of ELF file mapped to VMA
 * @vma: vma object
 * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
 * @size: returns actual build id size in case of success
 *
 * Assumes a faultable context and may cause page faults to bring file data
 * into the page cache.
 *
 * Return: 0 on success; negative error, otherwise
 */
int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
{
	return __build_id_parse(vma, build_id, size, true /* may_fault */);
}
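
/*
 * Illustrative sketch, not part of the upstream file: how a caller might use
 * build_id_parse() above. The function name is hypothetical; it assumes a
 * sleepable context and that the caller already holds mm's mmap lock for
 * read, as required for find_vma().
 */
static int __maybe_unused example_report_build_id(struct mm_struct *mm,
						  unsigned long addr)
{
	unsigned char id[BUILD_ID_SIZE_MAX];
	struct vm_area_struct *vma;
	__u32 sz = 0;
	int err;

	vma = find_vma(mm, addr);
	if (!vma || !vma->vm_file)
		return -ENOENT;

	/* faultable variant: may read file contents into the page cache */
	err = build_id_parse(vma, id, &sz);
	if (err)
		return err;

	pr_info("build ID at %lx: %*phN\n", addr, (int)sz, id);
	return 0;
}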

/**
 * build_id_parse_buf - Get build ID from a buffer
 * @buf: ELF note section(s) to parse
 * @buf_size: Size of @buf in bytes
 * @build_id: Build ID parsed from @buf, at least BUILD_ID_SIZE_MAX long
 *
 * Return: 0 on success, -EINVAL otherwise
 */
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size)
{
	struct freader r;
	int err;

	freader_init_from_mem(&r, buf, buf_size);

	err = parse_build_id(&r, build_id, NULL, 0, buf_size);

	freader_cleanup(&r);
	return err;
}

#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init;

/**
 * init_vmlinux_build_id - Compute and stash the running kernel's build ID
 */
void __init init_vmlinux_build_id(void)
{
	extern const void __start_notes;
	extern const void __stop_notes;
	unsigned int size = &__stop_notes - &__start_notes;

	build_id_parse_buf(&__start_notes, vmlinux_build_id, size);
}
#endif
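
/*
 * Illustrative sketch, not part of the upstream file: consuming the stashed
 * kernel build ID. The function name is hypothetical; vmlinux_build_id is
 * only populated once init_vmlinux_build_id() has run during early boot and
 * only exists under the config options guarding it above.
 */
#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
static void __maybe_unused example_print_vmlinux_build_id(void)
{
	/* %20phN prints BUILD_ID_SIZE_MAX (20) bytes as hex with no separators */
	pr_info("vmlinux build ID: %20phN\n", vmlinux_build_id);
}
#endif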