Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kexec: implementation of new syscall kexec_file_load

The previous patch provided the interface definition and this patch provides
the implementation of the new syscall.

Previously segment list was prepared in user space. Now user space just
passes kernel fd, initrd fd and command line and kernel will create a
segment list internally.

This patch contains the generic part of the code. Actual segment preparation
and loading is done by the arch- and image-specific loader, which comes in
the next patch.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Matthew Garrett <mjg59@srcf.ucam.org>
Cc: Greg Kroah-Hartman <greg@kroah.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: WANG Chao <chaowang@redhat.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Vivek Goyal and committed by
Linus Torvalds
cb105258 f0895685

+587 -5
+45
arch/x86/kernel/machine_kexec_64.c
··· 22 22 #include <asm/mmu_context.h> 23 23 #include <asm/debugreg.h> 24 24 25 + static struct kexec_file_ops *kexec_file_loaders[] = { 26 + NULL, 27 + }; 28 + 25 29 static void free_transition_pgtable(struct kimage *image) 26 30 { 27 31 free_page((unsigned long)image->arch.pud); ··· 287 283 (unsigned long)&_text - __START_KERNEL); 288 284 } 289 285 286 + /* arch-dependent functionality related to kexec file-based syscall */ 287 + 288 + int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, 289 + unsigned long buf_len) 290 + { 291 + int i, ret = -ENOEXEC; 292 + struct kexec_file_ops *fops; 293 + 294 + for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) { 295 + fops = kexec_file_loaders[i]; 296 + if (!fops || !fops->probe) 297 + continue; 298 + 299 + ret = fops->probe(buf, buf_len); 300 + if (!ret) { 301 + image->fops = fops; 302 + return ret; 303 + } 304 + } 305 + 306 + return ret; 307 + } 308 + 309 + void *arch_kexec_kernel_image_load(struct kimage *image) 310 + { 311 + if (!image->fops || !image->fops->load) 312 + return ERR_PTR(-ENOEXEC); 313 + 314 + return image->fops->load(image, image->kernel_buf, 315 + image->kernel_buf_len, image->initrd_buf, 316 + image->initrd_buf_len, image->cmdline_buf, 317 + image->cmdline_buf_len); 318 + } 319 + 320 + int arch_kimage_file_post_load_cleanup(struct kimage *image) 321 + { 322 + if (!image->fops || !image->fops->cleanup) 323 + return 0; 324 + 325 + return image->fops->cleanup(image); 326 + }
+53
include/linux/kexec.h
··· 121 121 #define KEXEC_TYPE_DEFAULT 0 122 122 #define KEXEC_TYPE_CRASH 1 123 123 unsigned int preserve_context : 1; 124 + /* If set, we are using file mode kexec syscall */ 125 + unsigned int file_mode:1; 124 126 125 127 #ifdef ARCH_HAS_KIMAGE_ARCH 126 128 struct kimage_arch arch; 127 129 #endif 130 + 131 + /* Additional fields for file based kexec syscall */ 132 + void *kernel_buf; 133 + unsigned long kernel_buf_len; 134 + 135 + void *initrd_buf; 136 + unsigned long initrd_buf_len; 137 + 138 + char *cmdline_buf; 139 + unsigned long cmdline_buf_len; 140 + 141 + /* File operations provided by image loader */ 142 + struct kexec_file_ops *fops; 143 + 144 + /* Image loader handling the kernel can store a pointer here */ 145 + void *image_loader_data; 128 146 }; 129 147 148 + /* 149 + * Keeps track of buffer parameters as provided by caller for requesting 150 + * memory placement of buffer. 151 + */ 152 + struct kexec_buf { 153 + struct kimage *image; 154 + char *buffer; 155 + unsigned long bufsz; 156 + unsigned long memsz; 157 + unsigned long buf_align; 158 + unsigned long buf_min; 159 + unsigned long buf_max; 160 + bool top_down; /* allocate from top of memory hole */ 161 + }; 130 162 163 + typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size); 164 + typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf, 165 + unsigned long kernel_len, char *initrd, 166 + unsigned long initrd_len, char *cmdline, 167 + unsigned long cmdline_len); 168 + typedef int (kexec_cleanup_t)(struct kimage *image); 169 + 170 + struct kexec_file_ops { 171 + kexec_probe_t *probe; 172 + kexec_load_t *load; 173 + kexec_cleanup_t *cleanup; 174 + }; 131 175 132 176 /* kexec interface functions */ 133 177 extern void machine_kexec(struct kimage *image); ··· 182 138 struct kexec_segment __user *segments, 183 139 unsigned long flags); 184 140 extern int kernel_kexec(void); 141 + extern int kexec_add_buffer(struct kimage *image, char *buffer, 142 + unsigned long 
bufsz, unsigned long memsz, 143 + unsigned long buf_align, unsigned long buf_min, 144 + unsigned long buf_max, bool top_down, 145 + unsigned long *load_addr); 185 146 extern struct page *kimage_alloc_control_pages(struct kimage *image, 186 147 unsigned int order); 187 148 extern void crash_kexec(struct pt_regs *); ··· 236 187 #else 237 188 #define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT) 238 189 #endif 190 + 191 + /* List of defined/legal kexec file flags */ 192 + #define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \ 193 + KEXEC_FILE_NO_INITRAMFS) 239 194 240 195 #define VMCOREINFO_BYTES (4096) 241 196 #define VMCOREINFO_NOTE_NAME "VMCOREINFO"
+11
include/uapi/linux/kexec.h
··· 13 13 #define KEXEC_PRESERVE_CONTEXT 0x00000002 14 14 #define KEXEC_ARCH_MASK 0xffff0000 15 15 16 + /* 17 + * Kexec file load interface flags. 18 + * KEXEC_FILE_UNLOAD : Unload already loaded kexec/kdump image. 19 + * KEXEC_FILE_ON_CRASH : Load/unload operation belongs to kdump image. 20 + * KEXEC_FILE_NO_INITRAMFS : No initramfs is being loaded. Ignore the initrd 21 + * fd field. 22 + */ 23 + #define KEXEC_FILE_UNLOAD 0x00000001 24 + #define KEXEC_FILE_ON_CRASH 0x00000002 25 + #define KEXEC_FILE_NO_INITRAMFS 0x00000004 26 + 16 27 /* These values match the ELF architecture values. 17 28 * Unless there is a good reason that should continue to be the case. 18 29 */
+478 -5
kernel/kexec.c
··· 6 6 * Version 2. See the file COPYING for more details. 7 7 */ 8 8 9 + #define pr_fmt(fmt) "kexec: " fmt 10 + 9 11 #include <linux/capability.h> 10 12 #include <linux/mm.h> 11 13 #include <linux/file.h> ··· 324 322 return 0; 325 323 out_free_control_pages: 326 324 kimage_free_page_list(&image->control_pages); 325 + out_free_image: 326 + kfree(image); 327 + return ret; 328 + } 329 + 330 + static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len) 331 + { 332 + struct fd f = fdget(fd); 333 + int ret; 334 + struct kstat stat; 335 + loff_t pos; 336 + ssize_t bytes = 0; 337 + 338 + if (!f.file) 339 + return -EBADF; 340 + 341 + ret = vfs_getattr(&f.file->f_path, &stat); 342 + if (ret) 343 + goto out; 344 + 345 + if (stat.size > INT_MAX) { 346 + ret = -EFBIG; 347 + goto out; 348 + } 349 + 350 + /* Don't hand 0 to vmalloc, it whines. */ 351 + if (stat.size == 0) { 352 + ret = -EINVAL; 353 + goto out; 354 + } 355 + 356 + *buf = vmalloc(stat.size); 357 + if (!*buf) { 358 + ret = -ENOMEM; 359 + goto out; 360 + } 361 + 362 + pos = 0; 363 + while (pos < stat.size) { 364 + bytes = kernel_read(f.file, pos, (char *)(*buf) + pos, 365 + stat.size - pos); 366 + if (bytes < 0) { 367 + vfree(*buf); 368 + ret = bytes; 369 + goto out; 370 + } 371 + 372 + if (bytes == 0) 373 + break; 374 + pos += bytes; 375 + } 376 + 377 + if (pos != stat.size) { 378 + ret = -EBADF; 379 + vfree(*buf); 380 + goto out; 381 + } 382 + 383 + *buf_len = pos; 384 + out: 385 + fdput(f); 386 + return ret; 387 + } 388 + 389 + /* Architectures can provide this probe function */ 390 + int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, 391 + unsigned long buf_len) 392 + { 393 + return -ENOEXEC; 394 + } 395 + 396 + void * __weak arch_kexec_kernel_image_load(struct kimage *image) 397 + { 398 + return ERR_PTR(-ENOEXEC); 399 + } 400 + 401 + void __weak arch_kimage_file_post_load_cleanup(struct kimage *image) 402 + { 403 + } 404 + 405 + /* 406 + * Free up memory used by kernel, 
initrd, and comand line. This is temporary 407 + * memory allocation which is not needed any more after these buffers have 408 + * been loaded into separate segments and have been copied elsewhere. 409 + */ 410 + static void kimage_file_post_load_cleanup(struct kimage *image) 411 + { 412 + vfree(image->kernel_buf); 413 + image->kernel_buf = NULL; 414 + 415 + vfree(image->initrd_buf); 416 + image->initrd_buf = NULL; 417 + 418 + kfree(image->cmdline_buf); 419 + image->cmdline_buf = NULL; 420 + 421 + /* See if architecture has anything to cleanup post load */ 422 + arch_kimage_file_post_load_cleanup(image); 423 + } 424 + 425 + /* 426 + * In file mode list of segments is prepared by kernel. Copy relevant 427 + * data from user space, do error checking, prepare segment list 428 + */ 429 + static int 430 + kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd, 431 + const char __user *cmdline_ptr, 432 + unsigned long cmdline_len, unsigned flags) 433 + { 434 + int ret = 0; 435 + void *ldata; 436 + 437 + ret = copy_file_from_fd(kernel_fd, &image->kernel_buf, 438 + &image->kernel_buf_len); 439 + if (ret) 440 + return ret; 441 + 442 + /* Call arch image probe handlers */ 443 + ret = arch_kexec_kernel_image_probe(image, image->kernel_buf, 444 + image->kernel_buf_len); 445 + 446 + if (ret) 447 + goto out; 448 + 449 + /* It is possible that there no initramfs is being loaded */ 450 + if (!(flags & KEXEC_FILE_NO_INITRAMFS)) { 451 + ret = copy_file_from_fd(initrd_fd, &image->initrd_buf, 452 + &image->initrd_buf_len); 453 + if (ret) 454 + goto out; 455 + } 456 + 457 + if (cmdline_len) { 458 + image->cmdline_buf = kzalloc(cmdline_len, GFP_KERNEL); 459 + if (!image->cmdline_buf) { 460 + ret = -ENOMEM; 461 + goto out; 462 + } 463 + 464 + ret = copy_from_user(image->cmdline_buf, cmdline_ptr, 465 + cmdline_len); 466 + if (ret) { 467 + ret = -EFAULT; 468 + goto out; 469 + } 470 + 471 + image->cmdline_buf_len = cmdline_len; 472 + 473 + /* command line should be 
a string with last byte null */ 474 + if (image->cmdline_buf[cmdline_len - 1] != '\0') { 475 + ret = -EINVAL; 476 + goto out; 477 + } 478 + } 479 + 480 + /* Call arch image load handlers */ 481 + ldata = arch_kexec_kernel_image_load(image); 482 + 483 + if (IS_ERR(ldata)) { 484 + ret = PTR_ERR(ldata); 485 + goto out; 486 + } 487 + 488 + image->image_loader_data = ldata; 489 + out: 490 + /* In case of error, free up all allocated memory in this function */ 491 + if (ret) 492 + kimage_file_post_load_cleanup(image); 493 + return ret; 494 + } 495 + 496 + static int 497 + kimage_file_alloc_init(struct kimage **rimage, int kernel_fd, 498 + int initrd_fd, const char __user *cmdline_ptr, 499 + unsigned long cmdline_len, unsigned long flags) 500 + { 501 + int ret; 502 + struct kimage *image; 503 + 504 + image = do_kimage_alloc_init(); 505 + if (!image) 506 + return -ENOMEM; 507 + 508 + image->file_mode = 1; 509 + 510 + ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd, 511 + cmdline_ptr, cmdline_len, flags); 512 + if (ret) 513 + goto out_free_image; 514 + 515 + ret = sanity_check_segment_list(image); 516 + if (ret) 517 + goto out_free_post_load_bufs; 518 + 519 + ret = -ENOMEM; 520 + image->control_code_page = kimage_alloc_control_pages(image, 521 + get_order(KEXEC_CONTROL_PAGE_SIZE)); 522 + if (!image->control_code_page) { 523 + pr_err("Could not allocate control_code_buffer\n"); 524 + goto out_free_post_load_bufs; 525 + } 526 + 527 + image->swap_page = kimage_alloc_control_pages(image, 0); 528 + if (!image->swap_page) { 529 + pr_err(KERN_ERR "Could not allocate swap buffer\n"); 530 + goto out_free_control_pages; 531 + } 532 + 533 + *rimage = image; 534 + return 0; 535 + out_free_control_pages: 536 + kimage_free_page_list(&image->control_pages); 537 + out_free_post_load_bufs: 538 + kimage_file_post_load_cleanup(image); 539 + kfree(image->image_loader_data); 327 540 out_free_image: 328 541 kfree(image); 329 542 return ret; ··· 861 644 862 645 /* Free the kexec 
control pages... */ 863 646 kimage_free_page_list(&image->control_pages); 647 + 648 + kfree(image->image_loader_data); 649 + 650 + /* 651 + * Free up any temporary buffers allocated. This might hit if 652 + * error occurred much later after buffer allocation. 653 + */ 654 + if (image->file_mode) 655 + kimage_file_post_load_cleanup(image); 656 + 864 657 kfree(image); 865 658 } 866 659 ··· 999 772 unsigned long maddr; 1000 773 size_t ubytes, mbytes; 1001 774 int result; 1002 - unsigned char __user *buf; 775 + unsigned char __user *buf = NULL; 776 + unsigned char *kbuf = NULL; 1003 777 1004 778 result = 0; 1005 - buf = segment->buf; 779 + if (image->file_mode) 780 + kbuf = segment->kbuf; 781 + else 782 + buf = segment->buf; 1006 783 ubytes = segment->bufsz; 1007 784 mbytes = segment->memsz; 1008 785 maddr = segment->mem; ··· 1038 807 PAGE_SIZE - (maddr & ~PAGE_MASK)); 1039 808 uchunk = min(ubytes, mchunk); 1040 809 1041 - result = copy_from_user(ptr, buf, uchunk); 810 + /* For file based kexec, source pages are in kernel memory */ 811 + if (image->file_mode) 812 + memcpy(ptr, kbuf, uchunk); 813 + else 814 + result = copy_from_user(ptr, buf, uchunk); 1042 815 kunmap(page); 1043 816 if (result) { 1044 817 result = -EFAULT; ··· 1050 815 } 1051 816 ubytes -= uchunk; 1052 817 maddr += mchunk; 1053 - buf += mchunk; 818 + if (image->file_mode) 819 + kbuf += mchunk; 820 + else 821 + buf += mchunk; 1054 822 mbytes -= mchunk; 1055 823 } 1056 824 out: ··· 1300 1062 unsigned long, cmdline_len, const char __user *, cmdline_ptr, 1301 1063 unsigned long, flags) 1302 1064 { 1303 - return -ENOSYS; 1065 + int ret = 0, i; 1066 + struct kimage **dest_image, *image; 1067 + 1068 + /* We only trust the superuser with rebooting the system. 
*/ 1069 + if (!capable(CAP_SYS_BOOT) || kexec_load_disabled) 1070 + return -EPERM; 1071 + 1072 + /* Make sure we have a legal set of flags */ 1073 + if (flags != (flags & KEXEC_FILE_FLAGS)) 1074 + return -EINVAL; 1075 + 1076 + image = NULL; 1077 + 1078 + if (!mutex_trylock(&kexec_mutex)) 1079 + return -EBUSY; 1080 + 1081 + dest_image = &kexec_image; 1082 + if (flags & KEXEC_FILE_ON_CRASH) 1083 + dest_image = &kexec_crash_image; 1084 + 1085 + if (flags & KEXEC_FILE_UNLOAD) 1086 + goto exchange; 1087 + 1088 + /* 1089 + * In case of crash, new kernel gets loaded in reserved region. It is 1090 + * same memory where old crash kernel might be loaded. Free any 1091 + * current crash dump kernel before we corrupt it. 1092 + */ 1093 + if (flags & KEXEC_FILE_ON_CRASH) 1094 + kimage_free(xchg(&kexec_crash_image, NULL)); 1095 + 1096 + ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr, 1097 + cmdline_len, flags); 1098 + if (ret) 1099 + goto out; 1100 + 1101 + ret = machine_kexec_prepare(image); 1102 + if (ret) 1103 + goto out; 1104 + 1105 + for (i = 0; i < image->nr_segments; i++) { 1106 + struct kexec_segment *ksegment; 1107 + 1108 + ksegment = &image->segment[i]; 1109 + pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n", 1110 + i, ksegment->buf, ksegment->bufsz, ksegment->mem, 1111 + ksegment->memsz); 1112 + 1113 + ret = kimage_load_segment(image, &image->segment[i]); 1114 + if (ret) 1115 + goto out; 1116 + } 1117 + 1118 + kimage_terminate(image); 1119 + 1120 + /* 1121 + * Free up any temporary buffers allocated which are not needed 1122 + * after image has been loaded 1123 + */ 1124 + kimage_file_post_load_cleanup(image); 1125 + exchange: 1126 + image = xchg(dest_image, image); 1127 + out: 1128 + mutex_unlock(&kexec_mutex); 1129 + kimage_free(image); 1130 + return ret; 1304 1131 } 1305 1132 1306 1133 void crash_kexec(struct pt_regs *regs) ··· 1922 1619 } 1923 1620 1924 1621 subsys_initcall(crash_save_vmcoreinfo_init); 1622 + 1623 
+ static int __kexec_add_segment(struct kimage *image, char *buf, 1624 + unsigned long bufsz, unsigned long mem, 1625 + unsigned long memsz) 1626 + { 1627 + struct kexec_segment *ksegment; 1628 + 1629 + ksegment = &image->segment[image->nr_segments]; 1630 + ksegment->kbuf = buf; 1631 + ksegment->bufsz = bufsz; 1632 + ksegment->mem = mem; 1633 + ksegment->memsz = memsz; 1634 + image->nr_segments++; 1635 + 1636 + return 0; 1637 + } 1638 + 1639 + static int locate_mem_hole_top_down(unsigned long start, unsigned long end, 1640 + struct kexec_buf *kbuf) 1641 + { 1642 + struct kimage *image = kbuf->image; 1643 + unsigned long temp_start, temp_end; 1644 + 1645 + temp_end = min(end, kbuf->buf_max); 1646 + temp_start = temp_end - kbuf->memsz; 1647 + 1648 + do { 1649 + /* align down start */ 1650 + temp_start = temp_start & (~(kbuf->buf_align - 1)); 1651 + 1652 + if (temp_start < start || temp_start < kbuf->buf_min) 1653 + return 0; 1654 + 1655 + temp_end = temp_start + kbuf->memsz - 1; 1656 + 1657 + /* 1658 + * Make sure this does not conflict with any of existing 1659 + * segments 1660 + */ 1661 + if (kimage_is_destination_range(image, temp_start, temp_end)) { 1662 + temp_start = temp_start - PAGE_SIZE; 1663 + continue; 1664 + } 1665 + 1666 + /* We found a suitable memory range */ 1667 + break; 1668 + } while (1); 1669 + 1670 + /* If we are here, we found a suitable memory range */ 1671 + __kexec_add_segment(image, kbuf->buffer, kbuf->bufsz, temp_start, 1672 + kbuf->memsz); 1673 + 1674 + /* Success, stop navigating through remaining System RAM ranges */ 1675 + return 1; 1676 + } 1677 + 1678 + static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end, 1679 + struct kexec_buf *kbuf) 1680 + { 1681 + struct kimage *image = kbuf->image; 1682 + unsigned long temp_start, temp_end; 1683 + 1684 + temp_start = max(start, kbuf->buf_min); 1685 + 1686 + do { 1687 + temp_start = ALIGN(temp_start, kbuf->buf_align); 1688 + temp_end = temp_start + kbuf->memsz - 1; 1689 + 
1690 + if (temp_end > end || temp_end > kbuf->buf_max) 1691 + return 0; 1692 + /* 1693 + * Make sure this does not conflict with any of existing 1694 + * segments 1695 + */ 1696 + if (kimage_is_destination_range(image, temp_start, temp_end)) { 1697 + temp_start = temp_start + PAGE_SIZE; 1698 + continue; 1699 + } 1700 + 1701 + /* We found a suitable memory range */ 1702 + break; 1703 + } while (1); 1704 + 1705 + /* If we are here, we found a suitable memory range */ 1706 + __kexec_add_segment(image, kbuf->buffer, kbuf->bufsz, temp_start, 1707 + kbuf->memsz); 1708 + 1709 + /* Success, stop navigating through remaining System RAM ranges */ 1710 + return 1; 1711 + } 1712 + 1713 + static int locate_mem_hole_callback(u64 start, u64 end, void *arg) 1714 + { 1715 + struct kexec_buf *kbuf = (struct kexec_buf *)arg; 1716 + unsigned long sz = end - start + 1; 1717 + 1718 + /* Returning 0 will take to next memory range */ 1719 + if (sz < kbuf->memsz) 1720 + return 0; 1721 + 1722 + if (end < kbuf->buf_min || start > kbuf->buf_max) 1723 + return 0; 1724 + 1725 + /* 1726 + * Allocate memory top down with-in ram range. Otherwise bottom up 1727 + * allocation. 1728 + */ 1729 + if (kbuf->top_down) 1730 + return locate_mem_hole_top_down(start, end, kbuf); 1731 + return locate_mem_hole_bottom_up(start, end, kbuf); 1732 + } 1733 + 1734 + /* 1735 + * Helper function for placing a buffer in a kexec segment. This assumes 1736 + * that kexec_mutex is held. 
1737 + */ 1738 + int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz, 1739 + unsigned long memsz, unsigned long buf_align, 1740 + unsigned long buf_min, unsigned long buf_max, 1741 + bool top_down, unsigned long *load_addr) 1742 + { 1743 + 1744 + struct kexec_segment *ksegment; 1745 + struct kexec_buf buf, *kbuf; 1746 + int ret; 1747 + 1748 + /* Currently adding segment this way is allowed only in file mode */ 1749 + if (!image->file_mode) 1750 + return -EINVAL; 1751 + 1752 + if (image->nr_segments >= KEXEC_SEGMENT_MAX) 1753 + return -EINVAL; 1754 + 1755 + /* 1756 + * Make sure we are not trying to add buffer after allocating 1757 + * control pages. All segments need to be placed first before 1758 + * any control pages are allocated. As control page allocation 1759 + * logic goes through list of segments to make sure there are 1760 + * no destination overlaps. 1761 + */ 1762 + if (!list_empty(&image->control_pages)) { 1763 + WARN_ON(1); 1764 + return -EINVAL; 1765 + } 1766 + 1767 + memset(&buf, 0, sizeof(struct kexec_buf)); 1768 + kbuf = &buf; 1769 + kbuf->image = image; 1770 + kbuf->buffer = buffer; 1771 + kbuf->bufsz = bufsz; 1772 + 1773 + kbuf->memsz = ALIGN(memsz, PAGE_SIZE); 1774 + kbuf->buf_align = max(buf_align, PAGE_SIZE); 1775 + kbuf->buf_min = buf_min; 1776 + kbuf->buf_max = buf_max; 1777 + kbuf->top_down = top_down; 1778 + 1779 + /* Walk the RAM ranges and allocate a suitable range for the buffer */ 1780 + ret = walk_system_ram_res(0, -1, kbuf, locate_mem_hole_callback); 1781 + if (ret != 1) { 1782 + /* A suitable memory range could not be found for buffer */ 1783 + return -EADDRNOTAVAIL; 1784 + } 1785 + 1786 + /* Found a suitable memory range */ 1787 + ksegment = &image->segment[image->nr_segments - 1]; 1788 + *load_addr = ksegment->mem; 1789 + return 0; 1790 + } 1791 + 1925 1792 1926 1793 /* 1927 1794 * Move into place and start executing a preloaded standalone