Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'parisc-5.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:

- Make the powerpc implementation for reading ELF files available as a
  public kexec interface so it can be re-used on other architectures
  (Sven)

- Implement kexec on parisc (Sven)

- Add kprobes on ftrace on parisc (Sven)

- Fix kernel crash with HSC-PCI cards based on card-mode Dino

- Add assembly implementations for memset, strlen, strcpy, strncpy and
strcat

- Some cleanups, documentation updates, warning fixes, ...

* 'parisc-5.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux: (25 commits)
parisc: Have git ignore generated real2.S and firmware.c
parisc: Disable HP HSC-PCI Cards to prevent kernel crash
parisc: add support for kexec_file_load() syscall
parisc: wire up kexec_file_load syscall
parisc: add kexec syscall support
parisc: add __pdc_cpu_rendezvous()
kprobes/parisc: remove arch_kprobe_on_func_entry()
kexec_elf: support 32 bit ELF files
kexec_elf: remove unused variable in kexec_elf_load()
kexec_elf: remove Elf_Rel macro
kexec_elf: remove PURGATORY_STACK_SIZE
kexec_elf: remove parsing of section headers
kexec_elf: change order of elf_*_to_cpu() functions
kexec: add KEXEC_ELF
parisc: Save some bytes in dino driver
parisc: Drop comments which are already in pci.h
parisc: Convert eisa_enumerator to use pr_cont()
parisc: Avoid warning when loading hppb driver
parisc: speed up flush_tlb_all_local with qemu
parisc: Add ALTERNATIVE_CODE() and ALT_COND_RUN_ON_QEMU
...

+1288 -675
+1 -1
Documentation/features/core/jump-labels/arch-support.txt
··· 21 21 | nds32: | TODO | 22 22 | nios2: | TODO | 23 23 | openrisc: | TODO | 24 - | parisc: | TODO | 24 + | parisc: | ok | 25 25 | powerpc: | ok | 26 26 | riscv: | TODO | 27 27 | s390: | ok |
+1 -1
Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
··· 21 21 | nds32: | TODO | 22 22 | nios2: | TODO | 23 23 | openrisc: | TODO | 24 - | parisc: | TODO | 24 + | parisc: | ok | 25 25 | powerpc: | ok | 26 26 | riscv: | TODO | 27 27 | s390: | TODO |
+3
arch/Kconfig
··· 18 18 select CRASH_CORE 19 19 bool 20 20 21 + config KEXEC_ELF 22 + bool 23 + 21 24 config HAVE_IMA_KEXEC 22 25 bool 23 26
+25
arch/parisc/Kconfig
··· 61 61 select HAVE_KRETPROBES 62 62 select HAVE_DYNAMIC_FTRACE if $(cc-option,-fpatchable-function-entry=1,1) 63 63 select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE 64 + select HAVE_KPROBES_ON_FTRACE 65 + select HAVE_DYNAMIC_FTRACE_WITH_REGS 64 66 65 67 help 66 68 The PA-RISC microprocessor is designed by Hewlett-Packard and used ··· 345 343 range 2 32 346 344 depends on SMP 347 345 default "4" 346 + 347 + config KEXEC 348 + bool "Kexec system call" 349 + select KEXEC_CORE 350 + help 351 + kexec is a system call that implements the ability to shutdown your 352 + current kernel, and to start another kernel. It is like a reboot 353 + but it is independent of the system firmware. And like a reboot 354 + you can start any kernel with it, not just Linux. 355 + 356 + It is an ongoing process to be certain the hardware in a machine 357 + shutdown, so do not be surprised if this code does not 358 + initially work for you. 359 + 360 + config KEXEC_FILE 361 + bool "kexec file based system call" 362 + select KEXEC_CORE 363 + select KEXEC_ELF 364 + help 365 + This enables the kexec_file_load() System call. This is 366 + file based and takes file descriptors as system call argument 367 + for kernel and initramfs as opposed to list of segments as 368 + accepted by previous system call. 348 369 349 370 endmenu 350 371
+2
arch/parisc/boot/compressed/.gitignore
··· 1 + firmware.c 2 + real2.S 1 3 sizes.h 2 4 vmlinux 3 5 vmlinux.lds
+10 -1
arch/parisc/include/asm/alternative.h
··· 8 8 #define ALT_COND_NO_ICACHE 0x04 /* if system has no i-cache */ 9 9 #define ALT_COND_NO_SPLIT_TLB 0x08 /* if split_tlb == 0 */ 10 10 #define ALT_COND_NO_IOC_FDC 0x10 /* if I/O cache does not need flushes */ 11 + #define ALT_COND_RUN_ON_QEMU 0x20 /* if running on QEMU */ 11 12 12 13 #define INSN_PxTLB 0x02 /* modify pdtlb, pitlb */ 13 14 #define INSN_NOP 0x08000240 /* nop */ ··· 22 21 23 22 struct alt_instr { 24 23 s32 orig_offset; /* offset to original instructions */ 25 - u32 len; /* end of original instructions */ 24 + s32 len; /* end of original instructions */ 26 25 u32 cond; /* see ALT_COND_XXX */ 27 26 u32 replacement; /* replacement instruction or code */ 28 27 }; ··· 41 40 42 41 #else 43 42 43 + /* to replace one single instructions by a new instruction */ 44 44 #define ALTERNATIVE(from, to, cond, replacement)\ 45 45 .section .altinstructions, "aw" ! \ 46 46 .word (from - .), (to - from)/4 ! \ 47 47 .word cond, replacement ! \ 48 + .previous 49 + 50 + /* to replace multiple instructions by new code */ 51 + #define ALTERNATIVE_CODE(from, num_instructions, cond, new_instr_ptr)\ 52 + .section .altinstructions, "aw" ! \ 53 + .word (from - .), -num_instructions ! \ 54 + .word cond, (new_instr_ptr - .) ! \ 48 55 .previous 49 56 50 57 #endif /* __ASSEMBLY__ */
+1
arch/parisc/include/asm/fixmap.h
··· 30 30 enum fixed_addresses { 31 31 /* Support writing RO kernel text via kprobes, jump labels, etc. */ 32 32 FIX_TEXT_POKE0, 33 + FIX_TEXT_KEXEC, 33 34 FIX_BITMAP_COUNT 34 35 }; 35 36
+1
arch/parisc/include/asm/ftrace.h
··· 8 8 #define MCOUNT_ADDR ((unsigned long)mcount) 9 9 #define MCOUNT_INSN_SIZE 4 10 10 #define CC_USING_NOP_MCOUNT 11 + #define ARCH_SUPPORTS_FTRACE_OPS 1 11 12 extern unsigned long sys_call_table[]; 12 13 13 14 extern unsigned long return_address(unsigned int);
+37
arch/parisc/include/asm/kexec.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _ASM_PARISC_KEXEC_H 3 + #define _ASM_PARISC_KEXEC_H 4 + 5 + #ifdef CONFIG_KEXEC 6 + 7 + /* Maximum physical address we can use pages from */ 8 + #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) 9 + /* Maximum address we can reach in physical address mode */ 10 + #define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) 11 + /* Maximum address we can use for the control code buffer */ 12 + #define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) 13 + 14 + #define KEXEC_CONTROL_PAGE_SIZE 4096 15 + 16 + #define KEXEC_ARCH KEXEC_ARCH_PARISC 17 + #define ARCH_HAS_KIMAGE_ARCH 18 + 19 + #ifndef __ASSEMBLY__ 20 + 21 + struct kimage_arch { 22 + unsigned long initrd_start; 23 + unsigned long initrd_end; 24 + unsigned long cmdline; 25 + }; 26 + 27 + static inline void crash_setup_regs(struct pt_regs *newregs, 28 + struct pt_regs *oldregs) 29 + { 30 + /* Dummy implementation for now */ 31 + } 32 + 33 + #endif /* __ASSEMBLY__ */ 34 + 35 + #endif /* CONFIG_KEXEC */ 36 + 37 + #endif /* _ASM_PARISC_KEXEC_H */
+1
arch/parisc/include/asm/pdc.h
··· 91 91 unsigned long inptr, unsigned long outputr, 92 92 unsigned long glob_cfg); 93 93 94 + int __pdc_cpu_rendezvous(void); 94 95 static inline char * os_id_to_string(u16 os_id) { 95 96 switch(os_id) { 96 97 case OS_ID_NONE: return "No OS";
+15
arch/parisc/include/asm/string.h
··· 8 8 #define __HAVE_ARCH_MEMCPY 9 9 void * memcpy(void * dest,const void *src,size_t count); 10 10 11 + #define __HAVE_ARCH_STRLEN 12 + extern size_t strlen(const char *s); 13 + 14 + #define __HAVE_ARCH_STRCPY 15 + extern char *strcpy(char *dest, const char *src); 16 + 17 + #define __HAVE_ARCH_STRNCPY 18 + extern char *strncpy(char *dest, const char *src, size_t count); 19 + 20 + #define __HAVE_ARCH_STRCAT 21 + extern char *strcat(char *dest, const char *src); 22 + 23 + #define __HAVE_ARCH_MEMSET 24 + extern void *memset(void *, int, size_t); 25 + 11 26 #endif
+2
arch/parisc/kernel/Makefile
··· 37 37 obj-$(CONFIG_JUMP_LABEL) += jump_label.o 38 38 obj-$(CONFIG_KGDB) += kgdb.o 39 39 obj-$(CONFIG_KPROBES) += kprobes.o 40 + obj-$(CONFIG_KEXEC) += kexec.o relocate_kernel.o 41 + obj-$(CONFIG_KEXEC_FILE) += kexec_file.o
+16 -5
arch/parisc/kernel/alternative.c
··· 28 28 29 29 for (entry = start; entry < end; entry++, index++) { 30 30 31 - u32 *from, len, cond, replacement; 31 + u32 *from, cond, replacement; 32 + s32 len; 32 33 33 34 from = (u32 *)((ulong)&entry->orig_offset + entry->orig_offset); 34 35 len = entry->len; ··· 49 48 if ((cond & ALT_COND_NO_DCACHE) && (cache_info.dc_size != 0)) 50 49 continue; 51 50 if ((cond & ALT_COND_NO_ICACHE) && (cache_info.ic_size != 0)) 51 + continue; 52 + if ((cond & ALT_COND_RUN_ON_QEMU) && !running_on_qemu) 52 53 continue; 53 54 54 55 /* ··· 77 74 if (replacement == INSN_NOP && len > 1) 78 75 replacement = 0xe8000002 + (len-2)*8; /* "b,n .+8" */ 79 76 80 - pr_debug("Do %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n", 81 - index, cond, len, from, replacement); 77 + pr_debug("ALTERNATIVE %3d: Cond %2x, Replace %2d instructions to 0x%08x @ 0x%px (%pS)\n", 78 + index, cond, len, replacement, from, from); 82 79 83 - /* Replace instruction */ 84 - *from = replacement; 80 + if (len < 0) { 81 + /* Replace multiple instruction by new code */ 82 + u32 *source; 83 + len = -len; 84 + source = (u32 *)((ulong)&entry->replacement + entry->replacement); 85 + memcpy(from, source, 4 * len); 86 + } else { 87 + /* Replace by one instruction */ 88 + *from = replacement; 89 + } 85 90 applied++; 86 91 } 87 92
+99
arch/parisc/kernel/entry.S
··· 1996 1996 * calling mcount(), and 2 instructions for ftrace_stub(). That way we 1997 1997 * have all on one L1 cacheline. 1998 1998 */ 1999 + ldi 0, %arg3 1999 2000 b ftrace_function_trampoline 2000 2001 copy %r3, %arg2 /* caller original %sp */ 2001 2002 ftrace_stub: ··· 2049 2048 LDREG 0(%r3), %r25 2050 2049 copy %rp, %r26 2051 2050 ldo -8(%r25), %r25 2051 + ldi 0, %r23 /* no pt_regs */ 2052 2052 b,l ftrace_function_trampoline, %rp 2053 2053 copy %r3, %r24 2054 2054 ··· 2077 2075 2078 2076 ENDPROC_CFI(ftrace_caller) 2079 2077 2078 + #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS 2079 + ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN, 2080 + CALLS,SAVE_RP,SAVE_SP) 2081 + ftrace_regs_caller: 2082 + .global ftrace_regs_caller 2083 + 2084 + ldo -FTRACE_FRAME_SIZE(%sp), %r1 2085 + STREG %rp, -RP_OFFSET(%r1) 2086 + 2087 + copy %sp, %r1 2088 + ldo PT_SZ_ALGN(%sp), %sp 2089 + 2090 + STREG %rp, PT_GR2(%r1) 2091 + STREG %r3, PT_GR3(%r1) 2092 + STREG %r4, PT_GR4(%r1) 2093 + STREG %r5, PT_GR5(%r1) 2094 + STREG %r6, PT_GR6(%r1) 2095 + STREG %r7, PT_GR7(%r1) 2096 + STREG %r8, PT_GR8(%r1) 2097 + STREG %r9, PT_GR9(%r1) 2098 + STREG %r10, PT_GR10(%r1) 2099 + STREG %r11, PT_GR11(%r1) 2100 + STREG %r12, PT_GR12(%r1) 2101 + STREG %r13, PT_GR13(%r1) 2102 + STREG %r14, PT_GR14(%r1) 2103 + STREG %r15, PT_GR15(%r1) 2104 + STREG %r16, PT_GR16(%r1) 2105 + STREG %r17, PT_GR17(%r1) 2106 + STREG %r18, PT_GR18(%r1) 2107 + STREG %r19, PT_GR19(%r1) 2108 + STREG %r20, PT_GR20(%r1) 2109 + STREG %r21, PT_GR21(%r1) 2110 + STREG %r22, PT_GR22(%r1) 2111 + STREG %r23, PT_GR23(%r1) 2112 + STREG %r24, PT_GR24(%r1) 2113 + STREG %r25, PT_GR25(%r1) 2114 + STREG %r26, PT_GR26(%r1) 2115 + STREG %r27, PT_GR27(%r1) 2116 + STREG %r28, PT_GR28(%r1) 2117 + STREG %r29, PT_GR29(%r1) 2118 + STREG %r30, PT_GR30(%r1) 2119 + STREG %r31, PT_GR31(%r1) 2120 + mfctl %cr11, %r26 2121 + STREG %r26, PT_SAR(%r1) 2122 + 2123 + copy %rp, %r26 2124 + LDREG -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25 2125 + 
ldo -8(%r25), %r25 2126 + copy %r3, %arg2 2127 + b,l ftrace_function_trampoline, %rp 2128 + copy %r1, %arg3 /* struct pt_regs */ 2129 + 2130 + ldo -PT_SZ_ALGN(%sp), %r1 2131 + 2132 + LDREG PT_SAR(%r1), %rp 2133 + mtctl %rp, %cr11 2134 + 2135 + LDREG PT_GR2(%r1), %rp 2136 + LDREG PT_GR3(%r1), %r3 2137 + LDREG PT_GR4(%r1), %r4 2138 + LDREG PT_GR5(%r1), %r5 2139 + LDREG PT_GR6(%r1), %r6 2140 + LDREG PT_GR7(%r1), %r7 2141 + LDREG PT_GR8(%r1), %r8 2142 + LDREG PT_GR9(%r1), %r9 2143 + LDREG PT_GR10(%r1),%r10 2144 + LDREG PT_GR11(%r1),%r11 2145 + LDREG PT_GR12(%r1),%r12 2146 + LDREG PT_GR13(%r1),%r13 2147 + LDREG PT_GR14(%r1),%r14 2148 + LDREG PT_GR15(%r1),%r15 2149 + LDREG PT_GR16(%r1),%r16 2150 + LDREG PT_GR17(%r1),%r17 2151 + LDREG PT_GR18(%r1),%r18 2152 + LDREG PT_GR19(%r1),%r19 2153 + LDREG PT_GR20(%r1),%r20 2154 + LDREG PT_GR21(%r1),%r21 2155 + LDREG PT_GR22(%r1),%r22 2156 + LDREG PT_GR23(%r1),%r23 2157 + LDREG PT_GR24(%r1),%r24 2158 + LDREG PT_GR25(%r1),%r25 2159 + LDREG PT_GR26(%r1),%r26 2160 + LDREG PT_GR27(%r1),%r27 2161 + LDREG PT_GR28(%r1),%r28 2162 + LDREG PT_GR29(%r1),%r29 2163 + LDREG PT_GR30(%r1),%r30 2164 + LDREG PT_GR31(%r1),%r31 2165 + 2166 + ldo -PT_SZ_ALGN(%sp), %sp 2167 + LDREGM -FTRACE_FRAME_SIZE(%sp), %r1 2168 + /* Adjust return point to jump back to beginning of traced function */ 2169 + ldo -4(%r1), %r1 2170 + bv,n (%r1) 2171 + 2172 + ENDPROC_CFI(ftrace_regs_caller) 2173 + 2174 + #endif 2080 2175 #endif 2081 2176 2082 2177 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+13
arch/parisc/kernel/firmware.c
··· 312 312 } 313 313 314 314 /** 315 + * pdc_cpu_rendenzvous - Stop currently executing CPU 316 + * @retval: -1 on error, 0 on success 317 + */ 318 + int __pdc_cpu_rendezvous(void) 319 + { 320 + if (is_pdc_pat()) 321 + return mem_pdc_call(PDC_PAT_CPU, PDC_PAT_CPU_RENDEZVOUS); 322 + else 323 + return mem_pdc_call(PDC_PROC, 1, 0); 324 + } 325 + 326 + 327 + /** 315 328 * pdc_chassis_warn - Fetches chassis warnings 316 329 * @retval: -1 on error, 0 on success 317 330 */
+60 -4
arch/parisc/kernel/ftrace.c
··· 13 13 #include <linux/init.h> 14 14 #include <linux/ftrace.h> 15 15 #include <linux/uaccess.h> 16 + #include <linux/kprobes.h> 17 + #include <linux/ptrace.h> 16 18 17 19 #include <asm/assembly.h> 18 20 #include <asm/sections.h> ··· 50 48 51 49 void notrace __hot ftrace_function_trampoline(unsigned long parent, 52 50 unsigned long self_addr, 53 - unsigned long org_sp_gr3) 51 + unsigned long org_sp_gr3, 52 + struct pt_regs *regs) 54 53 { 55 54 #ifndef CONFIG_DYNAMIC_FTRACE 56 55 extern ftrace_func_t ftrace_trace_function; 57 56 #endif 58 - if (ftrace_trace_function != ftrace_stub) 59 - ftrace_trace_function(self_addr, parent, NULL, NULL); 57 + extern struct ftrace_ops *function_trace_op; 58 + 59 + if (function_trace_op->flags & FTRACE_OPS_FL_ENABLED && 60 + ftrace_trace_function != ftrace_stub) 61 + ftrace_trace_function(self_addr, parent, 62 + function_trace_op, regs); 60 63 61 64 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 62 65 if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub || 63 - ftrace_graph_entry != ftrace_graph_entry_stub) { 66 + ftrace_graph_entry != ftrace_graph_entry_stub) { 64 67 unsigned long *parent_rp; 65 68 66 69 /* calculate pointer to %rp in stack */ ··· 99 92 return 0; 100 93 } 101 94 int ftrace_update_ftrace_func(ftrace_func_t func) 95 + { 96 + return 0; 97 + } 98 + 99 + int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, 100 + unsigned long addr) 102 101 { 103 102 return 0; 104 103 } ··· 197 184 __patch_text((void *)rec->ip, INSN_NOP); 198 185 __patch_text_multiple((void *)rec->ip + 4 - sizeof(insn), 199 186 insn, sizeof(insn)-4); 187 + return 0; 188 + } 189 + #endif 190 + 191 + #ifdef CONFIG_KPROBES_ON_FTRACE 192 + void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, 193 + struct ftrace_ops *ops, struct pt_regs *regs) 194 + { 195 + struct kprobe_ctlblk *kcb; 196 + struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip); 197 + 198 + if (unlikely(!p) || kprobe_disabled(p)) 199 + return; 200 + 201 + if 
(kprobe_running()) { 202 + kprobes_inc_nmissed_count(p); 203 + return; 204 + } 205 + 206 + __this_cpu_write(current_kprobe, p); 207 + 208 + kcb = get_kprobe_ctlblk(); 209 + kcb->kprobe_status = KPROBE_HIT_ACTIVE; 210 + 211 + regs->iaoq[0] = ip; 212 + regs->iaoq[1] = ip + 4; 213 + 214 + if (!p->pre_handler || !p->pre_handler(p, regs)) { 215 + regs->iaoq[0] = ip + 4; 216 + regs->iaoq[1] = ip + 8; 217 + 218 + if (unlikely(p->post_handler)) { 219 + kcb->kprobe_status = KPROBE_HIT_SSDONE; 220 + p->post_handler(p, regs, 0); 221 + } 222 + } 223 + __this_cpu_write(current_kprobe, NULL); 224 + } 225 + NOKPROBE_SYMBOL(kprobe_ftrace_handler); 226 + 227 + int arch_prepare_kprobe_ftrace(struct kprobe *p) 228 + { 229 + p->ainsn.insn = NULL; 200 230 return 0; 201 231 } 202 232 #endif
+112
arch/parisc/kernel/kexec.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/kernel.h> 4 + #include <linux/console.h> 5 + #include <linux/kexec.h> 6 + #include <linux/delay.h> 7 + #include <asm/cacheflush.h> 8 + #include <asm/sections.h> 9 + 10 + extern void relocate_new_kernel(unsigned long head, 11 + unsigned long start, 12 + unsigned long phys); 13 + 14 + extern const unsigned int relocate_new_kernel_size; 15 + extern unsigned int kexec_initrd_start_offset; 16 + extern unsigned int kexec_initrd_end_offset; 17 + extern unsigned int kexec_cmdline_offset; 18 + extern unsigned int kexec_free_mem_offset; 19 + 20 + static void kexec_show_segment_info(const struct kimage *kimage, 21 + unsigned long n) 22 + { 23 + pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n", 24 + n, 25 + kimage->segment[n].mem, 26 + kimage->segment[n].mem + kimage->segment[n].memsz, 27 + (unsigned long)kimage->segment[n].memsz, 28 + (unsigned long)kimage->segment[n].memsz / PAGE_SIZE); 29 + } 30 + 31 + static void kexec_image_info(const struct kimage *kimage) 32 + { 33 + unsigned long i; 34 + 35 + pr_debug("kexec kimage info:\n"); 36 + pr_debug(" type: %d\n", kimage->type); 37 + pr_debug(" start: %lx\n", kimage->start); 38 + pr_debug(" head: %lx\n", kimage->head); 39 + pr_debug(" nr_segments: %lu\n", kimage->nr_segments); 40 + 41 + for (i = 0; i < kimage->nr_segments; i++) 42 + kexec_show_segment_info(kimage, i); 43 + 44 + #ifdef CONFIG_KEXEC_FILE 45 + if (kimage->file_mode) { 46 + pr_debug("cmdline: %.*s\n", (int)kimage->cmdline_buf_len, 47 + kimage->cmdline_buf); 48 + } 49 + #endif 50 + } 51 + 52 + void machine_kexec_cleanup(struct kimage *kimage) 53 + { 54 + } 55 + 56 + void machine_crash_shutdown(struct pt_regs *regs) 57 + { 58 + } 59 + 60 + void machine_shutdown(void) 61 + { 62 + smp_send_stop(); 63 + while (num_online_cpus() > 1) { 64 + cpu_relax(); 65 + mdelay(1); 66 + } 67 + } 68 + 69 + void machine_kexec(struct kimage *image) 70 + { 71 + #ifdef CONFIG_64BIT 72 + Elf64_Fdesc desc; 
73 + #endif 74 + void (*reloc)(unsigned long head, 75 + unsigned long start, 76 + unsigned long phys); 77 + 78 + unsigned long phys = page_to_phys(image->control_code_page); 79 + void *virt = (void *)__fix_to_virt(FIX_TEXT_KEXEC); 80 + struct kimage_arch *arch = &image->arch; 81 + 82 + set_fixmap(FIX_TEXT_KEXEC, phys); 83 + 84 + flush_cache_all(); 85 + 86 + #ifdef CONFIG_64BIT 87 + reloc = (void *)&desc; 88 + desc.addr = (long long)virt; 89 + #else 90 + reloc = (void *)virt; 91 + #endif 92 + 93 + memcpy(virt, dereference_function_descriptor(relocate_new_kernel), 94 + relocate_new_kernel_size); 95 + 96 + *(unsigned long *)(virt + kexec_cmdline_offset) = arch->cmdline; 97 + *(unsigned long *)(virt + kexec_initrd_start_offset) = arch->initrd_start; 98 + *(unsigned long *)(virt + kexec_initrd_end_offset) = arch->initrd_end; 99 + *(unsigned long *)(virt + kexec_free_mem_offset) = PAGE0->mem_free; 100 + 101 + flush_cache_all(); 102 + flush_tlb_all(); 103 + local_irq_disable(); 104 + 105 + reloc(image->head & PAGE_MASK, image->start, phys); 106 + } 107 + 108 + int machine_kexec_prepare(struct kimage *image) 109 + { 110 + kexec_image_info(image); 111 + return 0; 112 + }
+86
arch/parisc/kernel/kexec_file.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Load ELF vmlinux file for the kexec_file_load syscall. 4 + * 5 + * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org> 6 + * 7 + */ 8 + #include <linux/elf.h> 9 + #include <linux/kexec.h> 10 + #include <linux/libfdt.h> 11 + #include <linux/module.h> 12 + #include <linux/of_fdt.h> 13 + #include <linux/slab.h> 14 + #include <linux/types.h> 15 + 16 + static void *elf_load(struct kimage *image, char *kernel_buf, 17 + unsigned long kernel_len, char *initrd, 18 + unsigned long initrd_len, char *cmdline, 19 + unsigned long cmdline_len) 20 + { 21 + int ret, i; 22 + unsigned long kernel_load_addr; 23 + struct elfhdr ehdr; 24 + struct kexec_elf_info elf_info; 25 + struct kexec_buf kbuf = { .image = image, .buf_min = 0, 26 + .buf_max = -1UL, }; 27 + 28 + ret = kexec_build_elf_info(kernel_buf, kernel_len, &ehdr, &elf_info); 29 + if (ret) 30 + goto out; 31 + 32 + ret = kexec_elf_load(image, &ehdr, &elf_info, &kbuf, &kernel_load_addr); 33 + if (ret) 34 + goto out; 35 + 36 + image->start = __pa(elf_info.ehdr->e_entry); 37 + 38 + for (i = 0; i < image->nr_segments; i++) 39 + image->segment[i].mem = __pa(image->segment[i].mem); 40 + 41 + pr_debug("Loaded the kernel at 0x%lx, entry at 0x%lx\n", 42 + kernel_load_addr, image->start); 43 + 44 + if (initrd != NULL) { 45 + kbuf.buffer = initrd; 46 + kbuf.bufsz = kbuf.memsz = initrd_len; 47 + kbuf.buf_align = PAGE_SIZE; 48 + kbuf.top_down = false; 49 + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; 50 + ret = kexec_add_buffer(&kbuf); 51 + if (ret) 52 + goto out; 53 + 54 + pr_debug("Loaded initrd at 0x%lx\n", kbuf.mem); 55 + image->arch.initrd_start = kbuf.mem; 56 + image->arch.initrd_end = kbuf.mem + initrd_len; 57 + } 58 + 59 + if (cmdline != NULL) { 60 + kbuf.buffer = cmdline; 61 + kbuf.bufsz = kbuf.memsz = ALIGN(cmdline_len, 8); 62 + kbuf.buf_align = PAGE_SIZE; 63 + kbuf.top_down = false; 64 + kbuf.buf_min = PAGE0->mem_free + PAGE_SIZE; 65 + kbuf.buf_max = kernel_load_addr; 66 + kbuf.mem 
= KEXEC_BUF_MEM_UNKNOWN; 67 + ret = kexec_add_buffer(&kbuf); 68 + if (ret) 69 + goto out; 70 + 71 + pr_debug("Loaded cmdline at 0x%lx\n", kbuf.mem); 72 + image->arch.cmdline = kbuf.mem; 73 + } 74 + out: 75 + return NULL; 76 + } 77 + 78 + const struct kexec_file_ops kexec_elf_ops = { 79 + .probe = kexec_elf_probe, 80 + .load = elf_load, 81 + }; 82 + 83 + const struct kexec_file_ops * const kexec_file_loaders[] = { 84 + &kexec_elf_ops, 85 + NULL 86 + };
-4
arch/parisc/kernel/kprobes.c
··· 281 281 { 282 282 return p->addr == trampoline_p.addr; 283 283 } 284 - bool arch_kprobe_on_func_entry(unsigned long offset) 285 - { 286 - return !offset; 287 - } 288 284 289 285 int __init arch_init_kprobes(void) 290 286 {
+9
arch/parisc/kernel/pacache.S
··· 174 174 175 175 2: bv %r0(%r2) 176 176 nop 177 + 178 + /* 179 + * When running in qemu, drop whole flush_tlb_all_local function and 180 + * replace by one pdtlbe instruction, for which QEMU will drop all 181 + * local TLB entries. 182 + */ 183 + 3: pdtlbe %r0(%sr1,%r0) 184 + bv,n %r0(%r2) 185 + ALTERNATIVE_CODE(flush_tlb_all_local, 2, ALT_COND_RUN_ON_QEMU, 3b) 177 186 ENDPROC_CFI(flush_tlb_all_local) 178 187 179 188 .import cache_info,data
+4
arch/parisc/kernel/parisc_ksyms.c
··· 17 17 18 18 #include <linux/string.h> 19 19 EXPORT_SYMBOL(memset); 20 + EXPORT_SYMBOL(strlen); 21 + EXPORT_SYMBOL(strcpy); 22 + EXPORT_SYMBOL(strncpy); 23 + EXPORT_SYMBOL(strcat); 20 24 21 25 #include <linux/atomic.h> 22 26 EXPORT_SYMBOL(__xchg8);
-11
arch/parisc/kernel/pci.c
··· 34 34 #define DBG_RES(x...) 35 35 #endif 36 36 37 - /* To be used as: mdelay(pci_post_reset_delay); 38 - * 39 - * post_reset is the time the kernel should stall to prevent anyone from 40 - * accessing the PCI bus once #RESET is de-asserted. 41 - * PCI spec somewhere says 1 second but with multi-PCI bus systems, 42 - * this makes the boot time much longer than necessary. 43 - * 20ms seems to work for all the HP PCI implementations to date. 44 - * 45 - * #define pci_post_reset_delay 50 46 - */ 47 - 48 37 struct pci_port_ops *pci_port __ro_after_init; 49 38 struct pci_bios_ops *pci_bios __ro_after_init; 50 39
+149
arch/parisc/kernel/relocate_kernel.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #include <linux/linkage.h> 3 + #include <linux/kexec.h> 4 + 5 + #include <asm/assembly.h> 6 + #include <asm/asm-offsets.h> 7 + #include <asm/page.h> 8 + #include <asm/setup.h> 9 + #include <asm/psw.h> 10 + 11 + .level PA_ASM_LEVEL 12 + 13 + .macro kexec_param name 14 + .align 8 15 + ENTRY(kexec\()_\name) 16 + #ifdef CONFIG_64BIT 17 + .dword 0 18 + #else 19 + .word 0 20 + #endif 21 + 22 + ENTRY(kexec\()_\name\()_offset) 23 + .word kexec\()_\name - relocate_new_kernel 24 + .endm 25 + 26 + .text 27 + 28 + /* args: 29 + * r26 - kimage->head 30 + * r25 - start address of kernel 31 + * r24 - physical address of relocate code 32 + */ 33 + 34 + ENTRY_CFI(relocate_new_kernel) 35 + 0: copy %arg1, %rp 36 + /* disable I and Q bit, so we are allowed to execute RFI */ 37 + rsm PSW_SM_I, %r0 38 + nop 39 + nop 40 + nop 41 + nop 42 + nop 43 + nop 44 + nop 45 + 46 + rsm PSW_SM_Q, %r0 47 + nop 48 + nop 49 + nop 50 + nop 51 + nop 52 + nop 53 + nop 54 + 55 + /* 56 + * After return-from-interrupt, we want to run without Code/Data 57 + * translation enabled just like on a normal boot. 58 + */ 59 + 60 + /* calculate new physical execution address */ 61 + ldo 1f-0b(%arg2), %r1 62 + mtctl %r0, %cr17 /* IIASQ */ 63 + mtctl %r0, %cr17 /* IIASQ */ 64 + mtctl %r1, %cr18 /* IIAOQ */ 65 + ldo 4(%r1),%r1 66 + mtctl %r1, %cr18 /* IIAOQ */ 67 + #ifdef CONFIG_64BIT 68 + depdi,z 1, PSW_W_BIT, 1, %r1 69 + mtctl %r1, %cr22 /* IPSW */ 70 + #else 71 + mtctl %r0, %cr22 /* IPSW */ 72 + #endif 73 + /* lets go... 
*/ 74 + rfi 75 + 1: nop 76 + nop 77 + 78 + .Lloop: 79 + LDREG,ma REG_SZ(%arg0), %r3 80 + /* If crash kernel, no copy needed */ 81 + cmpib,COND(=),n 0,%r3,boot 82 + 83 + bb,<,n %r3, 31 - IND_DONE_BIT, boot 84 + bb,>=,n %r3, 31 - IND_INDIRECTION_BIT, .Lnotind 85 + /* indirection, load and restart */ 86 + movb %r3, %arg0, .Lloop 87 + depi 0, 31, PAGE_SHIFT, %arg0 88 + 89 + .Lnotind: 90 + bb,>=,n %r3, 31 - IND_DESTINATION_BIT, .Lnotdest 91 + b .Lloop 92 + copy %r3, %r20 93 + 94 + .Lnotdest: 95 + bb,>= %r3, 31 - IND_SOURCE_BIT, .Lloop 96 + depi 0, 31, PAGE_SHIFT, %r3 97 + copy %r3, %r21 98 + 99 + /* copy page */ 100 + copy %r0, %r18 101 + zdepi 1, 31 - PAGE_SHIFT, 1, %r18 102 + add %r20, %r18, %r17 103 + 104 + depi 0, 31, PAGE_SHIFT, %r20 105 + .Lcopy: 106 + copy %r20, %r12 107 + LDREG,ma REG_SZ(%r21), %r8 108 + LDREG,ma REG_SZ(%r21), %r9 109 + LDREG,ma REG_SZ(%r21), %r10 110 + LDREG,ma REG_SZ(%r21), %r11 111 + STREG,ma %r8, REG_SZ(%r20) 112 + STREG,ma %r9, REG_SZ(%r20) 113 + STREG,ma %r10, REG_SZ(%r20) 114 + STREG,ma %r11, REG_SZ(%r20) 115 + 116 + #ifndef CONFIG_64BIT 117 + LDREG,ma REG_SZ(%r21), %r8 118 + LDREG,ma REG_SZ(%r21), %r9 119 + LDREG,ma REG_SZ(%r21), %r10 120 + LDREG,ma REG_SZ(%r21), %r11 121 + STREG,ma %r8, REG_SZ(%r20) 122 + STREG,ma %r9, REG_SZ(%r20) 123 + STREG,ma %r10, REG_SZ(%r20) 124 + STREG,ma %r11, REG_SZ(%r20) 125 + #endif 126 + 127 + fdc %r0(%r12) 128 + cmpb,COND(<<) %r20,%r17,.Lcopy 129 + fic (%sr4, %r12) 130 + b,n .Lloop 131 + 132 + boot: 133 + mtctl %r0, %cr15 134 + 135 + LDREG kexec_free_mem-0b(%arg2), %arg0 136 + LDREG kexec_cmdline-0b(%arg2), %arg1 137 + LDREG kexec_initrd_end-0b(%arg2), %arg3 138 + LDREG kexec_initrd_start-0b(%arg2), %arg2 139 + bv,n %r0(%rp) 140 + 141 + ENDPROC_CFI(relocate_new_kernel); 142 + 143 + ENTRY(relocate_new_kernel_size) 144 + .word relocate_new_kernel_size - relocate_new_kernel 145 + 146 + kexec_param cmdline 147 + kexec_param initrd_start 148 + kexec_param initrd_end 149 + kexec_param free_mem
+1
arch/parisc/kernel/smp.c
··· 109 109 /* REVISIT : does PM *know* this CPU isn't available? */ 110 110 set_cpu_online(smp_processor_id(), false); 111 111 local_irq_disable(); 112 + __pdc_cpu_rendezvous(); 112 113 for (;;) 113 114 ; 114 115 }
+2 -1
arch/parisc/kernel/syscalls/syscall.tbl
··· 399 399 352 common pkey_alloc sys_pkey_alloc 400 400 353 common pkey_free sys_pkey_free 401 401 354 common rseq sys_rseq 402 - # 355 through 402 are unassigned to sync up with generic numbers 402 + 355 common kexec_file_load sys_kexec_file_load sys_kexec_file_load 403 + # up to 402 is unassigned and reserved for arch specific syscalls 403 404 403 32 clock_gettime64 sys_clock_gettime sys_clock_gettime 404 405 404 32 clock_settime64 sys_clock_settime sys_clock_settime 405 406 405 32 clock_adjtime64 sys_clock_adjtime sys_clock_adjtime
+2
arch/parisc/kernel/traps.c
··· 29 29 #include <linux/bug.h> 30 30 #include <linux/ratelimit.h> 31 31 #include <linux/uaccess.h> 32 + #include <linux/kdebug.h> 32 33 33 34 #include <asm/assembly.h> 34 35 #include <asm/io.h> ··· 415 414 { 416 415 static DEFINE_SPINLOCK(terminate_lock); 417 416 417 + (void)notify_die(DIE_OOPS, msg, regs, 0, code, SIGTRAP); 418 418 bust_spinlocks(1); 419 419 420 420 set_eiem(0);
+2 -2
arch/parisc/lib/Makefile
··· 3 3 # Makefile for parisc-specific library files 4 4 # 5 5 6 - lib-y := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \ 7 - ucmpdi2.o delay.o 6 + lib-y := lusercopy.o bitops.o checksum.o io.o memcpy.o \ 7 + ucmpdi2.o delay.o string.o 8 8 9 9 obj-y := iomap.o
-91
arch/parisc/lib/memset.c
··· 1 - /* Copyright (C) 1991, 1997 Free Software Foundation, Inc. 2 - This file is part of the GNU C Library. 3 - 4 - The GNU C Library is free software; you can redistribute it and/or 5 - modify it under the terms of the GNU Lesser General Public 6 - License as published by the Free Software Foundation; either 7 - version 2.1 of the License, or (at your option) any later version. 8 - 9 - The GNU C Library is distributed in the hope that it will be useful, 10 - but WITHOUT ANY WARRANTY; without even the implied warranty of 11 - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 - Lesser General Public License for more details. 13 - 14 - You should have received a copy of the GNU Lesser General Public 15 - License along with the GNU C Library; if not, write to the Free 16 - Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 17 - 02111-1307 USA. */ 18 - 19 - /* Slight modifications for pa-risc linux - Paul Bame <bame@debian.org> */ 20 - 21 - #include <linux/types.h> 22 - #include <asm/string.h> 23 - 24 - #define OPSIZ (BITS_PER_LONG/8) 25 - typedef unsigned long op_t; 26 - 27 - void * 28 - memset (void *dstpp, int sc, size_t len) 29 - { 30 - unsigned int c = sc; 31 - long int dstp = (long int) dstpp; 32 - 33 - if (len >= 8) 34 - { 35 - size_t xlen; 36 - op_t cccc; 37 - 38 - cccc = (unsigned char) c; 39 - cccc |= cccc << 8; 40 - cccc |= cccc << 16; 41 - if (OPSIZ > 4) 42 - /* Do the shift in two steps to avoid warning if long has 32 bits. */ 43 - cccc |= (cccc << 16) << 16; 44 - 45 - /* There are at least some bytes to set. 46 - No need to test for LEN == 0 in this alignment loop. */ 47 - while (dstp % OPSIZ != 0) 48 - { 49 - ((unsigned char *) dstp)[0] = c; 50 - dstp += 1; 51 - len -= 1; 52 - } 53 - 54 - /* Write 8 `op_t' per iteration until less than 8 `op_t' remain. 
*/ 55 - xlen = len / (OPSIZ * 8); 56 - while (xlen > 0) 57 - { 58 - ((op_t *) dstp)[0] = cccc; 59 - ((op_t *) dstp)[1] = cccc; 60 - ((op_t *) dstp)[2] = cccc; 61 - ((op_t *) dstp)[3] = cccc; 62 - ((op_t *) dstp)[4] = cccc; 63 - ((op_t *) dstp)[5] = cccc; 64 - ((op_t *) dstp)[6] = cccc; 65 - ((op_t *) dstp)[7] = cccc; 66 - dstp += 8 * OPSIZ; 67 - xlen -= 1; 68 - } 69 - len %= OPSIZ * 8; 70 - 71 - /* Write 1 `op_t' per iteration until less than OPSIZ bytes remain. */ 72 - xlen = len / OPSIZ; 73 - while (xlen > 0) 74 - { 75 - ((op_t *) dstp)[0] = cccc; 76 - dstp += OPSIZ; 77 - xlen -= 1; 78 - } 79 - len %= OPSIZ; 80 - } 81 - 82 - /* Write the last few bytes. */ 83 - while (len > 0) 84 - { 85 - ((unsigned char *) dstp)[0] = c; 86 - dstp += 1; 87 - len -= 1; 88 - } 89 - 90 - return dstpp; 91 - }
+136
arch/parisc/lib/string.S
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * PA-RISC assembly string functions 4 + * 5 + * Copyright (C) 2019 Helge Deller <deller@gmx.de> 6 + */ 7 + 8 + #include <asm/assembly.h> 9 + #include <linux/linkage.h> 10 + 11 + .section .text.hot 12 + .level PA_ASM_LEVEL 13 + 14 + t0 = r20 15 + t1 = r21 16 + t2 = r22 17 + 18 + ENTRY_CFI(strlen, frame=0,no_calls) 19 + or,COND(<>) arg0,r0,ret0 20 + b,l,n .Lstrlen_null_ptr,r0 21 + depwi 0,31,2,ret0 22 + cmpb,COND(<>) arg0,ret0,.Lstrlen_not_aligned 23 + ldw,ma 4(ret0),t0 24 + cmpib,tr 0,r0,.Lstrlen_loop 25 + uxor,nbz r0,t0,r0 26 + .Lstrlen_not_aligned: 27 + uaddcm arg0,ret0,t1 28 + shladd t1,3,r0,t1 29 + mtsar t1 30 + depwi -1,%sar,32,t0 31 + uxor,nbz r0,t0,r0 32 + .Lstrlen_loop: 33 + b,l,n .Lstrlen_end_loop,r0 34 + ldw,ma 4(ret0),t0 35 + cmpib,tr 0,r0,.Lstrlen_loop 36 + uxor,nbz r0,t0,r0 37 + .Lstrlen_end_loop: 38 + extrw,u,<> t0,7,8,r0 39 + addib,tr,n -3,ret0,.Lstrlen_out 40 + extrw,u,<> t0,15,8,r0 41 + addib,tr,n -2,ret0,.Lstrlen_out 42 + extrw,u,<> t0,23,8,r0 43 + addi -1,ret0,ret0 44 + .Lstrlen_out: 45 + bv r0(rp) 46 + uaddcm ret0,arg0,ret0 47 + .Lstrlen_null_ptr: 48 + bv,n r0(rp) 49 + ENDPROC_CFI(strlen) 50 + 51 + 52 + ENTRY_CFI(strcpy, frame=0,no_calls) 53 + ldb 0(arg1),t0 54 + stb t0,0(arg0) 55 + ldo 0(arg0),ret0 56 + ldo 1(arg1),t1 57 + cmpb,= r0,t0,2f 58 + ldo 1(arg0),t2 59 + 1: ldb 0(t1),arg1 60 + stb arg1,0(t2) 61 + ldo 1(t1),t1 62 + cmpb,<> r0,arg1,1b 63 + ldo 1(t2),t2 64 + 2: bv,n r0(rp) 65 + ENDPROC_CFI(strcpy) 66 + 67 + 68 + ENTRY_CFI(strncpy, frame=0,no_calls) 69 + ldb 0(arg1),t0 70 + stb t0,0(arg0) 71 + ldo 1(arg1),t1 72 + ldo 0(arg0),ret0 73 + cmpb,= r0,t0,2f 74 + ldo 1(arg0),arg1 75 + 1: ldo -1(arg2),arg2 76 + cmpb,COND(=),n r0,arg2,2f 77 + ldb 0(t1),arg0 78 + stb arg0,0(arg1) 79 + ldo 1(t1),t1 80 + cmpb,<> r0,arg0,1b 81 + ldo 1(arg1),arg1 82 + 2: bv,n r0(rp) 83 + ENDPROC_CFI(strncpy) 84 + 85 + 86 + ENTRY_CFI(strcat, frame=0,no_calls) 87 + ldb 0(arg0),t0 88 + cmpb,= t0,r0,2f 89 + ldo 
0(arg0),ret0 90 + ldo 1(arg0),arg0 91 + 1: ldb 0(arg0),t1 92 + cmpb,<>,n r0,t1,1b 93 + ldo 1(arg0),arg0 94 + 2: ldb 0(arg1),t2 95 + stb t2,0(arg0) 96 + ldo 1(arg0),arg0 97 + ldb 0(arg1),t0 98 + cmpb,<> r0,t0,2b 99 + ldo 1(arg1),arg1 100 + bv,n r0(rp) 101 + ENDPROC_CFI(strcat) 102 + 103 + 104 + ENTRY_CFI(memset, frame=0,no_calls) 105 + copy arg0,ret0 106 + cmpb,COND(=) r0,arg0,4f 107 + copy arg0,t2 108 + cmpb,COND(=) r0,arg2,4f 109 + ldo -1(arg2),arg3 110 + subi -1,arg3,t0 111 + subi 0,t0,t1 112 + cmpiclr,COND(>=) 0,t1,arg2 113 + ldo -1(t1),arg2 114 + extru arg2,31,2,arg0 115 + 2: stb arg1,0(t2) 116 + ldo 1(t2),t2 117 + addib,>= -1,arg0,2b 118 + ldo -1(arg3),arg3 119 + cmpiclr,COND(<=) 4,arg2,r0 120 + b,l,n 4f,r0 121 + #ifdef CONFIG_64BIT 122 + depd,* r0,63,2,arg2 123 + #else 124 + depw r0,31,2,arg2 125 + #endif 126 + ldo 1(t2),t2 127 + 3: stb arg1,-1(t2) 128 + stb arg1,0(t2) 129 + stb arg1,1(t2) 130 + stb arg1,2(t2) 131 + addib,COND(>) -4,arg2,3b 132 + ldo 4(t2),t2 133 + 4: bv,n r0(rp) 134 + ENDPROC_CFI(memset) 135 + 136 + .end
+1
arch/powerpc/Kconfig
··· 511 511 select KEXEC_CORE 512 512 select HAVE_IMA_KEXEC 513 513 select BUILD_BIN2C 514 + select KEXEC_ELF 514 515 depends on PPC64 515 516 depends on CRYPTO=y 516 517 depends on CRYPTO_SHA256=y
+5 -540
arch/powerpc/kernel/kexec_elf_64.c
··· 23 23 #include <linux/slab.h> 24 24 #include <linux/types.h> 25 25 26 - #define PURGATORY_STACK_SIZE (16 * 1024) 27 - 28 - #define elf_addr_to_cpu elf64_to_cpu 29 - 30 - #ifndef Elf_Rel 31 - #define Elf_Rel Elf64_Rel 32 - #endif /* Elf_Rel */ 33 - 34 - struct elf_info { 35 - /* 36 - * Where the ELF binary contents are kept. 37 - * Memory managed by the user of the struct. 38 - */ 39 - const char *buffer; 40 - 41 - const struct elfhdr *ehdr; 42 - const struct elf_phdr *proghdrs; 43 - struct elf_shdr *sechdrs; 44 - }; 45 - 46 - static inline bool elf_is_elf_file(const struct elfhdr *ehdr) 47 - { 48 - return memcmp(ehdr->e_ident, ELFMAG, SELFMAG) == 0; 49 - } 50 - 51 - static uint64_t elf64_to_cpu(const struct elfhdr *ehdr, uint64_t value) 52 - { 53 - if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB) 54 - value = le64_to_cpu(value); 55 - else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) 56 - value = be64_to_cpu(value); 57 - 58 - return value; 59 - } 60 - 61 - static uint16_t elf16_to_cpu(const struct elfhdr *ehdr, uint16_t value) 62 - { 63 - if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB) 64 - value = le16_to_cpu(value); 65 - else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) 66 - value = be16_to_cpu(value); 67 - 68 - return value; 69 - } 70 - 71 - static uint32_t elf32_to_cpu(const struct elfhdr *ehdr, uint32_t value) 72 - { 73 - if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB) 74 - value = le32_to_cpu(value); 75 - else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) 76 - value = be32_to_cpu(value); 77 - 78 - return value; 79 - } 80 - 81 - /** 82 - * elf_is_ehdr_sane - check that it is safe to use the ELF header 83 - * @buf_len: size of the buffer in which the ELF file is loaded. 
84 - */ 85 - static bool elf_is_ehdr_sane(const struct elfhdr *ehdr, size_t buf_len) 86 - { 87 - if (ehdr->e_phnum > 0 && ehdr->e_phentsize != sizeof(struct elf_phdr)) { 88 - pr_debug("Bad program header size.\n"); 89 - return false; 90 - } else if (ehdr->e_shnum > 0 && 91 - ehdr->e_shentsize != sizeof(struct elf_shdr)) { 92 - pr_debug("Bad section header size.\n"); 93 - return false; 94 - } else if (ehdr->e_ident[EI_VERSION] != EV_CURRENT || 95 - ehdr->e_version != EV_CURRENT) { 96 - pr_debug("Unknown ELF version.\n"); 97 - return false; 98 - } 99 - 100 - if (ehdr->e_phoff > 0 && ehdr->e_phnum > 0) { 101 - size_t phdr_size; 102 - 103 - /* 104 - * e_phnum is at most 65535 so calculating the size of the 105 - * program header cannot overflow. 106 - */ 107 - phdr_size = sizeof(struct elf_phdr) * ehdr->e_phnum; 108 - 109 - /* Sanity check the program header table location. */ 110 - if (ehdr->e_phoff + phdr_size < ehdr->e_phoff) { 111 - pr_debug("Program headers at invalid location.\n"); 112 - return false; 113 - } else if (ehdr->e_phoff + phdr_size > buf_len) { 114 - pr_debug("Program headers truncated.\n"); 115 - return false; 116 - } 117 - } 118 - 119 - if (ehdr->e_shoff > 0 && ehdr->e_shnum > 0) { 120 - size_t shdr_size; 121 - 122 - /* 123 - * e_shnum is at most 65536 so calculating 124 - * the size of the section header cannot overflow. 125 - */ 126 - shdr_size = sizeof(struct elf_shdr) * ehdr->e_shnum; 127 - 128 - /* Sanity check the section header table location. 
*/ 129 - if (ehdr->e_shoff + shdr_size < ehdr->e_shoff) { 130 - pr_debug("Section headers at invalid location.\n"); 131 - return false; 132 - } else if (ehdr->e_shoff + shdr_size > buf_len) { 133 - pr_debug("Section headers truncated.\n"); 134 - return false; 135 - } 136 - } 137 - 138 - return true; 139 - } 140 - 141 - static int elf_read_ehdr(const char *buf, size_t len, struct elfhdr *ehdr) 142 - { 143 - struct elfhdr *buf_ehdr; 144 - 145 - if (len < sizeof(*buf_ehdr)) { 146 - pr_debug("Buffer is too small to hold ELF header.\n"); 147 - return -ENOEXEC; 148 - } 149 - 150 - memset(ehdr, 0, sizeof(*ehdr)); 151 - memcpy(ehdr->e_ident, buf, sizeof(ehdr->e_ident)); 152 - if (!elf_is_elf_file(ehdr)) { 153 - pr_debug("No ELF header magic.\n"); 154 - return -ENOEXEC; 155 - } 156 - 157 - if (ehdr->e_ident[EI_CLASS] != ELF_CLASS) { 158 - pr_debug("Not a supported ELF class.\n"); 159 - return -ENOEXEC; 160 - } else if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB && 161 - ehdr->e_ident[EI_DATA] != ELFDATA2MSB) { 162 - pr_debug("Not a supported ELF data format.\n"); 163 - return -ENOEXEC; 164 - } 165 - 166 - buf_ehdr = (struct elfhdr *) buf; 167 - if (elf16_to_cpu(ehdr, buf_ehdr->e_ehsize) != sizeof(*buf_ehdr)) { 168 - pr_debug("Bad ELF header size.\n"); 169 - return -ENOEXEC; 170 - } 171 - 172 - ehdr->e_type = elf16_to_cpu(ehdr, buf_ehdr->e_type); 173 - ehdr->e_machine = elf16_to_cpu(ehdr, buf_ehdr->e_machine); 174 - ehdr->e_version = elf32_to_cpu(ehdr, buf_ehdr->e_version); 175 - ehdr->e_entry = elf_addr_to_cpu(ehdr, buf_ehdr->e_entry); 176 - ehdr->e_phoff = elf_addr_to_cpu(ehdr, buf_ehdr->e_phoff); 177 - ehdr->e_shoff = elf_addr_to_cpu(ehdr, buf_ehdr->e_shoff); 178 - ehdr->e_flags = elf32_to_cpu(ehdr, buf_ehdr->e_flags); 179 - ehdr->e_phentsize = elf16_to_cpu(ehdr, buf_ehdr->e_phentsize); 180 - ehdr->e_phnum = elf16_to_cpu(ehdr, buf_ehdr->e_phnum); 181 - ehdr->e_shentsize = elf16_to_cpu(ehdr, buf_ehdr->e_shentsize); 182 - ehdr->e_shnum = elf16_to_cpu(ehdr, buf_ehdr->e_shnum); 
183 - ehdr->e_shstrndx = elf16_to_cpu(ehdr, buf_ehdr->e_shstrndx); 184 - 185 - return elf_is_ehdr_sane(ehdr, len) ? 0 : -ENOEXEC; 186 - } 187 - 188 - /** 189 - * elf_is_phdr_sane - check that it is safe to use the program header 190 - * @buf_len: size of the buffer in which the ELF file is loaded. 191 - */ 192 - static bool elf_is_phdr_sane(const struct elf_phdr *phdr, size_t buf_len) 193 - { 194 - 195 - if (phdr->p_offset + phdr->p_filesz < phdr->p_offset) { 196 - pr_debug("ELF segment location wraps around.\n"); 197 - return false; 198 - } else if (phdr->p_offset + phdr->p_filesz > buf_len) { 199 - pr_debug("ELF segment not in file.\n"); 200 - return false; 201 - } else if (phdr->p_paddr + phdr->p_memsz < phdr->p_paddr) { 202 - pr_debug("ELF segment address wraps around.\n"); 203 - return false; 204 - } 205 - 206 - return true; 207 - } 208 - 209 - static int elf_read_phdr(const char *buf, size_t len, struct elf_info *elf_info, 210 - int idx) 211 - { 212 - /* Override the const in proghdrs, we are the ones doing the loading. */ 213 - struct elf_phdr *phdr = (struct elf_phdr *) &elf_info->proghdrs[idx]; 214 - const char *pbuf; 215 - struct elf_phdr *buf_phdr; 216 - 217 - pbuf = buf + elf_info->ehdr->e_phoff + (idx * sizeof(*buf_phdr)); 218 - buf_phdr = (struct elf_phdr *) pbuf; 219 - 220 - phdr->p_type = elf32_to_cpu(elf_info->ehdr, buf_phdr->p_type); 221 - phdr->p_offset = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_offset); 222 - phdr->p_paddr = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_paddr); 223 - phdr->p_vaddr = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_vaddr); 224 - phdr->p_flags = elf32_to_cpu(elf_info->ehdr, buf_phdr->p_flags); 225 - 226 - /* 227 - * The following fields have a type equivalent to Elf_Addr 228 - * both in 32 bit and 64 bit ELF. 
229 - */ 230 - phdr->p_filesz = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_filesz); 231 - phdr->p_memsz = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_memsz); 232 - phdr->p_align = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_align); 233 - 234 - return elf_is_phdr_sane(phdr, len) ? 0 : -ENOEXEC; 235 - } 236 - 237 - /** 238 - * elf_read_phdrs - read the program headers from the buffer 239 - * 240 - * This function assumes that the program header table was checked for sanity. 241 - * Use elf_is_ehdr_sane() if it wasn't. 242 - */ 243 - static int elf_read_phdrs(const char *buf, size_t len, 244 - struct elf_info *elf_info) 245 - { 246 - size_t phdr_size, i; 247 - const struct elfhdr *ehdr = elf_info->ehdr; 248 - 249 - /* 250 - * e_phnum is at most 65535 so calculating the size of the 251 - * program header cannot overflow. 252 - */ 253 - phdr_size = sizeof(struct elf_phdr) * ehdr->e_phnum; 254 - 255 - elf_info->proghdrs = kzalloc(phdr_size, GFP_KERNEL); 256 - if (!elf_info->proghdrs) 257 - return -ENOMEM; 258 - 259 - for (i = 0; i < ehdr->e_phnum; i++) { 260 - int ret; 261 - 262 - ret = elf_read_phdr(buf, len, elf_info, i); 263 - if (ret) { 264 - kfree(elf_info->proghdrs); 265 - elf_info->proghdrs = NULL; 266 - return ret; 267 - } 268 - } 269 - 270 - return 0; 271 - } 272 - 273 - /** 274 - * elf_is_shdr_sane - check that it is safe to use the section header 275 - * @buf_len: size of the buffer in which the ELF file is loaded. 276 - */ 277 - static bool elf_is_shdr_sane(const struct elf_shdr *shdr, size_t buf_len) 278 - { 279 - bool size_ok; 280 - 281 - /* SHT_NULL headers have undefined values, so we can't check them. 
*/ 282 - if (shdr->sh_type == SHT_NULL) 283 - return true; 284 - 285 - /* Now verify sh_entsize */ 286 - switch (shdr->sh_type) { 287 - case SHT_SYMTAB: 288 - size_ok = shdr->sh_entsize == sizeof(Elf_Sym); 289 - break; 290 - case SHT_RELA: 291 - size_ok = shdr->sh_entsize == sizeof(Elf_Rela); 292 - break; 293 - case SHT_DYNAMIC: 294 - size_ok = shdr->sh_entsize == sizeof(Elf_Dyn); 295 - break; 296 - case SHT_REL: 297 - size_ok = shdr->sh_entsize == sizeof(Elf_Rel); 298 - break; 299 - case SHT_NOTE: 300 - case SHT_PROGBITS: 301 - case SHT_HASH: 302 - case SHT_NOBITS: 303 - default: 304 - /* 305 - * This is a section whose entsize requirements 306 - * I don't care about. If I don't know about 307 - * the section I can't care about it's entsize 308 - * requirements. 309 - */ 310 - size_ok = true; 311 - break; 312 - } 313 - 314 - if (!size_ok) { 315 - pr_debug("ELF section with wrong entry size.\n"); 316 - return false; 317 - } else if (shdr->sh_addr + shdr->sh_size < shdr->sh_addr) { 318 - pr_debug("ELF section address wraps around.\n"); 319 - return false; 320 - } 321 - 322 - if (shdr->sh_type != SHT_NOBITS) { 323 - if (shdr->sh_offset + shdr->sh_size < shdr->sh_offset) { 324 - pr_debug("ELF section location wraps around.\n"); 325 - return false; 326 - } else if (shdr->sh_offset + shdr->sh_size > buf_len) { 327 - pr_debug("ELF section not in file.\n"); 328 - return false; 329 - } 330 - } 331 - 332 - return true; 333 - } 334 - 335 - static int elf_read_shdr(const char *buf, size_t len, struct elf_info *elf_info, 336 - int idx) 337 - { 338 - struct elf_shdr *shdr = &elf_info->sechdrs[idx]; 339 - const struct elfhdr *ehdr = elf_info->ehdr; 340 - const char *sbuf; 341 - struct elf_shdr *buf_shdr; 342 - 343 - sbuf = buf + ehdr->e_shoff + idx * sizeof(*buf_shdr); 344 - buf_shdr = (struct elf_shdr *) sbuf; 345 - 346 - shdr->sh_name = elf32_to_cpu(ehdr, buf_shdr->sh_name); 347 - shdr->sh_type = elf32_to_cpu(ehdr, buf_shdr->sh_type); 348 - shdr->sh_addr = 
elf_addr_to_cpu(ehdr, buf_shdr->sh_addr); 349 - shdr->sh_offset = elf_addr_to_cpu(ehdr, buf_shdr->sh_offset); 350 - shdr->sh_link = elf32_to_cpu(ehdr, buf_shdr->sh_link); 351 - shdr->sh_info = elf32_to_cpu(ehdr, buf_shdr->sh_info); 352 - 353 - /* 354 - * The following fields have a type equivalent to Elf_Addr 355 - * both in 32 bit and 64 bit ELF. 356 - */ 357 - shdr->sh_flags = elf_addr_to_cpu(ehdr, buf_shdr->sh_flags); 358 - shdr->sh_size = elf_addr_to_cpu(ehdr, buf_shdr->sh_size); 359 - shdr->sh_addralign = elf_addr_to_cpu(ehdr, buf_shdr->sh_addralign); 360 - shdr->sh_entsize = elf_addr_to_cpu(ehdr, buf_shdr->sh_entsize); 361 - 362 - return elf_is_shdr_sane(shdr, len) ? 0 : -ENOEXEC; 363 - } 364 - 365 - /** 366 - * elf_read_shdrs - read the section headers from the buffer 367 - * 368 - * This function assumes that the section header table was checked for sanity. 369 - * Use elf_is_ehdr_sane() if it wasn't. 370 - */ 371 - static int elf_read_shdrs(const char *buf, size_t len, 372 - struct elf_info *elf_info) 373 - { 374 - size_t shdr_size, i; 375 - 376 - /* 377 - * e_shnum is at most 65536 so calculating 378 - * the size of the section header cannot overflow. 379 - */ 380 - shdr_size = sizeof(struct elf_shdr) * elf_info->ehdr->e_shnum; 381 - 382 - elf_info->sechdrs = kzalloc(shdr_size, GFP_KERNEL); 383 - if (!elf_info->sechdrs) 384 - return -ENOMEM; 385 - 386 - for (i = 0; i < elf_info->ehdr->e_shnum; i++) { 387 - int ret; 388 - 389 - ret = elf_read_shdr(buf, len, elf_info, i); 390 - if (ret) { 391 - kfree(elf_info->sechdrs); 392 - elf_info->sechdrs = NULL; 393 - return ret; 394 - } 395 - } 396 - 397 - return 0; 398 - } 399 - 400 - /** 401 - * elf_read_from_buffer - read ELF file and sets up ELF header and ELF info 402 - * @buf: Buffer to read ELF file from. 403 - * @len: Size of @buf. 404 - * @ehdr: Pointer to existing struct which will be populated. 405 - * @elf_info: Pointer to existing struct which will be populated. 
406 - * 407 - * This function allows reading ELF files with different byte order than 408 - * the kernel, byte-swapping the fields as needed. 409 - * 410 - * Return: 411 - * On success returns 0, and the caller should call elf_free_info(elf_info) to 412 - * free the memory allocated for the section and program headers. 413 - */ 414 - int elf_read_from_buffer(const char *buf, size_t len, struct elfhdr *ehdr, 415 - struct elf_info *elf_info) 416 - { 417 - int ret; 418 - 419 - ret = elf_read_ehdr(buf, len, ehdr); 420 - if (ret) 421 - return ret; 422 - 423 - elf_info->buffer = buf; 424 - elf_info->ehdr = ehdr; 425 - if (ehdr->e_phoff > 0 && ehdr->e_phnum > 0) { 426 - ret = elf_read_phdrs(buf, len, elf_info); 427 - if (ret) 428 - return ret; 429 - } 430 - if (ehdr->e_shoff > 0 && ehdr->e_shnum > 0) { 431 - ret = elf_read_shdrs(buf, len, elf_info); 432 - if (ret) { 433 - kfree(elf_info->proghdrs); 434 - return ret; 435 - } 436 - } 437 - 438 - return 0; 439 - } 440 - 441 - /** 442 - * elf_free_info - free memory allocated by elf_read_from_buffer 443 - */ 444 - void elf_free_info(struct elf_info *elf_info) 445 - { 446 - kfree(elf_info->proghdrs); 447 - kfree(elf_info->sechdrs); 448 - memset(elf_info, 0, sizeof(*elf_info)); 449 - } 450 - /** 451 - * build_elf_exec_info - read ELF executable and check that we can use it 452 - */ 453 - static int build_elf_exec_info(const char *buf, size_t len, struct elfhdr *ehdr, 454 - struct elf_info *elf_info) 455 - { 456 - int i; 457 - int ret; 458 - 459 - ret = elf_read_from_buffer(buf, len, ehdr, elf_info); 460 - if (ret) 461 - return ret; 462 - 463 - /* Big endian vmlinux has type ET_DYN. */ 464 - if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) { 465 - pr_err("Not an ELF executable.\n"); 466 - goto error; 467 - } else if (!elf_info->proghdrs) { 468 - pr_err("No ELF program header.\n"); 469 - goto error; 470 - } 471 - 472 - for (i = 0; i < ehdr->e_phnum; i++) { 473 - /* 474 - * Kexec does not support loading interpreters. 
475 - * In addition this check keeps us from attempting 476 - * to kexec ordinay executables. 477 - */ 478 - if (elf_info->proghdrs[i].p_type == PT_INTERP) { 479 - pr_err("Requires an ELF interpreter.\n"); 480 - goto error; 481 - } 482 - } 483 - 484 - return 0; 485 - error: 486 - elf_free_info(elf_info); 487 - return -ENOEXEC; 488 - } 489 - 490 - static int elf64_probe(const char *buf, unsigned long len) 491 - { 492 - struct elfhdr ehdr; 493 - struct elf_info elf_info; 494 - int ret; 495 - 496 - ret = build_elf_exec_info(buf, len, &ehdr, &elf_info); 497 - if (ret) 498 - return ret; 499 - 500 - elf_free_info(&elf_info); 501 - 502 - return elf_check_arch(&ehdr) ? 0 : -ENOEXEC; 503 - } 504 - 505 - /** 506 - * elf_exec_load - load ELF executable image 507 - * @lowest_load_addr: On return, will be the address where the first PT_LOAD 508 - * section will be loaded in memory. 509 - * 510 - * Return: 511 - * 0 on success, negative value on failure. 512 - */ 513 - static int elf_exec_load(struct kimage *image, struct elfhdr *ehdr, 514 - struct elf_info *elf_info, 515 - unsigned long *lowest_load_addr) 516 - { 517 - unsigned long base = 0, lowest_addr = UINT_MAX; 518 - int ret; 519 - size_t i; 520 - struct kexec_buf kbuf = { .image = image, .buf_max = ppc64_rma_size, 521 - .top_down = false }; 522 - 523 - /* Read in the PT_LOAD segments. 
*/ 524 - for (i = 0; i < ehdr->e_phnum; i++) { 525 - unsigned long load_addr; 526 - size_t size; 527 - const struct elf_phdr *phdr; 528 - 529 - phdr = &elf_info->proghdrs[i]; 530 - if (phdr->p_type != PT_LOAD) 531 - continue; 532 - 533 - size = phdr->p_filesz; 534 - if (size > phdr->p_memsz) 535 - size = phdr->p_memsz; 536 - 537 - kbuf.buffer = (void *) elf_info->buffer + phdr->p_offset; 538 - kbuf.bufsz = size; 539 - kbuf.memsz = phdr->p_memsz; 540 - kbuf.buf_align = phdr->p_align; 541 - kbuf.buf_min = phdr->p_paddr + base; 542 - kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; 543 - ret = kexec_add_buffer(&kbuf); 544 - if (ret) 545 - goto out; 546 - load_addr = kbuf.mem; 547 - 548 - if (load_addr < lowest_addr) 549 - lowest_addr = load_addr; 550 - } 551 - 552 - /* Update entry point to reflect new load address. */ 553 - ehdr->e_entry += base; 554 - 555 - *lowest_load_addr = lowest_addr; 556 - ret = 0; 557 - out: 558 - return ret; 559 - } 560 - 561 26 static void *elf64_load(struct kimage *image, char *kernel_buf, 562 27 unsigned long kernel_len, char *initrd, 563 28 unsigned long initrd_len, char *cmdline, ··· 35 570 void *fdt; 36 571 const void *slave_code; 37 572 struct elfhdr ehdr; 38 - struct elf_info elf_info; 573 + struct kexec_elf_info elf_info; 39 574 struct kexec_buf kbuf = { .image = image, .buf_min = 0, 40 575 .buf_max = ppc64_rma_size }; 41 576 struct kexec_buf pbuf = { .image = image, .buf_min = 0, 42 577 .buf_max = ppc64_rma_size, .top_down = true, 43 578 .mem = KEXEC_BUF_MEM_UNKNOWN }; 44 579 45 - ret = build_elf_exec_info(kernel_buf, kernel_len, &ehdr, &elf_info); 580 + ret = kexec_build_elf_info(kernel_buf, kernel_len, &ehdr, &elf_info); 46 581 if (ret) 47 582 goto out; 48 583 49 - ret = elf_exec_load(image, &ehdr, &elf_info, &kernel_load_addr); 584 + ret = kexec_elf_load(image, &ehdr, &elf_info, &kbuf, &kernel_load_addr); 50 585 if (ret) 51 586 goto out; 52 587 ··· 113 648 pr_err("Error setting up the purgatory.\n"); 114 649 115 650 out: 116 - 
elf_free_info(&elf_info); 651 + kexec_free_elf_info(&elf_info); 117 652 118 653 /* Make kimage_file_post_load_cleanup free the fdt buffer for us. */ 119 654 return ret ? ERR_PTR(ret) : fdt; 120 655 } 121 656 122 657 const struct kexec_file_ops kexec_elf64_ops = { 123 - .probe = elf64_probe, 658 + .probe = kexec_elf_probe, 124 659 .load = elf64_load, 125 660 };
+27 -3
drivers/parisc/dino.c
··· 6 6 ** (c) Copyright 1999 SuSE GmbH 7 7 ** (c) Copyright 1999,2000 Hewlett-Packard Company 8 8 ** (c) Copyright 2000 Grant Grundler 9 - ** (c) Copyright 2006 Helge Deller 9 + ** (c) Copyright 2006-2019 Helge Deller 10 10 ** 11 11 ** 12 12 ** This module provides access to Dino PCI bus (config/IOport spaces) ··· 154 154 static inline struct dino_device *DINO_DEV(struct pci_hba_data *hba) 155 155 { 156 156 return container_of(hba, struct dino_device, hba); 157 + } 158 + 159 + /* Check if PCI device is behind a Card-mode Dino. */ 160 + static int pci_dev_is_behind_card_dino(struct pci_dev *dev) 161 + { 162 + struct dino_device *dino_dev; 163 + 164 + dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge)); 165 + return is_card_dino(&dino_dev->hba.dev->id); 157 166 } 158 167 159 168 /* ··· 446 437 } 447 438 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus ); 448 439 440 + #ifdef CONFIG_TULIP 441 + static void pci_fixup_tulip(struct pci_dev *dev) 442 + { 443 + if (!pci_dev_is_behind_card_dino(dev)) 444 + return; 445 + if (!(pci_resource_flags(dev, 1) & IORESOURCE_MEM)) 446 + return; 447 + pr_warn("%s: HP HSC-PCI Cards with card-mode Dino not yet supported.\n", 448 + pci_name(dev)); 449 + /* Disable this card by zeroing the PCI resources */ 450 + memset(&dev->resource[0], 0, sizeof(dev->resource[0])); 451 + memset(&dev->resource[1], 0, sizeof(dev->resource[1])); 452 + } 453 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_DEC, PCI_ANY_ID, pci_fixup_tulip); 454 + #endif /* CONFIG_TULIP */ 449 455 450 456 static void __init 451 457 dino_bios_init(void) ··· 887 863 #define CUJO_RAVEN_BADPAGE 0x01003000UL 888 864 #define CUJO_FIREHAWK_BADPAGE 0x01607000UL 889 865 890 - static const char *dino_vers[] = { 866 + static const char dino_vers[][4] = { 891 867 "2.0", 892 868 "2.1", 893 869 "3.0", 894 870 "3.1" 895 871 }; 896 872 897 - static const char *cujo_vers[] = { 873 + static const char cujo_vers[][4] = { 898 874 "1.0", 899 875 "2.0" 
900 876 };
+5 -5
drivers/parisc/eisa_enumerator.c
··· 93 93 res->start = mem_parent->start + get_24(buf+len+2); 94 94 res->end = res->start + get_16(buf+len+5)*1024; 95 95 res->flags = IORESOURCE_MEM; 96 - printk("memory %lx-%lx ", (unsigned long)res->start, (unsigned long)res->end); 96 + pr_cont("memory %pR ", res); 97 97 result = request_resource(mem_parent, res); 98 98 if (result < 0) { 99 99 printk(KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n"); ··· 123 123 for (i=0;i<HPEE_IRQ_MAX_ENT;i++) { 124 124 c = get_8(buf+len); 125 125 126 - printk("IRQ %d ", c & HPEE_IRQ_CHANNEL_MASK); 126 + pr_cont("IRQ %d ", c & HPEE_IRQ_CHANNEL_MASK); 127 127 if (c & HPEE_IRQ_TRIG_LEVEL) { 128 128 eisa_make_irq_level(c & HPEE_IRQ_CHANNEL_MASK); 129 129 } else { ··· 153 153 154 154 for (i=0;i<HPEE_DMA_MAX_ENT;i++) { 155 155 c = get_8(buf+len); 156 - printk("DMA %d ", c&HPEE_DMA_CHANNEL_MASK); 156 + pr_cont("DMA %d ", c&HPEE_DMA_CHANNEL_MASK); 157 157 /* fixme: maybe initialize the dma channel withthe timing ? */ 158 158 len+=2; 159 159 if (!(c & HPEE_DMA_MORE)) { ··· 183 183 res->start = get_16(buf+len+1); 184 184 res->end = get_16(buf+len+1)+(c&HPEE_PORT_SIZE_MASK)+1; 185 185 res->flags = IORESOURCE_IO; 186 - printk("ioports %lx-%lx ", (unsigned long)res->start, (unsigned long)res->end); 186 + pr_cont("ioports %pR ", res); 187 187 result = request_resource(io_parent, res); 188 188 if (result < 0) { 189 189 printk(KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n"); ··· 401 401 } 402 402 pos = p0 + function_len; 403 403 } 404 - printk("\n"); 404 + pr_cont("\n"); 405 405 if (!id_string_used) { 406 406 kfree(board); 407 407 }
+5 -6
drivers/parisc/hppb.c
··· 61 61 } 62 62 card = card->next; 63 63 } 64 - printk(KERN_INFO "Found GeckoBoa at 0x%llx\n", 65 - (unsigned long long) dev->hpa.start); 66 64 67 65 card->hpa = dev->hpa.start; 68 66 card->mmio_region.name = "HP-PB Bus"; ··· 70 72 card->mmio_region.end = gsc_readl(dev->hpa.start + IO_IO_HIGH) - 1; 71 73 72 74 status = ccio_request_resource(dev, &card->mmio_region); 73 - if(status < 0) { 74 - printk(KERN_ERR "%s: failed to claim HP-PB bus space (%pR)\n", 75 - __FILE__, &card->mmio_region); 76 - } 75 + 76 + pr_info("Found GeckoBoa at %pap, bus space %pR,%s claimed.\n", 77 + &dev->hpa.start, 78 + &card->mmio_region, 79 + (status < 0) ? " not":"" ); 77 80 78 81 return 0; 79 82 }
+23
include/linux/kexec.h
··· 216 216 void **addr, unsigned long *sz); 217 217 #endif /* CONFIG_KEXEC_FILE */ 218 218 219 + #ifdef CONFIG_KEXEC_ELF 220 + struct kexec_elf_info { 221 + /* 222 + * Where the ELF binary contents are kept. 223 + * Memory managed by the user of the struct. 224 + */ 225 + const char *buffer; 226 + 227 + const struct elfhdr *ehdr; 228 + const struct elf_phdr *proghdrs; 229 + }; 230 + 231 + int kexec_build_elf_info(const char *buf, size_t len, struct elfhdr *ehdr, 232 + struct kexec_elf_info *elf_info); 233 + 234 + int kexec_elf_load(struct kimage *image, struct elfhdr *ehdr, 235 + struct kexec_elf_info *elf_info, 236 + struct kexec_buf *kbuf, 237 + unsigned long *lowest_load_addr); 238 + 239 + void kexec_free_elf_info(struct kexec_elf_info *elf_info); 240 + int kexec_elf_probe(const char *buf, unsigned long len); 241 + #endif 219 242 struct kimage { 220 243 kimage_entry_t head; 221 244 kimage_entry_t *entry;
+1
include/uapi/linux/kexec.h
··· 31 31 #define KEXEC_ARCH_DEFAULT ( 0 << 16) 32 32 #define KEXEC_ARCH_386 ( 3 << 16) 33 33 #define KEXEC_ARCH_68K ( 4 << 16) 34 + #define KEXEC_ARCH_PARISC (15 << 16) 34 35 #define KEXEC_ARCH_X86_64 (62 << 16) 35 36 #define KEXEC_ARCH_PPC (20 << 16) 36 37 #define KEXEC_ARCH_PPC64 (21 << 16)
+1
kernel/Makefile
··· 64 64 obj-$(CONFIG_KEXEC_CORE) += kexec_core.o 65 65 obj-$(CONFIG_KEXEC) += kexec.o 66 66 obj-$(CONFIG_KEXEC_FILE) += kexec_file.o 67 + obj-$(CONFIG_KEXEC_ELF) += kexec_elf.o 67 68 obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o 68 69 obj-$(CONFIG_COMPAT) += compat.o 69 70 obj-$(CONFIG_CGROUPS) += cgroup/
+430
kernel/kexec_elf.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Load ELF vmlinux file for the kexec_file_load syscall. 4 + * 5 + * Copyright (C) 2004 Adam Litke (agl@us.ibm.com) 6 + * Copyright (C) 2004 IBM Corp. 7 + * Copyright (C) 2005 R Sharada (sharada@in.ibm.com) 8 + * Copyright (C) 2006 Mohan Kumar M (mohan@in.ibm.com) 9 + * Copyright (C) 2016 IBM Corporation 10 + * 11 + * Based on kexec-tools' kexec-elf-exec.c and kexec-elf-ppc64.c. 12 + * Heavily modified for the kernel by 13 + * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>. 14 + */ 15 + 16 + #define pr_fmt(fmt) "kexec_elf: " fmt 17 + 18 + #include <linux/elf.h> 19 + #include <linux/kexec.h> 20 + #include <linux/module.h> 21 + #include <linux/slab.h> 22 + #include <linux/types.h> 23 + 24 + static inline bool elf_is_elf_file(const struct elfhdr *ehdr) 25 + { 26 + return memcmp(ehdr->e_ident, ELFMAG, SELFMAG) == 0; 27 + } 28 + 29 + static uint64_t elf64_to_cpu(const struct elfhdr *ehdr, uint64_t value) 30 + { 31 + if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB) 32 + value = le64_to_cpu(value); 33 + else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) 34 + value = be64_to_cpu(value); 35 + 36 + return value; 37 + } 38 + 39 + static uint32_t elf32_to_cpu(const struct elfhdr *ehdr, uint32_t value) 40 + { 41 + if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB) 42 + value = le32_to_cpu(value); 43 + else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) 44 + value = be32_to_cpu(value); 45 + 46 + return value; 47 + } 48 + 49 + static uint16_t elf16_to_cpu(const struct elfhdr *ehdr, uint16_t value) 50 + { 51 + if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB) 52 + value = le16_to_cpu(value); 53 + else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) 54 + value = be16_to_cpu(value); 55 + 56 + return value; 57 + } 58 + 59 + /** 60 + * elf_is_ehdr_sane - check that it is safe to use the ELF header 61 + * @buf_len: size of the buffer in which the ELF file is loaded. 
62 + */ 63 + static bool elf_is_ehdr_sane(const struct elfhdr *ehdr, size_t buf_len) 64 + { 65 + if (ehdr->e_phnum > 0 && ehdr->e_phentsize != sizeof(struct elf_phdr)) { 66 + pr_debug("Bad program header size.\n"); 67 + return false; 68 + } else if (ehdr->e_shnum > 0 && 69 + ehdr->e_shentsize != sizeof(struct elf_shdr)) { 70 + pr_debug("Bad section header size.\n"); 71 + return false; 72 + } else if (ehdr->e_ident[EI_VERSION] != EV_CURRENT || 73 + ehdr->e_version != EV_CURRENT) { 74 + pr_debug("Unknown ELF version.\n"); 75 + return false; 76 + } 77 + 78 + if (ehdr->e_phoff > 0 && ehdr->e_phnum > 0) { 79 + size_t phdr_size; 80 + 81 + /* 82 + * e_phnum is at most 65535 so calculating the size of the 83 + * program header cannot overflow. 84 + */ 85 + phdr_size = sizeof(struct elf_phdr) * ehdr->e_phnum; 86 + 87 + /* Sanity check the program header table location. */ 88 + if (ehdr->e_phoff + phdr_size < ehdr->e_phoff) { 89 + pr_debug("Program headers at invalid location.\n"); 90 + return false; 91 + } else if (ehdr->e_phoff + phdr_size > buf_len) { 92 + pr_debug("Program headers truncated.\n"); 93 + return false; 94 + } 95 + } 96 + 97 + if (ehdr->e_shoff > 0 && ehdr->e_shnum > 0) { 98 + size_t shdr_size; 99 + 100 + /* 101 + * e_shnum is at most 65536 so calculating 102 + * the size of the section header cannot overflow. 103 + */ 104 + shdr_size = sizeof(struct elf_shdr) * ehdr->e_shnum; 105 + 106 + /* Sanity check the section header table location. 
*/ 107 + if (ehdr->e_shoff + shdr_size < ehdr->e_shoff) { 108 + pr_debug("Section headers at invalid location.\n"); 109 + return false; 110 + } else if (ehdr->e_shoff + shdr_size > buf_len) { 111 + pr_debug("Section headers truncated.\n"); 112 + return false; 113 + } 114 + } 115 + 116 + return true; 117 + } 118 + 119 + static int elf_read_ehdr(const char *buf, size_t len, struct elfhdr *ehdr) 120 + { 121 + struct elfhdr *buf_ehdr; 122 + 123 + if (len < sizeof(*buf_ehdr)) { 124 + pr_debug("Buffer is too small to hold ELF header.\n"); 125 + return -ENOEXEC; 126 + } 127 + 128 + memset(ehdr, 0, sizeof(*ehdr)); 129 + memcpy(ehdr->e_ident, buf, sizeof(ehdr->e_ident)); 130 + if (!elf_is_elf_file(ehdr)) { 131 + pr_debug("No ELF header magic.\n"); 132 + return -ENOEXEC; 133 + } 134 + 135 + if (ehdr->e_ident[EI_CLASS] != ELF_CLASS) { 136 + pr_debug("Not a supported ELF class.\n"); 137 + return -ENOEXEC; 138 + } else if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB && 139 + ehdr->e_ident[EI_DATA] != ELFDATA2MSB) { 140 + pr_debug("Not a supported ELF data format.\n"); 141 + return -ENOEXEC; 142 + } 143 + 144 + buf_ehdr = (struct elfhdr *) buf; 145 + if (elf16_to_cpu(ehdr, buf_ehdr->e_ehsize) != sizeof(*buf_ehdr)) { 146 + pr_debug("Bad ELF header size.\n"); 147 + return -ENOEXEC; 148 + } 149 + 150 + ehdr->e_type = elf16_to_cpu(ehdr, buf_ehdr->e_type); 151 + ehdr->e_machine = elf16_to_cpu(ehdr, buf_ehdr->e_machine); 152 + ehdr->e_version = elf32_to_cpu(ehdr, buf_ehdr->e_version); 153 + ehdr->e_flags = elf32_to_cpu(ehdr, buf_ehdr->e_flags); 154 + ehdr->e_phentsize = elf16_to_cpu(ehdr, buf_ehdr->e_phentsize); 155 + ehdr->e_phnum = elf16_to_cpu(ehdr, buf_ehdr->e_phnum); 156 + ehdr->e_shentsize = elf16_to_cpu(ehdr, buf_ehdr->e_shentsize); 157 + ehdr->e_shnum = elf16_to_cpu(ehdr, buf_ehdr->e_shnum); 158 + ehdr->e_shstrndx = elf16_to_cpu(ehdr, buf_ehdr->e_shstrndx); 159 + 160 + switch (ehdr->e_ident[EI_CLASS]) { 161 + case ELFCLASS64: 162 + ehdr->e_entry = elf64_to_cpu(ehdr, 
buf_ehdr->e_entry); 163 + ehdr->e_phoff = elf64_to_cpu(ehdr, buf_ehdr->e_phoff); 164 + ehdr->e_shoff = elf64_to_cpu(ehdr, buf_ehdr->e_shoff); 165 + break; 166 + 167 + case ELFCLASS32: 168 + ehdr->e_entry = elf32_to_cpu(ehdr, buf_ehdr->e_entry); 169 + ehdr->e_phoff = elf32_to_cpu(ehdr, buf_ehdr->e_phoff); 170 + ehdr->e_shoff = elf32_to_cpu(ehdr, buf_ehdr->e_shoff); 171 + break; 172 + 173 + default: 174 + pr_debug("Unknown ELF class.\n"); 175 + return -EINVAL; 176 + } 177 + 178 + return elf_is_ehdr_sane(ehdr, len) ? 0 : -ENOEXEC; 179 + } 180 + 181 + /** 182 + * elf_is_phdr_sane - check that it is safe to use the program header 183 + * @buf_len: size of the buffer in which the ELF file is loaded. 184 + */ 185 + static bool elf_is_phdr_sane(const struct elf_phdr *phdr, size_t buf_len) 186 + { 187 + 188 + if (phdr->p_offset + phdr->p_filesz < phdr->p_offset) { 189 + pr_debug("ELF segment location wraps around.\n"); 190 + return false; 191 + } else if (phdr->p_offset + phdr->p_filesz > buf_len) { 192 + pr_debug("ELF segment not in file.\n"); 193 + return false; 194 + } else if (phdr->p_paddr + phdr->p_memsz < phdr->p_paddr) { 195 + pr_debug("ELF segment address wraps around.\n"); 196 + return false; 197 + } 198 + 199 + return true; 200 + } 201 + 202 + static int elf_read_phdr(const char *buf, size_t len, 203 + struct kexec_elf_info *elf_info, 204 + int idx) 205 + { 206 + /* Override the const in proghdrs, we are the ones doing the loading. 
*/ 207 + struct elf_phdr *phdr = (struct elf_phdr *) &elf_info->proghdrs[idx]; 208 + const struct elfhdr *ehdr = elf_info->ehdr; 209 + const char *pbuf; 210 + struct elf_phdr *buf_phdr; 211 + 212 + pbuf = buf + elf_info->ehdr->e_phoff + (idx * sizeof(*buf_phdr)); 213 + buf_phdr = (struct elf_phdr *) pbuf; 214 + 215 + phdr->p_type = elf32_to_cpu(elf_info->ehdr, buf_phdr->p_type); 216 + phdr->p_flags = elf32_to_cpu(elf_info->ehdr, buf_phdr->p_flags); 217 + 218 + switch (ehdr->e_ident[EI_CLASS]) { 219 + case ELFCLASS64: 220 + phdr->p_offset = elf64_to_cpu(ehdr, buf_phdr->p_offset); 221 + phdr->p_paddr = elf64_to_cpu(ehdr, buf_phdr->p_paddr); 222 + phdr->p_vaddr = elf64_to_cpu(ehdr, buf_phdr->p_vaddr); 223 + phdr->p_filesz = elf64_to_cpu(ehdr, buf_phdr->p_filesz); 224 + phdr->p_memsz = elf64_to_cpu(ehdr, buf_phdr->p_memsz); 225 + phdr->p_align = elf64_to_cpu(ehdr, buf_phdr->p_align); 226 + break; 227 + 228 + case ELFCLASS32: 229 + phdr->p_offset = elf32_to_cpu(ehdr, buf_phdr->p_offset); 230 + phdr->p_paddr = elf32_to_cpu(ehdr, buf_phdr->p_paddr); 231 + phdr->p_vaddr = elf32_to_cpu(ehdr, buf_phdr->p_vaddr); 232 + phdr->p_filesz = elf32_to_cpu(ehdr, buf_phdr->p_filesz); 233 + phdr->p_memsz = elf32_to_cpu(ehdr, buf_phdr->p_memsz); 234 + phdr->p_align = elf32_to_cpu(ehdr, buf_phdr->p_align); 235 + break; 236 + 237 + default: 238 + pr_debug("Unknown ELF class.\n"); 239 + return -EINVAL; 240 + } 241 + 242 + return elf_is_phdr_sane(phdr, len) ? 0 : -ENOEXEC; 243 + } 244 + 245 + /** 246 + * elf_read_phdrs - read the program headers from the buffer 247 + * 248 + * This function assumes that the program header table was checked for sanity. 249 + * Use elf_is_ehdr_sane() if it wasn't. 
250 + */ 251 + static int elf_read_phdrs(const char *buf, size_t len, 252 + struct kexec_elf_info *elf_info) 253 + { 254 + size_t phdr_size, i; 255 + const struct elfhdr *ehdr = elf_info->ehdr; 256 + 257 + /* 258 + * e_phnum is at most 65535 so calculating the size of the 259 + * program header cannot overflow. 260 + */ 261 + phdr_size = sizeof(struct elf_phdr) * ehdr->e_phnum; 262 + 263 + elf_info->proghdrs = kzalloc(phdr_size, GFP_KERNEL); 264 + if (!elf_info->proghdrs) 265 + return -ENOMEM; 266 + 267 + for (i = 0; i < ehdr->e_phnum; i++) { 268 + int ret; 269 + 270 + ret = elf_read_phdr(buf, len, elf_info, i); 271 + if (ret) { 272 + kfree(elf_info->proghdrs); 273 + elf_info->proghdrs = NULL; 274 + return ret; 275 + } 276 + } 277 + 278 + return 0; 279 + } 280 + 281 + /** 282 + * elf_read_from_buffer - read ELF file and sets up ELF header and ELF info 283 + * @buf: Buffer to read ELF file from. 284 + * @len: Size of @buf. 285 + * @ehdr: Pointer to existing struct which will be populated. 286 + * @elf_info: Pointer to existing struct which will be populated. 287 + * 288 + * This function allows reading ELF files with different byte order than 289 + * the kernel, byte-swapping the fields as needed. 290 + * 291 + * Return: 292 + * On success returns 0, and the caller should call 293 + * kexec_free_elf_info(elf_info) to free the memory allocated for the section 294 + * and program headers. 
295 + */ 296 + static int elf_read_from_buffer(const char *buf, size_t len, 297 + struct elfhdr *ehdr, 298 + struct kexec_elf_info *elf_info) 299 + { 300 + int ret; 301 + 302 + ret = elf_read_ehdr(buf, len, ehdr); 303 + if (ret) 304 + return ret; 305 + 306 + elf_info->buffer = buf; 307 + elf_info->ehdr = ehdr; 308 + if (ehdr->e_phoff > 0 && ehdr->e_phnum > 0) { 309 + ret = elf_read_phdrs(buf, len, elf_info); 310 + if (ret) 311 + return ret; 312 + } 313 + return 0; 314 + } 315 + 316 + /** 317 + * kexec_free_elf_info - free memory allocated by elf_read_from_buffer 318 + */ 319 + void kexec_free_elf_info(struct kexec_elf_info *elf_info) 320 + { 321 + kfree(elf_info->proghdrs); 322 + memset(elf_info, 0, sizeof(*elf_info)); 323 + } 324 + /** 325 + * kexec_build_elf_info - read ELF executable and check that we can use it 326 + */ 327 + int kexec_build_elf_info(const char *buf, size_t len, struct elfhdr *ehdr, 328 + struct kexec_elf_info *elf_info) 329 + { 330 + int i; 331 + int ret; 332 + 333 + ret = elf_read_from_buffer(buf, len, ehdr, elf_info); 334 + if (ret) 335 + return ret; 336 + 337 + /* Big endian vmlinux has type ET_DYN. */ 338 + if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) { 339 + pr_err("Not an ELF executable.\n"); 340 + goto error; 341 + } else if (!elf_info->proghdrs) { 342 + pr_err("No ELF program header.\n"); 343 + goto error; 344 + } 345 + 346 + for (i = 0; i < ehdr->e_phnum; i++) { 347 + /* 348 + * Kexec does not support loading interpreters. 349 + * In addition this check keeps us from attempting 350 + * to kexec ordinay executables. 
351 + */ 352 + if (elf_info->proghdrs[i].p_type == PT_INTERP) { 353 + pr_err("Requires an ELF interpreter.\n"); 354 + goto error; 355 + } 356 + } 357 + 358 + return 0; 359 + error: 360 + kexec_free_elf_info(elf_info); 361 + return -ENOEXEC; 362 + } 363 + 364 + 365 + int kexec_elf_probe(const char *buf, unsigned long len) 366 + { 367 + struct elfhdr ehdr; 368 + struct kexec_elf_info elf_info; 369 + int ret; 370 + 371 + ret = kexec_build_elf_info(buf, len, &ehdr, &elf_info); 372 + if (ret) 373 + return ret; 374 + 375 + kexec_free_elf_info(&elf_info); 376 + 377 + return elf_check_arch(&ehdr) ? 0 : -ENOEXEC; 378 + } 379 + 380 + /** 381 + * kexec_elf_load - load ELF executable image 382 + * @lowest_load_addr: On return, will be the address where the first PT_LOAD 383 + * section will be loaded in memory. 384 + * 385 + * Return: 386 + * 0 on success, negative value on failure. 387 + */ 388 + int kexec_elf_load(struct kimage *image, struct elfhdr *ehdr, 389 + struct kexec_elf_info *elf_info, 390 + struct kexec_buf *kbuf, 391 + unsigned long *lowest_load_addr) 392 + { 393 + unsigned long lowest_addr = UINT_MAX; 394 + int ret; 395 + size_t i; 396 + 397 + /* Read in the PT_LOAD segments. 
*/ 398 + for (i = 0; i < ehdr->e_phnum; i++) { 399 + unsigned long load_addr; 400 + size_t size; 401 + const struct elf_phdr *phdr; 402 + 403 + phdr = &elf_info->proghdrs[i]; 404 + if (phdr->p_type != PT_LOAD) 405 + continue; 406 + 407 + size = phdr->p_filesz; 408 + if (size > phdr->p_memsz) 409 + size = phdr->p_memsz; 410 + 411 + kbuf->buffer = (void *) elf_info->buffer + phdr->p_offset; 412 + kbuf->bufsz = size; 413 + kbuf->memsz = phdr->p_memsz; 414 + kbuf->buf_align = phdr->p_align; 415 + kbuf->buf_min = phdr->p_paddr; 416 + kbuf->mem = KEXEC_BUF_MEM_UNKNOWN; 417 + ret = kexec_add_buffer(kbuf); 418 + if (ret) 419 + goto out; 420 + load_addr = kbuf->mem; 421 + 422 + if (load_addr < lowest_addr) 423 + lowest_addr = load_addr; 424 + } 425 + 426 + *lowest_load_addr = lowest_addr; 427 + ret = 0; 428 + out: 429 + return ret; 430 + }