[PATCH] kexec code cleanup

o The following patch provides purely cosmetic changes and corrects CodingStyle
guideline issues, such as those listed below, in the kexec-related files:

o braces around one-line "if" statements and "for" loops,
o lines wider than 80 columns,
o no space after the "while", "for" and "switch" keywords (see the sketch after this list)
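
As an illustration only (a minimal sketch in the spirit of the patch, not an
excerpt from it), the rules above turn code of this shape:

	/* before: no space after "for", braces around a one-line "if" */
	for(i = 0; i < image->nr_segments; i++) {
		if (image->segment[i].mem < __pa(_end)) {
			return -ETXTBSY;
		}
	}

into:

	/* after: space after the keyword, no braces for a single statement */
	for (i = 0; i < image->nr_segments; i++)
		if (image->segment[i].mem < __pa(_end))
			return -ETXTBSY;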

o Changes:
o take-2: Removed the extra tab before "case" keywords.
o take-3: Put the operator at the end of the line when wrapping, and a space
  before "*/" (illustrated in the sketch below).
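
A short sketch of the take-2 and take-3 conventions (adapted from context in
the diff below, not a verbatim excerpt): "case" labels stay at the same indent
level as the "switch", and when a long assignment is wrapped the binary
operator is left at the end of the first line. The space-before-"*/" rule
simply means block comments close as "text */" rather than "text*/".

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:	/* no extra tab before "case" */
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	image->last_entry = ind_page +
			((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);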

Signed-off-by: Maneesh Soni <maneesh@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Maneesh Soni; committed by Linus Torvalds
72414d3f 4f339ecb

+243 -211
+13 -10
arch/i386/kernel/crash.c
··· 31 /* This keeps a track of which one is crashing cpu. */ 32 static int crashing_cpu; 33 34 - static u32 *append_elf_note(u32 *buf, 35 - char *name, unsigned type, void *data, size_t data_len) 36 { 37 struct elf_note note; 38 note.n_namesz = strlen(name) + 1; 39 note.n_descsz = data_len; 40 note.n_type = type; ··· 45 buf += (note.n_namesz + 3)/4; 46 memcpy(buf, data, note.n_descsz); 47 buf += (note.n_descsz + 3)/4; 48 return buf; 49 } 50 51 static void final_note(u32 *buf) 52 { 53 struct elf_note note; 54 note.n_namesz = 0; 55 note.n_descsz = 0; 56 note.n_type = 0; 57 memcpy(buf, &note, sizeof(note)); 58 } 59 60 - 61 static void crash_save_this_cpu(struct pt_regs *regs, int cpu) 62 { 63 struct elf_prstatus prstatus; 64 u32 *buf; 65 - if ((cpu < 0) || (cpu >= NR_CPUS)) { 66 return; 67 - } 68 /* Using ELF notes here is opportunistic. 69 * I need a well defined structure format 70 * for the data I pass, and I need tags ··· 78 memset(&prstatus, 0, sizeof(prstatus)); 79 prstatus.pr_pid = current->pid; 80 elf_core_copy_regs(&prstatus.pr_reg, regs); 81 - buf = append_elf_note(buf, "CORE", NT_PRSTATUS, 82 - &prstatus, sizeof(prstatus)); 83 - 84 final_note(buf); 85 } 86 ··· 121 { 122 struct pt_regs regs; 123 int cpu; 124 - cpu = smp_processor_id(); 125 126 if (saved_regs) 127 crash_setup_regs(&regs, saved_regs); 128 else ··· 155 /* Assume hlt works */ 156 __asm__("hlt"); 157 for(;;); 158 return 1; 159 } 160 ··· 172 static void nmi_shootdown_cpus(void) 173 { 174 unsigned long msecs; 175 - atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); 176 177 /* Would it be better to replace the trap vector here? */ 178 set_nmi_callback(crash_nmi_callback); 179 /* Ensure the new callback function is set before sending
··· 31 /* This keeps a track of which one is crashing cpu. */ 32 static int crashing_cpu; 33 34 + static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data, 35 + size_t data_len) 36 { 37 struct elf_note note; 38 + 39 note.n_namesz = strlen(name) + 1; 40 note.n_descsz = data_len; 41 note.n_type = type; ··· 44 buf += (note.n_namesz + 3)/4; 45 memcpy(buf, data, note.n_descsz); 46 buf += (note.n_descsz + 3)/4; 47 + 48 return buf; 49 } 50 51 static void final_note(u32 *buf) 52 { 53 struct elf_note note; 54 + 55 note.n_namesz = 0; 56 note.n_descsz = 0; 57 note.n_type = 0; 58 memcpy(buf, &note, sizeof(note)); 59 } 60 61 static void crash_save_this_cpu(struct pt_regs *regs, int cpu) 62 { 63 struct elf_prstatus prstatus; 64 u32 *buf; 65 + 66 + if ((cpu < 0) || (cpu >= NR_CPUS)) 67 return; 68 + 69 /* Using ELF notes here is opportunistic. 70 * I need a well defined structure format 71 * for the data I pass, and I need tags ··· 75 memset(&prstatus, 0, sizeof(prstatus)); 76 prstatus.pr_pid = current->pid; 77 elf_core_copy_regs(&prstatus.pr_reg, regs); 78 + buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus, 79 + sizeof(prstatus)); 80 final_note(buf); 81 } 82 ··· 119 { 120 struct pt_regs regs; 121 int cpu; 122 123 + cpu = smp_processor_id(); 124 if (saved_regs) 125 crash_setup_regs(&regs, saved_regs); 126 else ··· 153 /* Assume hlt works */ 154 __asm__("hlt"); 155 for(;;); 156 + 157 return 1; 158 } 159 ··· 169 static void nmi_shootdown_cpus(void) 170 { 171 unsigned long msecs; 172 173 + atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); 174 /* Would it be better to replace the trap vector here? */ 175 set_nmi_callback(crash_nmi_callback); 176 /* Ensure the new callback function is set before sending
+11 -5
arch/i386/kernel/machine_kexec.c
··· 80 /* Identity map the page table entry */ 81 pgtable_level1[level1_index] = address | L0_ATTR; 82 pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR; 83 - set_64bit(&pgtable_level3[level3_index], __pa(pgtable_level2) | L2_ATTR); 84 85 /* Flush the tlb so the new mapping takes effect. 86 * Global tlb entries are not flushed but that is not an issue. ··· 140 } 141 142 typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)( 143 - unsigned long indirection_page, unsigned long reboot_code_buffer, 144 - unsigned long start_address, unsigned int has_pae) ATTRIB_NORET; 145 146 const extern unsigned char relocate_new_kernel[]; 147 extern void relocate_new_kernel_end(void); ··· 183 { 184 unsigned long page_list; 185 unsigned long reboot_code_buffer; 186 relocate_new_kernel_t rnk; 187 188 /* Interrupts aren't acceptable while we reboot */ 189 local_irq_disable(); 190 191 /* Compute some offsets */ 192 - reboot_code_buffer = page_to_pfn(image->control_code_page) << PAGE_SHIFT; 193 page_list = image->head; 194 195 /* Set up an identity mapping for the reboot_code_buffer */ 196 identity_map_page(reboot_code_buffer); 197 198 /* copy it out */ 199 - memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); 200 201 /* The segment registers are funny things, they are 202 * automatically loaded from a table, in memory wherever you
··· 80 /* Identity map the page table entry */ 81 pgtable_level1[level1_index] = address | L0_ATTR; 82 pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR; 83 + set_64bit(&pgtable_level3[level3_index], 84 + __pa(pgtable_level2) | L2_ATTR); 85 86 /* Flush the tlb so the new mapping takes effect. 87 * Global tlb entries are not flushed but that is not an issue. ··· 139 } 140 141 typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)( 142 + unsigned long indirection_page, 143 + unsigned long reboot_code_buffer, 144 + unsigned long start_address, 145 + unsigned int has_pae) ATTRIB_NORET; 146 147 const extern unsigned char relocate_new_kernel[]; 148 extern void relocate_new_kernel_end(void); ··· 180 { 181 unsigned long page_list; 182 unsigned long reboot_code_buffer; 183 + 184 relocate_new_kernel_t rnk; 185 186 /* Interrupts aren't acceptable while we reboot */ 187 local_irq_disable(); 188 189 /* Compute some offsets */ 190 + reboot_code_buffer = page_to_pfn(image->control_code_page) 191 + << PAGE_SHIFT; 192 page_list = image->head; 193 194 /* Set up an identity mapping for the reboot_code_buffer */ 195 identity_map_page(reboot_code_buffer); 196 197 /* copy it out */ 198 + memcpy((void *)reboot_code_buffer, relocate_new_kernel, 199 + relocate_new_kernel_size); 200 201 /* The segment registers are funny things, they are 202 * automatically loaded from a table, in memory wherever you
+13 -17
arch/ppc/kernel/machine_kexec.c
··· 21 #include <asm/machdep.h> 22 23 typedef NORET_TYPE void (*relocate_new_kernel_t)( 24 - unsigned long indirection_page, unsigned long reboot_code_buffer, 25 - unsigned long start_address) ATTRIB_NORET; 26 27 const extern unsigned char relocate_new_kernel[]; 28 const extern unsigned int relocate_new_kernel_size; 29 30 void machine_shutdown(void) 31 { 32 - if (ppc_md.machine_shutdown) { 33 ppc_md.machine_shutdown(); 34 - } 35 } 36 37 void machine_crash_shutdown(struct pt_regs *regs) 38 { 39 - if (ppc_md.machine_crash_shutdown) { 40 ppc_md.machine_crash_shutdown(); 41 - } 42 } 43 44 /* ··· 47 */ 48 int machine_kexec_prepare(struct kimage *image) 49 { 50 - if (ppc_md.machine_kexec_prepare) { 51 return ppc_md.machine_kexec_prepare(image); 52 - } 53 /* 54 * Fail if platform doesn't provide its own machine_kexec_prepare 55 * implementation. ··· 58 59 void machine_kexec_cleanup(struct kimage *image) 60 { 61 - if (ppc_md.machine_kexec_cleanup) { 62 ppc_md.machine_kexec_cleanup(image); 63 - } 64 } 65 66 /* ··· 68 */ 69 NORET_TYPE void machine_kexec(struct kimage *image) 70 { 71 - if (ppc_md.machine_kexec) { 72 ppc_md.machine_kexec(image); 73 - } else { 74 /* 75 * Fall back to normal restart if platform doesn't provide 76 * its own kexec function, and user insist to kexec... ··· 79 } 80 for(;;); 81 } 82 - 83 84 /* 85 * This is a generic machine_kexec function suitable at least for ··· 100 101 /* we need both effective and real address here */ 102 reboot_code_buffer = 103 - (unsigned long)page_address(image->control_code_page); 104 reboot_code_buffer_phys = virt_to_phys((void *)reboot_code_buffer); 105 106 /* copy our kernel relocation code to the control code page */ 107 - memcpy((void *)reboot_code_buffer, 108 - relocate_new_kernel, relocate_new_kernel_size); 109 110 flush_icache_range(reboot_code_buffer, 111 - reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE); 112 printk(KERN_INFO "Bye!\n"); 113 114 /* now call it */
··· 21 #include <asm/machdep.h> 22 23 typedef NORET_TYPE void (*relocate_new_kernel_t)( 24 + unsigned long indirection_page, 25 + unsigned long reboot_code_buffer, 26 + unsigned long start_address) ATTRIB_NORET; 27 28 const extern unsigned char relocate_new_kernel[]; 29 const extern unsigned int relocate_new_kernel_size; 30 31 void machine_shutdown(void) 32 { 33 + if (ppc_md.machine_shutdown) 34 ppc_md.machine_shutdown(); 35 } 36 37 void machine_crash_shutdown(struct pt_regs *regs) 38 { 39 + if (ppc_md.machine_crash_shutdown) 40 ppc_md.machine_crash_shutdown(); 41 } 42 43 /* ··· 48 */ 49 int machine_kexec_prepare(struct kimage *image) 50 { 51 + if (ppc_md.machine_kexec_prepare) 52 return ppc_md.machine_kexec_prepare(image); 53 /* 54 * Fail if platform doesn't provide its own machine_kexec_prepare 55 * implementation. ··· 60 61 void machine_kexec_cleanup(struct kimage *image) 62 { 63 + if (ppc_md.machine_kexec_cleanup) 64 ppc_md.machine_kexec_cleanup(image); 65 } 66 67 /* ··· 71 */ 72 NORET_TYPE void machine_kexec(struct kimage *image) 73 { 74 + if (ppc_md.machine_kexec) 75 ppc_md.machine_kexec(image); 76 + else { 77 /* 78 * Fall back to normal restart if platform doesn't provide 79 * its own kexec function, and user insist to kexec... ··· 82 } 83 for(;;); 84 } 85 86 /* 87 * This is a generic machine_kexec function suitable at least for ··· 104 105 /* we need both effective and real address here */ 106 reboot_code_buffer = 107 + (unsigned long)page_address(image->control_code_page); 108 reboot_code_buffer_phys = virt_to_phys((void *)reboot_code_buffer); 109 110 /* copy our kernel relocation code to the control code page */ 111 + memcpy((void *)reboot_code_buffer, relocate_new_kernel, 112 + relocate_new_kernel_size); 113 114 flush_icache_range(reboot_code_buffer, 115 + reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE); 116 printk(KERN_INFO "Bye!\n"); 117 118 /* now call it */
+5 -4
arch/ppc64/kernel/machine_kexec.c
··· 58 * handle the virtual mode, we must make sure no destination 59 * overlaps kernel static data or bss. 60 */ 61 - for(i = 0; i < image->nr_segments; i++) 62 if (image->segment[i].mem < __pa(_end)) 63 return -ETXTBSY; 64 ··· 76 low = __pa(htab_address); 77 high = low + (htab_hash_mask + 1) * HASH_GROUP_SIZE; 78 79 - for(i = 0; i < image->nr_segments; i++) { 80 begin = image->segment[i].mem; 81 end = begin + image->segment[i].memsz; 82 ··· 98 low = *basep; 99 high = low + (*sizep); 100 101 - for(i = 0; i < image->nr_segments; i++) { 102 begin = image->segment[i].mem; 103 end = begin + image->segment[i].memsz; 104 ··· 274 275 /* Our assembly helper, in kexec_stub.S */ 276 extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start, 277 - void *image, void *control, void (*clear_all)(void)) ATTRIB_NORET; 278 279 /* too late to fail here */ 280 void machine_kexec(struct kimage *image)
··· 58 * handle the virtual mode, we must make sure no destination 59 * overlaps kernel static data or bss. 60 */ 61 + for (i = 0; i < image->nr_segments; i++) 62 if (image->segment[i].mem < __pa(_end)) 63 return -ETXTBSY; 64 ··· 76 low = __pa(htab_address); 77 high = low + (htab_hash_mask + 1) * HASH_GROUP_SIZE; 78 79 + for (i = 0; i < image->nr_segments; i++) { 80 begin = image->segment[i].mem; 81 end = begin + image->segment[i].memsz; 82 ··· 98 low = *basep; 99 high = low + (*sizep); 100 101 + for (i = 0; i < image->nr_segments; i++) { 102 begin = image->segment[i].mem; 103 end = begin + image->segment[i].memsz; 104 ··· 274 275 /* Our assembly helper, in kexec_stub.S */ 276 extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start, 277 + void *image, void *control, 278 + void (*clear_all)(void)) ATTRIB_NORET; 279 280 /* too late to fail here */ 281 void machine_kexec(struct kimage *image)
+2 -2
arch/s390/kernel/machine_kexec.c
··· 67 ctl_clear_bit(0,28); 68 69 on_each_cpu(kexec_halt_all_cpus, image, 0, 0); 70 - for(;;); 71 } 72 73 static void ··· 85 for_each_online_cpu(cpu) { 86 if (cpu == smp_processor_id()) 87 continue; 88 - while(!smp_cpu_not_running(cpu)) 89 cpu_relax(); 90 } 91
··· 67 ctl_clear_bit(0,28); 68 69 on_each_cpu(kexec_halt_all_cpus, image, 0, 0); 70 + for (;;); 71 } 72 73 static void ··· 85 for_each_online_cpu(cpu) { 86 if (cpu == smp_processor_id()) 87 continue; 88 + while (!smp_cpu_not_running(cpu)) 89 cpu_relax(); 90 } 91
+27 -22
arch/x86_64/kernel/machine_kexec.c
··· 32 #define L2_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) 33 #define L3_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) 34 35 - static void init_level2_page( 36 - u64 *level2p, unsigned long addr) 37 { 38 unsigned long end_addr; 39 addr &= PAGE_MASK; 40 end_addr = addr + LEVEL2_SIZE; 41 - while(addr < end_addr) { 42 *(level2p++) = addr | L1_ATTR; 43 addr += LEVEL1_SIZE; 44 } 45 } 46 47 - static int init_level3_page(struct kimage *image, 48 - u64 *level3p, unsigned long addr, unsigned long last_addr) 49 { 50 unsigned long end_addr; 51 int result; 52 result = 0; 53 addr &= PAGE_MASK; 54 end_addr = addr + LEVEL3_SIZE; 55 - while((addr < last_addr) && (addr < end_addr)) { 56 struct page *page; 57 u64 *level2p; 58 page = kimage_alloc_control_pages(image, 0); 59 if (!page) { 60 result = -ENOMEM; ··· 68 addr += LEVEL2_SIZE; 69 } 70 /* clear the unused entries */ 71 - while(addr < end_addr) { 72 *(level3p++) = 0; 73 addr += LEVEL2_SIZE; 74 } ··· 77 } 78 79 80 - static int init_level4_page(struct kimage *image, 81 - u64 *level4p, unsigned long addr, unsigned long last_addr) 82 { 83 unsigned long end_addr; 84 int result; 85 result = 0; 86 addr &= PAGE_MASK; 87 end_addr = addr + LEVEL4_SIZE; 88 - while((addr < last_addr) && (addr < end_addr)) { 89 struct page *page; 90 u64 *level3p; 91 page = kimage_alloc_control_pages(image, 0); 92 if (!page) { 93 result = -ENOMEM; ··· 104 addr += LEVEL3_SIZE; 105 } 106 /* clear the unused entries */ 107 - while(addr < end_addr) { 108 *(level4p++) = 0; 109 addr += LEVEL3_SIZE; 110 } 111 - out: 112 return result; 113 } 114 ··· 117 { 118 u64 *level4p; 119 level4p = (u64 *)__va(start_pgtable); 120 - return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT); 121 } 122 123 static void set_idt(void *newidt, u16 limit) ··· 163 #undef __STR 164 } 165 166 - typedef NORET_TYPE void (*relocate_new_kernel_t)( 167 - unsigned long indirection_page, unsigned long control_code_buffer, 168 - unsigned long start_address, unsigned long pgtable) ATTRIB_NORET; 169 170 const extern unsigned char relocate_new_kernel[]; 171 const extern unsigned long relocate_new_kernel_size; ··· 177 int result; 178 179 /* Calculate the offsets */ 180 - start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT; 181 control_code_buffer = start_pgtable + 4096UL; 182 183 /* Setup the identity mapped 64bit page table */ 184 result = init_pgtable(image, start_pgtable); 185 - if (result) { 186 return result; 187 - } 188 189 /* Place the code in the reboot code buffer */ 190 - memcpy(__va(control_code_buffer), relocate_new_kernel, relocate_new_kernel_size); 191 192 return 0; 193 } ··· 212 local_irq_disable(); 213 214 /* Calculate the offsets */ 215 - page_list = image->head; 216 - start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT; 217 control_code_buffer = start_pgtable + 4096UL; 218 219 /* Set the low half of the page table to my identity mapped
··· 32 #define L2_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) 33 #define L3_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) 34 35 + static void init_level2_page(u64 *level2p, unsigned long addr) 36 { 37 unsigned long end_addr; 38 + 39 addr &= PAGE_MASK; 40 end_addr = addr + LEVEL2_SIZE; 41 + while (addr < end_addr) { 42 *(level2p++) = addr | L1_ATTR; 43 addr += LEVEL1_SIZE; 44 } 45 } 46 47 + static int init_level3_page(struct kimage *image, u64 *level3p, 48 + unsigned long addr, unsigned long last_addr) 49 { 50 unsigned long end_addr; 51 int result; 52 + 53 result = 0; 54 addr &= PAGE_MASK; 55 end_addr = addr + LEVEL3_SIZE; 56 + while ((addr < last_addr) && (addr < end_addr)) { 57 struct page *page; 58 u64 *level2p; 59 + 60 page = kimage_alloc_control_pages(image, 0); 61 if (!page) { 62 result = -ENOMEM; ··· 66 addr += LEVEL2_SIZE; 67 } 68 /* clear the unused entries */ 69 + while (addr < end_addr) { 70 *(level3p++) = 0; 71 addr += LEVEL2_SIZE; 72 } ··· 75 } 76 77 78 + static int init_level4_page(struct kimage *image, u64 *level4p, 79 + unsigned long addr, unsigned long last_addr) 80 { 81 unsigned long end_addr; 82 int result; 83 + 84 result = 0; 85 addr &= PAGE_MASK; 86 end_addr = addr + LEVEL4_SIZE; 87 + while ((addr < last_addr) && (addr < end_addr)) { 88 struct page *page; 89 u64 *level3p; 90 + 91 page = kimage_alloc_control_pages(image, 0); 92 if (!page) { 93 result = -ENOMEM; ··· 100 addr += LEVEL3_SIZE; 101 } 102 /* clear the unused entries */ 103 + while (addr < end_addr) { 104 *(level4p++) = 0; 105 addr += LEVEL3_SIZE; 106 } 107 + out: 108 return result; 109 } 110 ··· 113 { 114 u64 *level4p; 115 level4p = (u64 *)__va(start_pgtable); 116 + return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT); 117 } 118 119 static void set_idt(void *newidt, u16 limit) ··· 159 #undef __STR 160 } 161 162 + typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page, 163 + unsigned long control_code_buffer, 164 + unsigned long start_address, 165 + unsigned long pgtable) ATTRIB_NORET; 166 167 const extern unsigned char relocate_new_kernel[]; 168 const extern unsigned long relocate_new_kernel_size; ··· 172 int result; 173 174 /* Calculate the offsets */ 175 + start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT; 176 control_code_buffer = start_pgtable + 4096UL; 177 178 /* Setup the identity mapped 64bit page table */ 179 result = init_pgtable(image, start_pgtable); 180 + if (result) 181 return result; 182 183 /* Place the code in the reboot code buffer */ 184 + memcpy(__va(control_code_buffer), relocate_new_kernel, 185 + relocate_new_kernel_size); 186 187 return 0; 188 } ··· 207 local_irq_disable(); 208 209 /* Calculate the offsets */ 210 + page_list = image->head; 211 + start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT; 212 control_code_buffer = start_pgtable + 4096UL; 213 214 /* Set the low half of the page table to my identity mapped
+1 -1
drivers/char/mem.c
··· 287 size_t read = 0, csize; 288 int rc = 0; 289 290 - while(count) { 291 pfn = *ppos / PAGE_SIZE; 292 if (pfn > saved_max_pfn) 293 return read;
··· 287 size_t read = 0, csize; 288 int rc = 0; 289 290 + while (count) { 291 pfn = *ppos / PAGE_SIZE; 292 if (pfn > saved_max_pfn) 293 return read;
+8 -5
include/linux/kexec.h
··· 91 extern int machine_kexec_prepare(struct kimage *image); 92 extern void machine_kexec_cleanup(struct kimage *image); 93 extern asmlinkage long sys_kexec_load(unsigned long entry, 94 - unsigned long nr_segments, struct kexec_segment __user *segments, 95 - unsigned long flags); 96 #ifdef CONFIG_COMPAT 97 extern asmlinkage long compat_sys_kexec_load(unsigned long entry, 98 - unsigned long nr_segments, struct compat_kexec_segment __user *segments, 99 - unsigned long flags); 100 #endif 101 - extern struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order); 102 extern void crash_kexec(struct pt_regs *); 103 int kexec_should_crash(struct task_struct *); 104 extern struct kimage *kexec_image;
··· 91 extern int machine_kexec_prepare(struct kimage *image); 92 extern void machine_kexec_cleanup(struct kimage *image); 93 extern asmlinkage long sys_kexec_load(unsigned long entry, 94 + unsigned long nr_segments, 95 + struct kexec_segment __user *segments, 96 + unsigned long flags); 97 #ifdef CONFIG_COMPAT 98 extern asmlinkage long compat_sys_kexec_load(unsigned long entry, 99 + unsigned long nr_segments, 100 + struct compat_kexec_segment __user *segments, 101 + unsigned long flags); 102 #endif 103 + extern struct page *kimage_alloc_control_pages(struct kimage *image, 104 + unsigned int order); 105 extern void crash_kexec(struct pt_regs *); 106 int kexec_should_crash(struct task_struct *); 107 extern struct kimage *kexec_image;
+3 -3
include/linux/syscalls.h
··· 159 asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, 160 void __user *arg); 161 asmlinkage long sys_restart_syscall(void); 162 - asmlinkage long sys_kexec_load(unsigned long entry, 163 - unsigned long nr_segments, struct kexec_segment __user *segments, 164 - unsigned long flags); 165 166 asmlinkage long sys_exit(int error_code); 167 asmlinkage void sys_exit_group(int error_code);
··· 159 asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, 160 void __user *arg); 161 asmlinkage long sys_restart_syscall(void); 162 + asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, 163 + struct kexec_segment __user *segments, 164 + unsigned long flags); 165 166 asmlinkage long sys_exit(int error_code); 167 asmlinkage void sys_exit_group(int error_code);
+160 -142
kernel/kexec.c
··· 87 */ 88 #define KIMAGE_NO_DEST (-1UL) 89 90 - static int kimage_is_destination_range( 91 - struct kimage *image, unsigned long start, unsigned long end); 92 - static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long dest); 93 94 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, 95 - unsigned long nr_segments, struct kexec_segment __user *segments) 96 { 97 size_t segment_bytes; 98 struct kimage *image; ··· 105 /* Allocate a controlling structure */ 106 result = -ENOMEM; 107 image = kmalloc(sizeof(*image), GFP_KERNEL); 108 - if (!image) { 109 goto out; 110 - } 111 memset(image, 0, sizeof(*image)); 112 image->head = 0; 113 image->entry = &image->head; ··· 148 result = -EADDRNOTAVAIL; 149 for (i = 0; i < nr_segments; i++) { 150 unsigned long mstart, mend; 151 mstart = image->segment[i].mem; 152 mend = mstart + image->segment[i].memsz; 153 if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK)) ··· 163 * easy explanation as one segment stops on another. 164 */ 165 result = -EINVAL; 166 - for(i = 0; i < nr_segments; i++) { 167 unsigned long mstart, mend; 168 unsigned long j; 169 mstart = image->segment[i].mem; 170 mend = mstart + image->segment[i].memsz; 171 - for(j = 0; j < i; j++) { 172 unsigned long pstart, pend; 173 pstart = image->segment[j].mem; 174 pend = pstart + image->segment[j].memsz; ··· 185 * later on. 186 */ 187 result = -EINVAL; 188 - for(i = 0; i < nr_segments; i++) { 189 if (image->segment[i].bufsz > image->segment[i].memsz) 190 goto out; 191 } 192 193 - 194 result = 0; 195 - out: 196 - if (result == 0) { 197 *rimage = image; 198 - } else { 199 kfree(image); 200 - } 201 return result; 202 203 } 204 205 static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, 206 - unsigned long nr_segments, struct kexec_segment __user *segments) 207 { 208 int result; 209 struct kimage *image; ··· 211 /* Allocate and initialize a controlling structure */ 212 image = NULL; 213 result = do_kimage_alloc(&image, entry, nr_segments, segments); 214 - if (result) { 215 goto out; 216 - } 217 *rimage = image; 218 219 /* ··· 223 */ 224 result = -ENOMEM; 225 image->control_code_page = kimage_alloc_control_pages(image, 226 - get_order(KEXEC_CONTROL_CODE_SIZE)); 227 if (!image->control_code_page) { 228 printk(KERN_ERR "Could not allocate control_code_buffer\n"); 229 goto out; ··· 231 232 result = 0; 233 out: 234 - if (result == 0) { 235 *rimage = image; 236 - } else { 237 kfree(image); 238 - } 239 return result; 240 } 241 242 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, 243 - unsigned long nr_segments, struct kexec_segment *segments) 244 { 245 int result; 246 struct kimage *image; ··· 256 257 /* Allocate and initialize a controlling structure */ 258 result = do_kimage_alloc(&image, entry, nr_segments, segments); 259 - if (result) { 260 goto out; 261 - } 262 263 /* Enable the special crash kernel control page 264 * allocation policy. 
··· 277 result = -EADDRNOTAVAIL; 278 for (i = 0; i < nr_segments; i++) { 279 unsigned long mstart, mend; 280 mstart = image->segment[i].mem; 281 mend = mstart + image->segment[i].memsz - 1; 282 /* Ensure we are within the crash kernel limits */ 283 if ((mstart < crashk_res.start) || (mend > crashk_res.end)) 284 goto out; 285 } 286 - 287 288 /* 289 * Find a location for the control code buffer, and add ··· 292 */ 293 result = -ENOMEM; 294 image->control_code_page = kimage_alloc_control_pages(image, 295 - get_order(KEXEC_CONTROL_CODE_SIZE)); 296 if (!image->control_code_page) { 297 printk(KERN_ERR "Could not allocate control_code_buffer\n"); 298 goto out; 299 } 300 301 result = 0; 302 - out: 303 - if (result == 0) { 304 *rimage = image; 305 - } else { 306 kfree(image); 307 - } 308 return result; 309 } 310 311 - static int kimage_is_destination_range( 312 - struct kimage *image, unsigned long start, unsigned long end) 313 { 314 unsigned long i; 315 316 for (i = 0; i < image->nr_segments; i++) { 317 unsigned long mstart, mend; 318 mstart = image->segment[i].mem; 319 - mend = mstart + image->segment[i].memsz; 320 - if ((end > mstart) && (start < mend)) { 321 return 1; 322 - } 323 } 324 return 0; 325 } 326 327 - static struct page *kimage_alloc_pages(unsigned int gfp_mask, unsigned int order) 328 { 329 struct page *pages; 330 pages = alloc_pages(gfp_mask, order); 331 if (pages) { 332 unsigned int count, i; 333 pages->mapping = NULL; 334 pages->private = order; 335 count = 1 << order; 336 - for(i = 0; i < count; i++) { 337 SetPageReserved(pages + i); 338 - } 339 } 340 return pages; 341 } 342 343 static void kimage_free_pages(struct page *page) 344 { 345 unsigned int order, count, i; 346 order = page->private; 347 count = 1 << order; 348 - for(i = 0; i < count; i++) { 349 ClearPageReserved(page + i); 350 - } 351 __free_pages(page, order); 352 } 353 354 static void kimage_free_page_list(struct list_head *list) 355 { 356 struct list_head *pos, *next; 357 list_for_each_safe(pos, next, list) { 358 struct page *page; 359 360 page = list_entry(pos, struct page, lru); 361 list_del(&page->lru); 362 - 363 kimage_free_pages(page); 364 } 365 } 366 367 - static struct page *kimage_alloc_normal_control_pages( 368 - struct kimage *image, unsigned int order) 369 { 370 /* Control pages are special, they are the intermediaries 371 * that are needed while we copy the rest of the pages ··· 396 */ 397 do { 398 unsigned long pfn, epfn, addr, eaddr; 399 pages = kimage_alloc_pages(GFP_KERNEL, order); 400 if (!pages) 401 break; ··· 405 addr = pfn << PAGE_SHIFT; 406 eaddr = epfn << PAGE_SHIFT; 407 if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) || 408 - kimage_is_destination_range(image, addr, eaddr)) 409 - { 410 list_add(&pages->lru, &extra_pages); 411 pages = NULL; 412 } 413 - } while(!pages); 414 if (pages) { 415 /* Remember the allocated page... */ 416 list_add(&pages->lru, &image->control_pages); ··· 430 * For now it is simpler to just free the pages. 
431 */ 432 kimage_free_page_list(&extra_pages); 433 - return pages; 434 435 } 436 437 - static struct page *kimage_alloc_crash_control_pages( 438 - struct kimage *image, unsigned int order) 439 { 440 /* Control pages are special, they are the intermediaries 441 * that are needed while we copy the rest of the pages ··· 460 */ 461 unsigned long hole_start, hole_end, size; 462 struct page *pages; 463 pages = NULL; 464 size = (1 << order) << PAGE_SHIFT; 465 hole_start = (image->control_page + (size - 1)) & ~(size - 1); 466 hole_end = hole_start + size - 1; 467 - while(hole_end <= crashk_res.end) { 468 unsigned long i; 469 - if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT) { 470 break; 471 - } 472 - if (hole_end > crashk_res.end) { 473 break; 474 - } 475 /* See if I overlap any of the segments */ 476 - for(i = 0; i < image->nr_segments; i++) { 477 unsigned long mstart, mend; 478 mstart = image->segment[i].mem; 479 mend = mstart + image->segment[i].memsz - 1; 480 if ((hole_end >= mstart) && (hole_start <= mend)) { ··· 491 break; 492 } 493 } 494 - if (pages) { 495 image->control_page = hole_end; 496 - } 497 return pages; 498 } 499 500 501 - struct page *kimage_alloc_control_pages( 502 - struct kimage *image, unsigned int order) 503 { 504 struct page *pages = NULL; 505 - switch(image->type) { 506 case KEXEC_TYPE_DEFAULT: 507 pages = kimage_alloc_normal_control_pages(image, order); 508 break; ··· 511 pages = kimage_alloc_crash_control_pages(image, order); 512 break; 513 } 514 return pages; 515 } 516 517 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) 518 { 519 - if (*image->entry != 0) { 520 image->entry++; 521 - } 522 if (image->entry == image->last_entry) { 523 kimage_entry_t *ind_page; 524 struct page *page; 525 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST); 526 - if (!page) { 527 return -ENOMEM; 528 - } 529 ind_page = page_address(page); 530 *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION; 531 image->entry = ind_page; 532 - image->last_entry = 533 - ind_page + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); 534 } 535 *image->entry = entry; 536 image->entry++; 537 *image->entry = 0; 538 return 0; 539 } 540 541 - static int kimage_set_destination( 542 - struct kimage *image, unsigned long destination) 543 { 544 int result; 545 546 destination &= PAGE_MASK; 547 result = kimage_add_entry(image, destination | IND_DESTINATION); 548 - if (result == 0) { 549 image->destination = destination; 550 - } 551 return result; 552 } 553 ··· 561 562 page &= PAGE_MASK; 563 result = kimage_add_entry(image, page | IND_SOURCE); 564 - if (result == 0) { 565 image->destination += PAGE_SIZE; 566 - } 567 return result; 568 } 569 ··· 579 } 580 static int kimage_terminate(struct kimage *image) 581 { 582 - if (*image->entry != 0) { 583 image->entry++; 584 - } 585 *image->entry = IND_DONE; 586 return 0; 587 } 588 ··· 607 608 if (!image) 609 return; 610 kimage_free_extra_pages(image); 611 for_each_kimage_entry(image, ptr, entry) { 612 if (entry & IND_INDIRECTION) { 613 /* Free the previous indirection page */ 614 - if (ind & IND_INDIRECTION) { 615 kimage_free_entry(ind); 616 - } 617 /* Save this indirection page until we are 618 * done with it. 
619 */ 620 ind = entry; 621 } 622 - else if (entry & IND_SOURCE) { 623 kimage_free_entry(entry); 624 - } 625 } 626 /* Free the final indirection page */ 627 - if (ind & IND_INDIRECTION) { 628 kimage_free_entry(ind); 629 - } 630 631 /* Handle any machine specific cleanup */ 632 machine_kexec_cleanup(image); ··· 634 kfree(image); 635 } 636 637 - static kimage_entry_t *kimage_dst_used(struct kimage *image, unsigned long page) 638 { 639 kimage_entry_t *ptr, entry; 640 unsigned long destination = 0; 641 642 for_each_kimage_entry(image, ptr, entry) { 643 - if (entry & IND_DESTINATION) { 644 destination = entry & PAGE_MASK; 645 - } 646 else if (entry & IND_SOURCE) { 647 - if (page == destination) { 648 return ptr; 649 - } 650 destination += PAGE_SIZE; 651 } 652 } 653 return 0; 654 } 655 656 - static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long destination) 657 { 658 /* 659 * Here we implement safeguards to ensure that a source page ··· 695 696 /* Allocate a page, if we run out of memory give up */ 697 page = kimage_alloc_pages(gfp_mask, 0); 698 - if (!page) { 699 return 0; 700 - } 701 /* If the page cannot be used file it away */ 702 - if (page_to_pfn(page) > (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { 703 list_add(&page->lru, &image->unuseable_pages); 704 continue; 705 } ··· 710 break; 711 712 /* If the page is not a destination page use it */ 713 - if (!kimage_is_destination_range(image, addr, addr + PAGE_SIZE)) 714 break; 715 716 /* ··· 744 list_add(&page->lru, &image->dest_pages); 745 } 746 } 747 return page; 748 } 749 750 static int kimage_load_normal_segment(struct kimage *image, 751 - struct kexec_segment *segment) 752 { 753 unsigned long maddr; 754 unsigned long ubytes, mbytes; ··· 763 maddr = segment->mem; 764 765 result = kimage_set_destination(image, maddr); 766 - if (result < 0) { 767 goto out; 768 - } 769 - while(mbytes) { 770 struct page *page; 771 char *ptr; 772 size_t uchunk, mchunk; 773 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); 774 if (page == 0) { 775 result = -ENOMEM; 776 goto out; 777 } 778 - result = kimage_add_page(image, page_to_pfn(page) << PAGE_SHIFT); 779 - if (result < 0) { 780 goto out; 781 - } 782 ptr = kmap(page); 783 /* Start with a clear page */ 784 memset(ptr, 0, PAGE_SIZE); 785 ptr += maddr & ~PAGE_MASK; 786 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); 787 - if (mchunk > mbytes) { 788 mchunk = mbytes; 789 - } 790 uchunk = mchunk; 791 - if (uchunk > ubytes) { 792 uchunk = ubytes; 793 - } 794 result = copy_from_user(ptr, buf, uchunk); 795 kunmap(page); 796 if (result) { ··· 804 buf += mchunk; 805 mbytes -= mchunk; 806 } 807 - out: 808 return result; 809 } 810 811 static int kimage_load_crash_segment(struct kimage *image, 812 - struct kexec_segment *segment) 813 { 814 /* For crash dumps kernels we simply copy the data from 815 * user space to it's destination. 
··· 825 ubytes = segment->bufsz; 826 mbytes = segment->memsz; 827 maddr = segment->mem; 828 - while(mbytes) { 829 struct page *page; 830 char *ptr; 831 size_t uchunk, mchunk; 832 page = pfn_to_page(maddr >> PAGE_SHIFT); 833 if (page == 0) { 834 result = -ENOMEM; ··· 838 ptr = kmap(page); 839 ptr += maddr & ~PAGE_MASK; 840 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); 841 - if (mchunk > mbytes) { 842 mchunk = mbytes; 843 - } 844 uchunk = mchunk; 845 if (uchunk > ubytes) { 846 uchunk = ubytes; ··· 858 buf += mchunk; 859 mbytes -= mchunk; 860 } 861 - out: 862 return result; 863 } 864 865 static int kimage_load_segment(struct kimage *image, 866 - struct kexec_segment *segment) 867 { 868 int result = -ENOMEM; 869 - switch(image->type) { 870 case KEXEC_TYPE_DEFAULT: 871 result = kimage_load_normal_segment(image, segment); 872 break; ··· 875 result = kimage_load_crash_segment(image, segment); 876 break; 877 } 878 return result; 879 } 880 ··· 908 */ 909 static int kexec_lock = 0; 910 911 - asmlinkage long sys_kexec_load(unsigned long entry, 912 - unsigned long nr_segments, struct kexec_segment __user *segments, 913 - unsigned long flags) 914 { 915 struct kimage **dest_image, *image; 916 int locked; ··· 930 /* Verify we are on the appropriate architecture */ 931 if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) && 932 ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT)) 933 - { 934 return -EINVAL; 935 - } 936 937 /* Put an artificial cap on the number 938 * of segments passed to kexec_load. ··· 950 * KISS: always take the mutex. 951 */ 952 locked = xchg(&kexec_lock, 1); 953 - if (locked) { 954 return -EBUSY; 955 - } 956 dest_image = &kexec_image; 957 - if (flags & KEXEC_ON_CRASH) { 958 dest_image = &kexec_crash_image; 959 - } 960 if (nr_segments > 0) { 961 unsigned long i; 962 /* Loading another kernel to reboot into */ 963 - if ((flags & KEXEC_ON_CRASH) == 0) { 964 - result = kimage_normal_alloc(&image, entry, nr_segments, segments); 965 - } 966 /* Loading another kernel to switch to if this one crashes */ 967 else if (flags & KEXEC_ON_CRASH) { 968 /* Free any current crash dump kernel before 969 * we corrupt it. 970 */ 971 kimage_free(xchg(&kexec_crash_image, NULL)); 972 - result = kimage_crash_alloc(&image, entry, nr_segments, segments); 973 } 974 - if (result) { 975 goto out; 976 - } 977 result = machine_kexec_prepare(image); 978 - if (result) { 979 goto out; 980 - } 981 - for(i = 0; i < nr_segments; i++) { 982 result = kimage_load_segment(image, &image->segment[i]); 983 - if (result) { 984 goto out; 985 - } 986 } 987 result = kimage_terminate(image); 988 - if (result) { 989 goto out; 990 - } 991 } 992 /* Install the new kernel, and Uninstall the old */ 993 image = xchg(dest_image, image); 994 995 - out: 996 xchg(&kexec_lock, 0); /* Release the mutex */ 997 kimage_free(image); 998 return result; 999 } 1000 1001 #ifdef CONFIG_COMPAT 1002 asmlinkage long compat_sys_kexec_load(unsigned long entry, 1003 - unsigned long nr_segments, struct compat_kexec_segment __user *segments, 1004 - unsigned long flags) 1005 { 1006 struct compat_kexec_segment in; 1007 struct kexec_segment out, __user *ksegments; ··· 1011 /* Don't allow clients that don't understand the native 1012 * architecture to do anything. 
1013 */ 1014 - if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) { 1015 return -EINVAL; 1016 - } 1017 1018 - if (nr_segments > KEXEC_SEGMENT_MAX) { 1019 return -EINVAL; 1020 - } 1021 1022 ksegments = compat_alloc_user_space(nr_segments * sizeof(out)); 1023 for (i=0; i < nr_segments; i++) { 1024 result = copy_from_user(&in, &segments[i], sizeof(in)); 1025 - if (result) { 1026 return -EFAULT; 1027 - } 1028 1029 out.buf = compat_ptr(in.buf); 1030 out.bufsz = in.bufsz; ··· 1029 out.memsz = in.memsz; 1030 1031 result = copy_to_user(&ksegments[i], &out, sizeof(out)); 1032 - if (result) { 1033 return -EFAULT; 1034 - } 1035 } 1036 1037 return sys_kexec_load(entry, nr_segments, ksegments, flags);
··· 87 */ 88 #define KIMAGE_NO_DEST (-1UL) 89 90 + static int kimage_is_destination_range(struct kimage *image, 91 + unsigned long start, unsigned long end); 92 + static struct page *kimage_alloc_page(struct kimage *image, 93 + unsigned int gfp_mask, 94 + unsigned long dest); 95 96 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, 97 + unsigned long nr_segments, 98 + struct kexec_segment __user *segments) 99 { 100 size_t segment_bytes; 101 struct kimage *image; ··· 102 /* Allocate a controlling structure */ 103 result = -ENOMEM; 104 image = kmalloc(sizeof(*image), GFP_KERNEL); 105 + if (!image) 106 goto out; 107 + 108 memset(image, 0, sizeof(*image)); 109 image->head = 0; 110 image->entry = &image->head; ··· 145 result = -EADDRNOTAVAIL; 146 for (i = 0; i < nr_segments; i++) { 147 unsigned long mstart, mend; 148 + 149 mstart = image->segment[i].mem; 150 mend = mstart + image->segment[i].memsz; 151 if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK)) ··· 159 * easy explanation as one segment stops on another. 160 */ 161 result = -EINVAL; 162 + for (i = 0; i < nr_segments; i++) { 163 unsigned long mstart, mend; 164 unsigned long j; 165 + 166 mstart = image->segment[i].mem; 167 mend = mstart + image->segment[i].memsz; 168 + for (j = 0; j < i; j++) { 169 unsigned long pstart, pend; 170 pstart = image->segment[j].mem; 171 pend = pstart + image->segment[j].memsz; ··· 180 * later on. 181 */ 182 result = -EINVAL; 183 + for (i = 0; i < nr_segments; i++) { 184 if (image->segment[i].bufsz > image->segment[i].memsz) 185 goto out; 186 } 187 188 result = 0; 189 + out: 190 + if (result == 0) 191 *rimage = image; 192 + else 193 kfree(image); 194 + 195 return result; 196 197 } 198 199 static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, 200 + unsigned long nr_segments, 201 + struct kexec_segment __user *segments) 202 { 203 int result; 204 struct kimage *image; ··· 206 /* Allocate and initialize a controlling structure */ 207 image = NULL; 208 result = do_kimage_alloc(&image, entry, nr_segments, segments); 209 + if (result) 210 goto out; 211 + 212 *rimage = image; 213 214 /* ··· 218 */ 219 result = -ENOMEM; 220 image->control_code_page = kimage_alloc_control_pages(image, 221 + get_order(KEXEC_CONTROL_CODE_SIZE)); 222 if (!image->control_code_page) { 223 printk(KERN_ERR "Could not allocate control_code_buffer\n"); 224 goto out; ··· 226 227 result = 0; 228 out: 229 + if (result == 0) 230 *rimage = image; 231 + else 232 kfree(image); 233 + 234 return result; 235 } 236 237 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, 238 + unsigned long nr_segments, 239 + struct kexec_segment *segments) 240 { 241 int result; 242 struct kimage *image; ··· 250 251 /* Allocate and initialize a controlling structure */ 252 result = do_kimage_alloc(&image, entry, nr_segments, segments); 253 + if (result) 254 goto out; 255 256 /* Enable the special crash kernel control page 257 * allocation policy. 
··· 272 result = -EADDRNOTAVAIL; 273 for (i = 0; i < nr_segments; i++) { 274 unsigned long mstart, mend; 275 + 276 mstart = image->segment[i].mem; 277 mend = mstart + image->segment[i].memsz - 1; 278 /* Ensure we are within the crash kernel limits */ 279 if ((mstart < crashk_res.start) || (mend > crashk_res.end)) 280 goto out; 281 } 282 283 /* 284 * Find a location for the control code buffer, and add ··· 287 */ 288 result = -ENOMEM; 289 image->control_code_page = kimage_alloc_control_pages(image, 290 + get_order(KEXEC_CONTROL_CODE_SIZE)); 291 if (!image->control_code_page) { 292 printk(KERN_ERR "Could not allocate control_code_buffer\n"); 293 goto out; 294 } 295 296 result = 0; 297 + out: 298 + if (result == 0) 299 *rimage = image; 300 + else 301 kfree(image); 302 + 303 return result; 304 } 305 306 + static int kimage_is_destination_range(struct kimage *image, 307 + unsigned long start, 308 + unsigned long end) 309 { 310 unsigned long i; 311 312 for (i = 0; i < image->nr_segments; i++) { 313 unsigned long mstart, mend; 314 + 315 mstart = image->segment[i].mem; 316 + mend = mstart + image->segment[i].memsz; 317 + if ((end > mstart) && (start < mend)) 318 return 1; 319 } 320 + 321 return 0; 322 } 323 324 + static struct page *kimage_alloc_pages(unsigned int gfp_mask, 325 + unsigned int order) 326 { 327 struct page *pages; 328 + 329 pages = alloc_pages(gfp_mask, order); 330 if (pages) { 331 unsigned int count, i; 332 pages->mapping = NULL; 333 pages->private = order; 334 count = 1 << order; 335 + for (i = 0; i < count; i++) 336 SetPageReserved(pages + i); 337 } 338 + 339 return pages; 340 } 341 342 static void kimage_free_pages(struct page *page) 343 { 344 unsigned int order, count, i; 345 + 346 order = page->private; 347 count = 1 << order; 348 + for (i = 0; i < count; i++) 349 ClearPageReserved(page + i); 350 __free_pages(page, order); 351 } 352 353 static void kimage_free_page_list(struct list_head *list) 354 { 355 struct list_head *pos, *next; 356 + 357 list_for_each_safe(pos, next, list) { 358 struct page *page; 359 360 page = list_entry(pos, struct page, lru); 361 list_del(&page->lru); 362 kimage_free_pages(page); 363 } 364 } 365 366 + static struct page *kimage_alloc_normal_control_pages(struct kimage *image, 367 + unsigned int order) 368 { 369 /* Control pages are special, they are the intermediaries 370 * that are needed while we copy the rest of the pages ··· 387 */ 388 do { 389 unsigned long pfn, epfn, addr, eaddr; 390 + 391 pages = kimage_alloc_pages(GFP_KERNEL, order); 392 if (!pages) 393 break; ··· 395 addr = pfn << PAGE_SHIFT; 396 eaddr = epfn << PAGE_SHIFT; 397 if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) || 398 + kimage_is_destination_range(image, addr, eaddr)) { 399 list_add(&pages->lru, &extra_pages); 400 pages = NULL; 401 } 402 + } while (!pages); 403 + 404 if (pages) { 405 /* Remember the allocated page... */ 406 list_add(&pages->lru, &image->control_pages); ··· 420 * For now it is simpler to just free the pages. 
421 */ 422 kimage_free_page_list(&extra_pages); 423 424 + return pages; 425 } 426 427 + static struct page *kimage_alloc_crash_control_pages(struct kimage *image, 428 + unsigned int order) 429 { 430 /* Control pages are special, they are the intermediaries 431 * that are needed while we copy the rest of the pages ··· 450 */ 451 unsigned long hole_start, hole_end, size; 452 struct page *pages; 453 + 454 pages = NULL; 455 size = (1 << order) << PAGE_SHIFT; 456 hole_start = (image->control_page + (size - 1)) & ~(size - 1); 457 hole_end = hole_start + size - 1; 458 + while (hole_end <= crashk_res.end) { 459 unsigned long i; 460 + 461 + if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT) 462 break; 463 + if (hole_end > crashk_res.end) 464 break; 465 /* See if I overlap any of the segments */ 466 + for (i = 0; i < image->nr_segments; i++) { 467 unsigned long mstart, mend; 468 + 469 mstart = image->segment[i].mem; 470 mend = mstart + image->segment[i].memsz - 1; 471 if ((hole_end >= mstart) && (hole_start <= mend)) { ··· 480 break; 481 } 482 } 483 + if (pages) 484 image->control_page = hole_end; 485 + 486 return pages; 487 } 488 489 490 + struct page *kimage_alloc_control_pages(struct kimage *image, 491 + unsigned int order) 492 { 493 struct page *pages = NULL; 494 + 495 + switch (image->type) { 496 case KEXEC_TYPE_DEFAULT: 497 pages = kimage_alloc_normal_control_pages(image, order); 498 break; ··· 499 pages = kimage_alloc_crash_control_pages(image, order); 500 break; 501 } 502 + 503 return pages; 504 } 505 506 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) 507 { 508 + if (*image->entry != 0) 509 image->entry++; 510 + 511 if (image->entry == image->last_entry) { 512 kimage_entry_t *ind_page; 513 struct page *page; 514 + 515 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST); 516 + if (!page) 517 return -ENOMEM; 518 + 519 ind_page = page_address(page); 520 *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION; 521 image->entry = ind_page; 522 + image->last_entry = ind_page + 523 + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); 524 } 525 *image->entry = entry; 526 image->entry++; 527 *image->entry = 0; 528 + 529 return 0; 530 } 531 532 + static int kimage_set_destination(struct kimage *image, 533 + unsigned long destination) 534 { 535 int result; 536 537 destination &= PAGE_MASK; 538 result = kimage_add_entry(image, destination | IND_DESTINATION); 539 + if (result == 0) 540 image->destination = destination; 541 + 542 return result; 543 } 544 ··· 546 547 page &= PAGE_MASK; 548 result = kimage_add_entry(image, page | IND_SOURCE); 549 + if (result == 0) 550 image->destination += PAGE_SIZE; 551 + 552 return result; 553 } 554 ··· 564 } 565 static int kimage_terminate(struct kimage *image) 566 { 567 + if (*image->entry != 0) 568 image->entry++; 569 + 570 *image->entry = IND_DONE; 571 + 572 return 0; 573 } 574 ··· 591 592 if (!image) 593 return; 594 + 595 kimage_free_extra_pages(image); 596 for_each_kimage_entry(image, ptr, entry) { 597 if (entry & IND_INDIRECTION) { 598 /* Free the previous indirection page */ 599 + if (ind & IND_INDIRECTION) 600 kimage_free_entry(ind); 601 /* Save this indirection page until we are 602 * done with it. 
603 */ 604 ind = entry; 605 } 606 + else if (entry & IND_SOURCE) 607 kimage_free_entry(entry); 608 } 609 /* Free the final indirection page */ 610 + if (ind & IND_INDIRECTION) 611 kimage_free_entry(ind); 612 613 /* Handle any machine specific cleanup */ 614 machine_kexec_cleanup(image); ··· 620 kfree(image); 621 } 622 623 + static kimage_entry_t *kimage_dst_used(struct kimage *image, 624 + unsigned long page) 625 { 626 kimage_entry_t *ptr, entry; 627 unsigned long destination = 0; 628 629 for_each_kimage_entry(image, ptr, entry) { 630 + if (entry & IND_DESTINATION) 631 destination = entry & PAGE_MASK; 632 else if (entry & IND_SOURCE) { 633 + if (page == destination) 634 return ptr; 635 destination += PAGE_SIZE; 636 } 637 } 638 + 639 return 0; 640 } 641 642 + static struct page *kimage_alloc_page(struct kimage *image, 643 + unsigned int gfp_mask, 644 + unsigned long destination) 645 { 646 /* 647 * Here we implement safeguards to ensure that a source page ··· 679 680 /* Allocate a page, if we run out of memory give up */ 681 page = kimage_alloc_pages(gfp_mask, 0); 682 + if (!page) 683 return 0; 684 /* If the page cannot be used file it away */ 685 + if (page_to_pfn(page) > 686 + (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { 687 list_add(&page->lru, &image->unuseable_pages); 688 continue; 689 } ··· 694 break; 695 696 /* If the page is not a destination page use it */ 697 + if (!kimage_is_destination_range(image, addr, 698 + addr + PAGE_SIZE)) 699 break; 700 701 /* ··· 727 list_add(&page->lru, &image->dest_pages); 728 } 729 } 730 + 731 return page; 732 } 733 734 static int kimage_load_normal_segment(struct kimage *image, 735 + struct kexec_segment *segment) 736 { 737 unsigned long maddr; 738 unsigned long ubytes, mbytes; ··· 745 maddr = segment->mem; 746 747 result = kimage_set_destination(image, maddr); 748 + if (result < 0) 749 goto out; 750 + 751 + while (mbytes) { 752 struct page *page; 753 char *ptr; 754 size_t uchunk, mchunk; 755 + 756 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); 757 if (page == 0) { 758 result = -ENOMEM; 759 goto out; 760 } 761 + result = kimage_add_page(image, page_to_pfn(page) 762 + << PAGE_SHIFT); 763 + if (result < 0) 764 goto out; 765 + 766 ptr = kmap(page); 767 /* Start with a clear page */ 768 memset(ptr, 0, PAGE_SIZE); 769 ptr += maddr & ~PAGE_MASK; 770 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); 771 + if (mchunk > mbytes) 772 mchunk = mbytes; 773 + 774 uchunk = mchunk; 775 + if (uchunk > ubytes) 776 uchunk = ubytes; 777 + 778 result = copy_from_user(ptr, buf, uchunk); 779 kunmap(page); 780 if (result) { ··· 784 buf += mchunk; 785 mbytes -= mchunk; 786 } 787 + out: 788 return result; 789 } 790 791 static int kimage_load_crash_segment(struct kimage *image, 792 + struct kexec_segment *segment) 793 { 794 /* For crash dumps kernels we simply copy the data from 795 * user space to it's destination. 
··· 805 ubytes = segment->bufsz; 806 mbytes = segment->memsz; 807 maddr = segment->mem; 808 + while (mbytes) { 809 struct page *page; 810 char *ptr; 811 size_t uchunk, mchunk; 812 + 813 page = pfn_to_page(maddr >> PAGE_SHIFT); 814 if (page == 0) { 815 result = -ENOMEM; ··· 817 ptr = kmap(page); 818 ptr += maddr & ~PAGE_MASK; 819 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); 820 + if (mchunk > mbytes) 821 mchunk = mbytes; 822 + 823 uchunk = mchunk; 824 if (uchunk > ubytes) { 825 uchunk = ubytes; ··· 837 buf += mchunk; 838 mbytes -= mchunk; 839 } 840 + out: 841 return result; 842 } 843 844 static int kimage_load_segment(struct kimage *image, 845 + struct kexec_segment *segment) 846 { 847 int result = -ENOMEM; 848 + 849 + switch (image->type) { 850 case KEXEC_TYPE_DEFAULT: 851 result = kimage_load_normal_segment(image, segment); 852 break; ··· 853 result = kimage_load_crash_segment(image, segment); 854 break; 855 } 856 + 857 return result; 858 } 859 ··· 885 */ 886 static int kexec_lock = 0; 887 888 + asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, 889 + struct kexec_segment __user *segments, 890 + unsigned long flags) 891 { 892 struct kimage **dest_image, *image; 893 int locked; ··· 907 /* Verify we are on the appropriate architecture */ 908 if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) && 909 ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT)) 910 return -EINVAL; 911 912 /* Put an artificial cap on the number 913 * of segments passed to kexec_load. ··· 929 * KISS: always take the mutex. 930 */ 931 locked = xchg(&kexec_lock, 1); 932 + if (locked) 933 return -EBUSY; 934 + 935 dest_image = &kexec_image; 936 + if (flags & KEXEC_ON_CRASH) 937 dest_image = &kexec_crash_image; 938 if (nr_segments > 0) { 939 unsigned long i; 940 + 941 /* Loading another kernel to reboot into */ 942 + if ((flags & KEXEC_ON_CRASH) == 0) 943 + result = kimage_normal_alloc(&image, entry, 944 + nr_segments, segments); 945 /* Loading another kernel to switch to if this one crashes */ 946 else if (flags & KEXEC_ON_CRASH) { 947 /* Free any current crash dump kernel before 948 * we corrupt it. 949 */ 950 kimage_free(xchg(&kexec_crash_image, NULL)); 951 + result = kimage_crash_alloc(&image, entry, 952 + nr_segments, segments); 953 } 954 + if (result) 955 goto out; 956 + 957 result = machine_kexec_prepare(image); 958 + if (result) 959 goto out; 960 + 961 + for (i = 0; i < nr_segments; i++) { 962 result = kimage_load_segment(image, &image->segment[i]); 963 + if (result) 964 goto out; 965 } 966 result = kimage_terminate(image); 967 + if (result) 968 goto out; 969 } 970 /* Install the new kernel, and Uninstall the old */ 971 image = xchg(dest_image, image); 972 973 + out: 974 xchg(&kexec_lock, 0); /* Release the mutex */ 975 kimage_free(image); 976 + 977 return result; 978 } 979 980 #ifdef CONFIG_COMPAT 981 asmlinkage long compat_sys_kexec_load(unsigned long entry, 982 + unsigned long nr_segments, 983 + struct compat_kexec_segment __user *segments, 984 + unsigned long flags) 985 { 986 struct compat_kexec_segment in; 987 struct kexec_segment out, __user *ksegments; ··· 989 /* Don't allow clients that don't understand the native 990 * architecture to do anything. 
991 */ 992 + if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) 993 return -EINVAL; 994 995 + if (nr_segments > KEXEC_SEGMENT_MAX) 996 return -EINVAL; 997 998 ksegments = compat_alloc_user_space(nr_segments * sizeof(out)); 999 for (i=0; i < nr_segments; i++) { 1000 result = copy_from_user(&in, &segments[i], sizeof(in)); 1001 + if (result) 1002 return -EFAULT; 1003 1004 out.buf = compat_ptr(in.buf); 1005 out.bufsz = in.bufsz; ··· 1010 out.memsz = in.memsz; 1011 1012 result = copy_to_user(&ksegments[i], &out, sizeof(out)); 1013 + if (result) 1014 return -EFAULT; 1015 } 1016 1017 return sys_kexec_load(entry, nr_segments, ksegments, flags);