[PATCH] kexec code cleanup

o The following patch provides purely cosmetic changes, correcting CodingStyle
guideline issues such as the ones below in the kexec related files
(see the illustrative fragments after this list):

o unnecessary braces around one-line "if" statements and "for" loops,
o lines more than 80 columns wide,
o no space after the "while", "for" and "switch" keywords
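
For illustration, the typical shape of these cleanups, drawn from the hunks
below (continuation indentation is approximate):

    /* unnecessary braces around a single-statement "if" are dropped */
    -	if (!image) {
    -		goto out;
    -	}
    +	if (!image)
    +		goto out;

    /* lines wider than 80 columns are wrapped */
    -	memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size);
    +	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
    +		relocate_new_kernel_size);

    /* a space is added after the "while", "for" and "switch" keywords */
    -	while(count) {
    +	while (count) {
    -	for(i = 0; i < nr_segments; i++) {
    +	for (i = 0; i < nr_segments; i++) {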

o Changes:
o take-2: Removed the extra tab before "case" keywords.
o take-3: Put the operator at the end of the line and a space before "*/"
  (a brief sketch follows below)
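
A brief sketch of what the take-2 and take-3 revisions refer to (the
switch/case fragment is illustrative only; the assignment comes from the
kernel/kexec.c hunk, with approximate indentation):

    /* take-2: "case" labels line up with the "switch", no extra tab */
    -	switch(image->type) {
    -		case KEXEC_TYPE_DEFAULT:
    +	switch (image->type) {
    +	case KEXEC_TYPE_DEFAULT:

    /* take-3: the continuation operator stays at the end of the line */
    -	image->last_entry =
    -		ind_page + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
    +	image->last_entry = ind_page +
    +		((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);

Block comments are likewise closed with a space before the "*/" marker.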

Signed-off-by: Maneesh Soni <maneesh@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Maneesh Soni and committed by Linus Torvalds (72414d3f 4f339ecb)

10 files changed, 243 insertions(+), 211 deletions(-)
arch/i386/kernel/crash.c: +13 -10
··· 31 31 /* This keeps a track of which one is crashing cpu. */ 32 32 static int crashing_cpu; 33 33 34 - static u32 *append_elf_note(u32 *buf, 35 - char *name, unsigned type, void *data, size_t data_len) 34 + static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data, 35 + size_t data_len) 36 36 { 37 37 struct elf_note note; 38 + 38 39 note.n_namesz = strlen(name) + 1; 39 40 note.n_descsz = data_len; 40 41 note.n_type = type; ··· 45 44 buf += (note.n_namesz + 3)/4; 46 45 memcpy(buf, data, note.n_descsz); 47 46 buf += (note.n_descsz + 3)/4; 47 + 48 48 return buf; 49 49 } 50 50 51 51 static void final_note(u32 *buf) 52 52 { 53 53 struct elf_note note; 54 + 54 55 note.n_namesz = 0; 55 56 note.n_descsz = 0; 56 57 note.n_type = 0; 57 58 memcpy(buf, &note, sizeof(note)); 58 59 } 59 60 60 - 61 61 static void crash_save_this_cpu(struct pt_regs *regs, int cpu) 62 62 { 63 63 struct elf_prstatus prstatus; 64 64 u32 *buf; 65 - if ((cpu < 0) || (cpu >= NR_CPUS)) { 65 + 66 + if ((cpu < 0) || (cpu >= NR_CPUS)) 66 67 return; 67 - } 68 + 68 69 /* Using ELF notes here is opportunistic. 69 70 * I need a well defined structure format 70 71 * for the data I pass, and I need tags ··· 78 75 memset(&prstatus, 0, sizeof(prstatus)); 79 76 prstatus.pr_pid = current->pid; 80 77 elf_core_copy_regs(&prstatus.pr_reg, regs); 81 - buf = append_elf_note(buf, "CORE", NT_PRSTATUS, 82 - &prstatus, sizeof(prstatus)); 83 - 78 + buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus, 79 + sizeof(prstatus)); 84 80 final_note(buf); 85 81 } 86 82 ··· 121 119 { 122 120 struct pt_regs regs; 123 121 int cpu; 124 - cpu = smp_processor_id(); 125 122 123 + cpu = smp_processor_id(); 126 124 if (saved_regs) 127 125 crash_setup_regs(&regs, saved_regs); 128 126 else ··· 155 153 /* Assume hlt works */ 156 154 __asm__("hlt"); 157 155 for(;;); 156 + 158 157 return 1; 159 158 } 160 159 ··· 172 169 static void nmi_shootdown_cpus(void) 173 170 { 174 171 unsigned long msecs; 175 - atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); 176 172 173 + atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); 177 174 /* Would it be better to replace the trap vector here? */ 178 175 set_nmi_callback(crash_nmi_callback); 179 176 /* Ensure the new callback function is set before sending
arch/i386/kernel/machine_kexec.c: +11 -5
··· 80 80 /* Identity map the page table entry */ 81 81 pgtable_level1[level1_index] = address | L0_ATTR; 82 82 pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR; 83 - set_64bit(&pgtable_level3[level3_index], __pa(pgtable_level2) | L2_ATTR); 83 + set_64bit(&pgtable_level3[level3_index], 84 + __pa(pgtable_level2) | L2_ATTR); 84 85 85 86 /* Flush the tlb so the new mapping takes effect. 86 87 * Global tlb entries are not flushed but that is not an issue. ··· 140 139 } 141 140 142 141 typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)( 143 - unsigned long indirection_page, unsigned long reboot_code_buffer, 144 - unsigned long start_address, unsigned int has_pae) ATTRIB_NORET; 142 + unsigned long indirection_page, 143 + unsigned long reboot_code_buffer, 144 + unsigned long start_address, 145 + unsigned int has_pae) ATTRIB_NORET; 145 146 146 147 const extern unsigned char relocate_new_kernel[]; 147 148 extern void relocate_new_kernel_end(void); ··· 183 180 { 184 181 unsigned long page_list; 185 182 unsigned long reboot_code_buffer; 183 + 186 184 relocate_new_kernel_t rnk; 187 185 188 186 /* Interrupts aren't acceptable while we reboot */ 189 187 local_irq_disable(); 190 188 191 189 /* Compute some offsets */ 192 - reboot_code_buffer = page_to_pfn(image->control_code_page) << PAGE_SHIFT; 190 + reboot_code_buffer = page_to_pfn(image->control_code_page) 191 + << PAGE_SHIFT; 193 192 page_list = image->head; 194 193 195 194 /* Set up an identity mapping for the reboot_code_buffer */ 196 195 identity_map_page(reboot_code_buffer); 197 196 198 197 /* copy it out */ 199 - memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); 198 + memcpy((void *)reboot_code_buffer, relocate_new_kernel, 199 + relocate_new_kernel_size); 200 200 201 201 /* The segment registers are funny things, they are 202 202 * automatically loaded from a table, in memory wherever you
arch/ppc/kernel/machine_kexec.c: +13 -17
··· 21 21 #include <asm/machdep.h> 22 22 23 23 typedef NORET_TYPE void (*relocate_new_kernel_t)( 24 - unsigned long indirection_page, unsigned long reboot_code_buffer, 25 - unsigned long start_address) ATTRIB_NORET; 24 + unsigned long indirection_page, 25 + unsigned long reboot_code_buffer, 26 + unsigned long start_address) ATTRIB_NORET; 26 27 27 28 const extern unsigned char relocate_new_kernel[]; 28 29 const extern unsigned int relocate_new_kernel_size; 29 30 30 31 void machine_shutdown(void) 31 32 { 32 - if (ppc_md.machine_shutdown) { 33 + if (ppc_md.machine_shutdown) 33 34 ppc_md.machine_shutdown(); 34 - } 35 35 } 36 36 37 37 void machine_crash_shutdown(struct pt_regs *regs) 38 38 { 39 - if (ppc_md.machine_crash_shutdown) { 39 + if (ppc_md.machine_crash_shutdown) 40 40 ppc_md.machine_crash_shutdown(); 41 - } 42 41 } 43 42 44 43 /* ··· 47 48 */ 48 49 int machine_kexec_prepare(struct kimage *image) 49 50 { 50 - if (ppc_md.machine_kexec_prepare) { 51 + if (ppc_md.machine_kexec_prepare) 51 52 return ppc_md.machine_kexec_prepare(image); 52 - } 53 53 /* 54 54 * Fail if platform doesn't provide its own machine_kexec_prepare 55 55 * implementation. ··· 58 60 59 61 void machine_kexec_cleanup(struct kimage *image) 60 62 { 61 - if (ppc_md.machine_kexec_cleanup) { 63 + if (ppc_md.machine_kexec_cleanup) 62 64 ppc_md.machine_kexec_cleanup(image); 63 - } 64 65 } 65 66 66 67 /* ··· 68 71 */ 69 72 NORET_TYPE void machine_kexec(struct kimage *image) 70 73 { 71 - if (ppc_md.machine_kexec) { 74 + if (ppc_md.machine_kexec) 72 75 ppc_md.machine_kexec(image); 73 - } else { 76 + else { 74 77 /* 75 78 * Fall back to normal restart if platform doesn't provide 76 79 * its own kexec function, and user insist to kexec... ··· 79 82 } 80 83 for(;;); 81 84 } 82 - 83 85 84 86 /* 85 87 * This is a generic machine_kexec function suitable at least for ··· 100 104 101 105 /* we need both effective and real address here */ 102 106 reboot_code_buffer = 103 - (unsigned long)page_address(image->control_code_page); 107 + (unsigned long)page_address(image->control_code_page); 104 108 reboot_code_buffer_phys = virt_to_phys((void *)reboot_code_buffer); 105 109 106 110 /* copy our kernel relocation code to the control code page */ 107 - memcpy((void *)reboot_code_buffer, 108 - relocate_new_kernel, relocate_new_kernel_size); 111 + memcpy((void *)reboot_code_buffer, relocate_new_kernel, 112 + relocate_new_kernel_size); 109 113 110 114 flush_icache_range(reboot_code_buffer, 111 - reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE); 115 + reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE); 112 116 printk(KERN_INFO "Bye!\n"); 113 117 114 118 /* now call it */
arch/ppc64/kernel/machine_kexec.c: +5 -4
··· 58 58 * handle the virtual mode, we must make sure no destination 59 59 * overlaps kernel static data or bss. 60 60 */ 61 - for(i = 0; i < image->nr_segments; i++) 61 + for (i = 0; i < image->nr_segments; i++) 62 62 if (image->segment[i].mem < __pa(_end)) 63 63 return -ETXTBSY; 64 64 ··· 76 76 low = __pa(htab_address); 77 77 high = low + (htab_hash_mask + 1) * HASH_GROUP_SIZE; 78 78 79 - for(i = 0; i < image->nr_segments; i++) { 79 + for (i = 0; i < image->nr_segments; i++) { 80 80 begin = image->segment[i].mem; 81 81 end = begin + image->segment[i].memsz; 82 82 ··· 98 98 low = *basep; 99 99 high = low + (*sizep); 100 100 101 - for(i = 0; i < image->nr_segments; i++) { 101 + for (i = 0; i < image->nr_segments; i++) { 102 102 begin = image->segment[i].mem; 103 103 end = begin + image->segment[i].memsz; 104 104 ··· 274 274 275 275 /* Our assembly helper, in kexec_stub.S */ 276 276 extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start, 277 - void *image, void *control, void (*clear_all)(void)) ATTRIB_NORET; 277 + void *image, void *control, 278 + void (*clear_all)(void)) ATTRIB_NORET; 278 279 279 280 /* too late to fail here */ 280 281 void machine_kexec(struct kimage *image)
arch/s390/kernel/machine_kexec.c: +2 -2
··· 67 67 ctl_clear_bit(0,28); 68 68 69 69 on_each_cpu(kexec_halt_all_cpus, image, 0, 0); 70 - for(;;); 70 + for (;;); 71 71 } 72 72 73 73 static void ··· 85 85 for_each_online_cpu(cpu) { 86 86 if (cpu == smp_processor_id()) 87 87 continue; 88 - while(!smp_cpu_not_running(cpu)) 88 + while (!smp_cpu_not_running(cpu)) 89 89 cpu_relax(); 90 90 } 91 91
arch/x86_64/kernel/machine_kexec.c: +27 -22
··· 32 32 #define L2_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) 33 33 #define L3_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) 34 34 35 - static void init_level2_page( 36 - u64 *level2p, unsigned long addr) 35 + static void init_level2_page(u64 *level2p, unsigned long addr) 37 36 { 38 37 unsigned long end_addr; 38 + 39 39 addr &= PAGE_MASK; 40 40 end_addr = addr + LEVEL2_SIZE; 41 - while(addr < end_addr) { 41 + while (addr < end_addr) { 42 42 *(level2p++) = addr | L1_ATTR; 43 43 addr += LEVEL1_SIZE; 44 44 } 45 45 } 46 46 47 - static int init_level3_page(struct kimage *image, 48 - u64 *level3p, unsigned long addr, unsigned long last_addr) 47 + static int init_level3_page(struct kimage *image, u64 *level3p, 48 + unsigned long addr, unsigned long last_addr) 49 49 { 50 50 unsigned long end_addr; 51 51 int result; 52 + 52 53 result = 0; 53 54 addr &= PAGE_MASK; 54 55 end_addr = addr + LEVEL3_SIZE; 55 - while((addr < last_addr) && (addr < end_addr)) { 56 + while ((addr < last_addr) && (addr < end_addr)) { 56 57 struct page *page; 57 58 u64 *level2p; 59 + 58 60 page = kimage_alloc_control_pages(image, 0); 59 61 if (!page) { 60 62 result = -ENOMEM; ··· 68 66 addr += LEVEL2_SIZE; 69 67 } 70 68 /* clear the unused entries */ 71 - while(addr < end_addr) { 69 + while (addr < end_addr) { 72 70 *(level3p++) = 0; 73 71 addr += LEVEL2_SIZE; 74 72 } ··· 77 75 } 78 76 79 77 80 - static int init_level4_page(struct kimage *image, 81 - u64 *level4p, unsigned long addr, unsigned long last_addr) 78 + static int init_level4_page(struct kimage *image, u64 *level4p, 79 + unsigned long addr, unsigned long last_addr) 82 80 { 83 81 unsigned long end_addr; 84 82 int result; 83 + 85 84 result = 0; 86 85 addr &= PAGE_MASK; 87 86 end_addr = addr + LEVEL4_SIZE; 88 - while((addr < last_addr) && (addr < end_addr)) { 87 + while ((addr < last_addr) && (addr < end_addr)) { 89 88 struct page *page; 90 89 u64 *level3p; 90 + 91 91 page = kimage_alloc_control_pages(image, 0); 92 92 if (!page) { 93 93 result = -ENOMEM; ··· 104 100 addr += LEVEL3_SIZE; 105 101 } 106 102 /* clear the unused entries */ 107 - while(addr < end_addr) { 103 + while (addr < end_addr) { 108 104 *(level4p++) = 0; 109 105 addr += LEVEL3_SIZE; 110 106 } 111 - out: 107 + out: 112 108 return result; 113 109 } 114 110 ··· 117 113 { 118 114 u64 *level4p; 119 115 level4p = (u64 *)__va(start_pgtable); 120 - return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT); 116 + return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT); 121 117 } 122 118 123 119 static void set_idt(void *newidt, u16 limit) ··· 163 159 #undef __STR 164 160 } 165 161 166 - typedef NORET_TYPE void (*relocate_new_kernel_t)( 167 - unsigned long indirection_page, unsigned long control_code_buffer, 168 - unsigned long start_address, unsigned long pgtable) ATTRIB_NORET; 162 + typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page, 163 + unsigned long control_code_buffer, 164 + unsigned long start_address, 165 + unsigned long pgtable) ATTRIB_NORET; 169 166 170 167 const extern unsigned char relocate_new_kernel[]; 171 168 const extern unsigned long relocate_new_kernel_size; ··· 177 172 int result; 178 173 179 174 /* Calculate the offsets */ 180 - start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT; 175 + start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT; 181 176 control_code_buffer = start_pgtable + 4096UL; 182 177 183 178 /* Setup the identity mapped 64bit page table */ 184 179 result = 
init_pgtable(image, start_pgtable); 185 - if (result) { 180 + if (result) 186 181 return result; 187 - } 188 182 189 183 /* Place the code in the reboot code buffer */ 190 - memcpy(__va(control_code_buffer), relocate_new_kernel, relocate_new_kernel_size); 184 + memcpy(__va(control_code_buffer), relocate_new_kernel, 185 + relocate_new_kernel_size); 191 186 192 187 return 0; 193 188 } ··· 212 207 local_irq_disable(); 213 208 214 209 /* Calculate the offsets */ 215 - page_list = image->head; 216 - start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT; 210 + page_list = image->head; 211 + start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT; 217 212 control_code_buffer = start_pgtable + 4096UL; 218 213 219 214 /* Set the low half of the page table to my identity mapped
drivers/char/mem.c: +1 -1
··· 287 287 size_t read = 0, csize; 288 288 int rc = 0; 289 289 290 - while(count) { 290 + while (count) { 291 291 pfn = *ppos / PAGE_SIZE; 292 292 if (pfn > saved_max_pfn) 293 293 return read;
include/linux/kexec.h: +8 -5
··· 91 91 extern int machine_kexec_prepare(struct kimage *image); 92 92 extern void machine_kexec_cleanup(struct kimage *image); 93 93 extern asmlinkage long sys_kexec_load(unsigned long entry, 94 - unsigned long nr_segments, struct kexec_segment __user *segments, 95 - unsigned long flags); 94 + unsigned long nr_segments, 95 + struct kexec_segment __user *segments, 96 + unsigned long flags); 96 97 #ifdef CONFIG_COMPAT 97 98 extern asmlinkage long compat_sys_kexec_load(unsigned long entry, 98 - unsigned long nr_segments, struct compat_kexec_segment __user *segments, 99 - unsigned long flags); 99 + unsigned long nr_segments, 100 + struct compat_kexec_segment __user *segments, 101 + unsigned long flags); 100 102 #endif 101 - extern struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order); 103 + extern struct page *kimage_alloc_control_pages(struct kimage *image, 104 + unsigned int order); 102 105 extern void crash_kexec(struct pt_regs *); 103 106 int kexec_should_crash(struct task_struct *); 104 107 extern struct kimage *kexec_image;
include/linux/syscalls.h: +3 -3
··· 159 159 asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, 160 160 void __user *arg); 161 161 asmlinkage long sys_restart_syscall(void); 162 - asmlinkage long sys_kexec_load(unsigned long entry, 163 - unsigned long nr_segments, struct kexec_segment __user *segments, 164 - unsigned long flags); 162 + asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, 163 + struct kexec_segment __user *segments, 164 + unsigned long flags); 165 165 166 166 asmlinkage long sys_exit(int error_code); 167 167 asmlinkage void sys_exit_group(int error_code);
kernel/kexec.c: +160 -142
··· 87 87 */ 88 88 #define KIMAGE_NO_DEST (-1UL) 89 89 90 - static int kimage_is_destination_range( 91 - struct kimage *image, unsigned long start, unsigned long end); 92 - static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long dest); 90 + static int kimage_is_destination_range(struct kimage *image, 91 + unsigned long start, unsigned long end); 92 + static struct page *kimage_alloc_page(struct kimage *image, 93 + unsigned int gfp_mask, 94 + unsigned long dest); 93 95 94 96 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, 95 - unsigned long nr_segments, struct kexec_segment __user *segments) 97 + unsigned long nr_segments, 98 + struct kexec_segment __user *segments) 96 99 { 97 100 size_t segment_bytes; 98 101 struct kimage *image; ··· 105 102 /* Allocate a controlling structure */ 106 103 result = -ENOMEM; 107 104 image = kmalloc(sizeof(*image), GFP_KERNEL); 108 - if (!image) { 105 + if (!image) 109 106 goto out; 110 - } 107 + 111 108 memset(image, 0, sizeof(*image)); 112 109 image->head = 0; 113 110 image->entry = &image->head; ··· 148 145 result = -EADDRNOTAVAIL; 149 146 for (i = 0; i < nr_segments; i++) { 150 147 unsigned long mstart, mend; 148 + 151 149 mstart = image->segment[i].mem; 152 150 mend = mstart + image->segment[i].memsz; 153 151 if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK)) ··· 163 159 * easy explanation as one segment stops on another. 164 160 */ 165 161 result = -EINVAL; 166 - for(i = 0; i < nr_segments; i++) { 162 + for (i = 0; i < nr_segments; i++) { 167 163 unsigned long mstart, mend; 168 164 unsigned long j; 165 + 169 166 mstart = image->segment[i].mem; 170 167 mend = mstart + image->segment[i].memsz; 171 - for(j = 0; j < i; j++) { 168 + for (j = 0; j < i; j++) { 172 169 unsigned long pstart, pend; 173 170 pstart = image->segment[j].mem; 174 171 pend = pstart + image->segment[j].memsz; ··· 185 180 * later on. 
186 181 */ 187 182 result = -EINVAL; 188 - for(i = 0; i < nr_segments; i++) { 183 + for (i = 0; i < nr_segments; i++) { 189 184 if (image->segment[i].bufsz > image->segment[i].memsz) 190 185 goto out; 191 186 } 192 187 193 - 194 188 result = 0; 195 - out: 196 - if (result == 0) { 189 + out: 190 + if (result == 0) 197 191 *rimage = image; 198 - } else { 192 + else 199 193 kfree(image); 200 - } 194 + 201 195 return result; 202 196 203 197 } 204 198 205 199 static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, 206 - unsigned long nr_segments, struct kexec_segment __user *segments) 200 + unsigned long nr_segments, 201 + struct kexec_segment __user *segments) 207 202 { 208 203 int result; 209 204 struct kimage *image; ··· 211 206 /* Allocate and initialize a controlling structure */ 212 207 image = NULL; 213 208 result = do_kimage_alloc(&image, entry, nr_segments, segments); 214 - if (result) { 209 + if (result) 215 210 goto out; 216 - } 211 + 217 212 *rimage = image; 218 213 219 214 /* ··· 223 218 */ 224 219 result = -ENOMEM; 225 220 image->control_code_page = kimage_alloc_control_pages(image, 226 - get_order(KEXEC_CONTROL_CODE_SIZE)); 221 + get_order(KEXEC_CONTROL_CODE_SIZE)); 227 222 if (!image->control_code_page) { 228 223 printk(KERN_ERR "Could not allocate control_code_buffer\n"); 229 224 goto out; ··· 231 226 232 227 result = 0; 233 228 out: 234 - if (result == 0) { 229 + if (result == 0) 235 230 *rimage = image; 236 - } else { 231 + else 237 232 kfree(image); 238 - } 233 + 239 234 return result; 240 235 } 241 236 242 237 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, 243 - unsigned long nr_segments, struct kexec_segment *segments) 238 + unsigned long nr_segments, 239 + struct kexec_segment *segments) 244 240 { 245 241 int result; 246 242 struct kimage *image; ··· 256 250 257 251 /* Allocate and initialize a controlling structure */ 258 252 result = do_kimage_alloc(&image, entry, nr_segments, segments); 259 - if (result) { 253 + if (result) 260 254 goto out; 261 - } 262 255 263 256 /* Enable the special crash kernel control page 264 257 * allocation policy. 
··· 277 272 result = -EADDRNOTAVAIL; 278 273 for (i = 0; i < nr_segments; i++) { 279 274 unsigned long mstart, mend; 275 + 280 276 mstart = image->segment[i].mem; 281 277 mend = mstart + image->segment[i].memsz - 1; 282 278 /* Ensure we are within the crash kernel limits */ 283 279 if ((mstart < crashk_res.start) || (mend > crashk_res.end)) 284 280 goto out; 285 281 } 286 - 287 282 288 283 /* 289 284 * Find a location for the control code buffer, and add ··· 292 287 */ 293 288 result = -ENOMEM; 294 289 image->control_code_page = kimage_alloc_control_pages(image, 295 - get_order(KEXEC_CONTROL_CODE_SIZE)); 290 + get_order(KEXEC_CONTROL_CODE_SIZE)); 296 291 if (!image->control_code_page) { 297 292 printk(KERN_ERR "Could not allocate control_code_buffer\n"); 298 293 goto out; 299 294 } 300 295 301 296 result = 0; 302 - out: 303 - if (result == 0) { 297 + out: 298 + if (result == 0) 304 299 *rimage = image; 305 - } else { 300 + else 306 301 kfree(image); 307 - } 302 + 308 303 return result; 309 304 } 310 305 311 - static int kimage_is_destination_range( 312 - struct kimage *image, unsigned long start, unsigned long end) 306 + static int kimage_is_destination_range(struct kimage *image, 307 + unsigned long start, 308 + unsigned long end) 313 309 { 314 310 unsigned long i; 315 311 316 312 for (i = 0; i < image->nr_segments; i++) { 317 313 unsigned long mstart, mend; 314 + 318 315 mstart = image->segment[i].mem; 319 - mend = mstart + image->segment[i].memsz; 320 - if ((end > mstart) && (start < mend)) { 316 + mend = mstart + image->segment[i].memsz; 317 + if ((end > mstart) && (start < mend)) 321 318 return 1; 322 - } 323 319 } 320 + 324 321 return 0; 325 322 } 326 323 327 - static struct page *kimage_alloc_pages(unsigned int gfp_mask, unsigned int order) 324 + static struct page *kimage_alloc_pages(unsigned int gfp_mask, 325 + unsigned int order) 328 326 { 329 327 struct page *pages; 328 + 330 329 pages = alloc_pages(gfp_mask, order); 331 330 if (pages) { 332 331 unsigned int count, i; 333 332 pages->mapping = NULL; 334 333 pages->private = order; 335 334 count = 1 << order; 336 - for(i = 0; i < count; i++) { 335 + for (i = 0; i < count; i++) 337 336 SetPageReserved(pages + i); 338 - } 339 337 } 338 + 340 339 return pages; 341 340 } 342 341 343 342 static void kimage_free_pages(struct page *page) 344 343 { 345 344 unsigned int order, count, i; 345 + 346 346 order = page->private; 347 347 count = 1 << order; 348 - for(i = 0; i < count; i++) { 348 + for (i = 0; i < count; i++) 349 349 ClearPageReserved(page + i); 350 - } 351 350 __free_pages(page, order); 352 351 } 353 352 354 353 static void kimage_free_page_list(struct list_head *list) 355 354 { 356 355 struct list_head *pos, *next; 356 + 357 357 list_for_each_safe(pos, next, list) { 358 358 struct page *page; 359 359 360 360 page = list_entry(pos, struct page, lru); 361 361 list_del(&page->lru); 362 - 363 362 kimage_free_pages(page); 364 363 } 365 364 } 366 365 367 - static struct page *kimage_alloc_normal_control_pages( 368 - struct kimage *image, unsigned int order) 366 + static struct page *kimage_alloc_normal_control_pages(struct kimage *image, 367 + unsigned int order) 369 368 { 370 369 /* Control pages are special, they are the intermediaries 371 370 * that are needed while we copy the rest of the pages ··· 396 387 */ 397 388 do { 398 389 unsigned long pfn, epfn, addr, eaddr; 390 + 399 391 pages = kimage_alloc_pages(GFP_KERNEL, order); 400 392 if (!pages) 401 393 break; ··· 405 395 addr = pfn << PAGE_SHIFT; 406 396 eaddr = epfn << 
PAGE_SHIFT; 407 397 if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) || 408 - kimage_is_destination_range(image, addr, eaddr)) 409 - { 398 + kimage_is_destination_range(image, addr, eaddr)) { 410 399 list_add(&pages->lru, &extra_pages); 411 400 pages = NULL; 412 401 } 413 - } while(!pages); 402 + } while (!pages); 403 + 414 404 if (pages) { 415 405 /* Remember the allocated page... */ 416 406 list_add(&pages->lru, &image->control_pages); ··· 430 420 * For now it is simpler to just free the pages. 431 421 */ 432 422 kimage_free_page_list(&extra_pages); 433 - return pages; 434 423 424 + return pages; 435 425 } 436 426 437 - static struct page *kimage_alloc_crash_control_pages( 438 - struct kimage *image, unsigned int order) 427 + static struct page *kimage_alloc_crash_control_pages(struct kimage *image, 428 + unsigned int order) 439 429 { 440 430 /* Control pages are special, they are the intermediaries 441 431 * that are needed while we copy the rest of the pages ··· 460 450 */ 461 451 unsigned long hole_start, hole_end, size; 462 452 struct page *pages; 453 + 463 454 pages = NULL; 464 455 size = (1 << order) << PAGE_SHIFT; 465 456 hole_start = (image->control_page + (size - 1)) & ~(size - 1); 466 457 hole_end = hole_start + size - 1; 467 - while(hole_end <= crashk_res.end) { 458 + while (hole_end <= crashk_res.end) { 468 459 unsigned long i; 469 - if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT) { 460 + 461 + if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT) 470 462 break; 471 - } 472 - if (hole_end > crashk_res.end) { 463 + if (hole_end > crashk_res.end) 473 464 break; 474 - } 475 465 /* See if I overlap any of the segments */ 476 - for(i = 0; i < image->nr_segments; i++) { 466 + for (i = 0; i < image->nr_segments; i++) { 477 467 unsigned long mstart, mend; 468 + 478 469 mstart = image->segment[i].mem; 479 470 mend = mstart + image->segment[i].memsz - 1; 480 471 if ((hole_end >= mstart) && (hole_start <= mend)) { ··· 491 480 break; 492 481 } 493 482 } 494 - if (pages) { 483 + if (pages) 495 484 image->control_page = hole_end; 496 - } 485 + 497 486 return pages; 498 487 } 499 488 500 489 501 - struct page *kimage_alloc_control_pages( 502 - struct kimage *image, unsigned int order) 490 + struct page *kimage_alloc_control_pages(struct kimage *image, 491 + unsigned int order) 503 492 { 504 493 struct page *pages = NULL; 505 - switch(image->type) { 494 + 495 + switch (image->type) { 506 496 case KEXEC_TYPE_DEFAULT: 507 497 pages = kimage_alloc_normal_control_pages(image, order); 508 498 break; ··· 511 499 pages = kimage_alloc_crash_control_pages(image, order); 512 500 break; 513 501 } 502 + 514 503 return pages; 515 504 } 516 505 517 506 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) 518 507 { 519 - if (*image->entry != 0) { 508 + if (*image->entry != 0) 520 509 image->entry++; 521 - } 510 + 522 511 if (image->entry == image->last_entry) { 523 512 kimage_entry_t *ind_page; 524 513 struct page *page; 514 + 525 515 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST); 526 - if (!page) { 516 + if (!page) 527 517 return -ENOMEM; 528 - } 518 + 529 519 ind_page = page_address(page); 530 520 *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION; 531 521 image->entry = ind_page; 532 - image->last_entry = 533 - ind_page + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); 522 + image->last_entry = ind_page + 523 + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); 534 524 } 535 525 *image->entry = entry; 536 526 image->entry++; 537 527 *image->entry = 0; 528 + 538 529 return 0; 539 530 } 
540 531 541 - static int kimage_set_destination( 542 - struct kimage *image, unsigned long destination) 532 + static int kimage_set_destination(struct kimage *image, 533 + unsigned long destination) 543 534 { 544 535 int result; 545 536 546 537 destination &= PAGE_MASK; 547 538 result = kimage_add_entry(image, destination | IND_DESTINATION); 548 - if (result == 0) { 539 + if (result == 0) 549 540 image->destination = destination; 550 - } 541 + 551 542 return result; 552 543 } 553 544 ··· 561 546 562 547 page &= PAGE_MASK; 563 548 result = kimage_add_entry(image, page | IND_SOURCE); 564 - if (result == 0) { 549 + if (result == 0) 565 550 image->destination += PAGE_SIZE; 566 - } 551 + 567 552 return result; 568 553 } 569 554 ··· 579 564 } 580 565 static int kimage_terminate(struct kimage *image) 581 566 { 582 - if (*image->entry != 0) { 567 + if (*image->entry != 0) 583 568 image->entry++; 584 - } 569 + 585 570 *image->entry = IND_DONE; 571 + 586 572 return 0; 587 573 } 588 574 ··· 607 591 608 592 if (!image) 609 593 return; 594 + 610 595 kimage_free_extra_pages(image); 611 596 for_each_kimage_entry(image, ptr, entry) { 612 597 if (entry & IND_INDIRECTION) { 613 598 /* Free the previous indirection page */ 614 - if (ind & IND_INDIRECTION) { 599 + if (ind & IND_INDIRECTION) 615 600 kimage_free_entry(ind); 616 - } 617 601 /* Save this indirection page until we are 618 602 * done with it. 619 603 */ 620 604 ind = entry; 621 605 } 622 - else if (entry & IND_SOURCE) { 606 + else if (entry & IND_SOURCE) 623 607 kimage_free_entry(entry); 624 - } 625 608 } 626 609 /* Free the final indirection page */ 627 - if (ind & IND_INDIRECTION) { 610 + if (ind & IND_INDIRECTION) 628 611 kimage_free_entry(ind); 629 - } 630 612 631 613 /* Handle any machine specific cleanup */ 632 614 machine_kexec_cleanup(image); ··· 634 620 kfree(image); 635 621 } 636 622 637 - static kimage_entry_t *kimage_dst_used(struct kimage *image, unsigned long page) 623 + static kimage_entry_t *kimage_dst_used(struct kimage *image, 624 + unsigned long page) 638 625 { 639 626 kimage_entry_t *ptr, entry; 640 627 unsigned long destination = 0; 641 628 642 629 for_each_kimage_entry(image, ptr, entry) { 643 - if (entry & IND_DESTINATION) { 630 + if (entry & IND_DESTINATION) 644 631 destination = entry & PAGE_MASK; 645 - } 646 632 else if (entry & IND_SOURCE) { 647 - if (page == destination) { 633 + if (page == destination) 648 634 return ptr; 649 - } 650 635 destination += PAGE_SIZE; 651 636 } 652 637 } 638 + 653 639 return 0; 654 640 } 655 641 656 - static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long destination) 642 + static struct page *kimage_alloc_page(struct kimage *image, 643 + unsigned int gfp_mask, 644 + unsigned long destination) 657 645 { 658 646 /* 659 647 * Here we implement safeguards to ensure that a source page ··· 695 679 696 680 /* Allocate a page, if we run out of memory give up */ 697 681 page = kimage_alloc_pages(gfp_mask, 0); 698 - if (!page) { 682 + if (!page) 699 683 return 0; 700 - } 701 684 /* If the page cannot be used file it away */ 702 - if (page_to_pfn(page) > (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { 685 + if (page_to_pfn(page) > 686 + (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { 703 687 list_add(&page->lru, &image->unuseable_pages); 704 688 continue; 705 689 } ··· 710 694 break; 711 695 712 696 /* If the page is not a destination page use it */ 713 - if (!kimage_is_destination_range(image, addr, addr + PAGE_SIZE)) 697 + if (!kimage_is_destination_range(image, 
addr, 698 + addr + PAGE_SIZE)) 714 699 break; 715 700 716 701 /* ··· 744 727 list_add(&page->lru, &image->dest_pages); 745 728 } 746 729 } 730 + 747 731 return page; 748 732 } 749 733 750 734 static int kimage_load_normal_segment(struct kimage *image, 751 - struct kexec_segment *segment) 735 + struct kexec_segment *segment) 752 736 { 753 737 unsigned long maddr; 754 738 unsigned long ubytes, mbytes; ··· 763 745 maddr = segment->mem; 764 746 765 747 result = kimage_set_destination(image, maddr); 766 - if (result < 0) { 748 + if (result < 0) 767 749 goto out; 768 - } 769 - while(mbytes) { 750 + 751 + while (mbytes) { 770 752 struct page *page; 771 753 char *ptr; 772 754 size_t uchunk, mchunk; 755 + 773 756 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); 774 757 if (page == 0) { 775 758 result = -ENOMEM; 776 759 goto out; 777 760 } 778 - result = kimage_add_page(image, page_to_pfn(page) << PAGE_SHIFT); 779 - if (result < 0) { 761 + result = kimage_add_page(image, page_to_pfn(page) 762 + << PAGE_SHIFT); 763 + if (result < 0) 780 764 goto out; 781 - } 765 + 782 766 ptr = kmap(page); 783 767 /* Start with a clear page */ 784 768 memset(ptr, 0, PAGE_SIZE); 785 769 ptr += maddr & ~PAGE_MASK; 786 770 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); 787 - if (mchunk > mbytes) { 771 + if (mchunk > mbytes) 788 772 mchunk = mbytes; 789 - } 773 + 790 774 uchunk = mchunk; 791 - if (uchunk > ubytes) { 775 + if (uchunk > ubytes) 792 776 uchunk = ubytes; 793 - } 777 + 794 778 result = copy_from_user(ptr, buf, uchunk); 795 779 kunmap(page); 796 780 if (result) { ··· 804 784 buf += mchunk; 805 785 mbytes -= mchunk; 806 786 } 807 - out: 787 + out: 808 788 return result; 809 789 } 810 790 811 791 static int kimage_load_crash_segment(struct kimage *image, 812 - struct kexec_segment *segment) 792 + struct kexec_segment *segment) 813 793 { 814 794 /* For crash dumps kernels we simply copy the data from 815 795 * user space to it's destination. 
··· 825 805 ubytes = segment->bufsz; 826 806 mbytes = segment->memsz; 827 807 maddr = segment->mem; 828 - while(mbytes) { 808 + while (mbytes) { 829 809 struct page *page; 830 810 char *ptr; 831 811 size_t uchunk, mchunk; 812 + 832 813 page = pfn_to_page(maddr >> PAGE_SHIFT); 833 814 if (page == 0) { 834 815 result = -ENOMEM; ··· 838 817 ptr = kmap(page); 839 818 ptr += maddr & ~PAGE_MASK; 840 819 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); 841 - if (mchunk > mbytes) { 820 + if (mchunk > mbytes) 842 821 mchunk = mbytes; 843 - } 822 + 844 823 uchunk = mchunk; 845 824 if (uchunk > ubytes) { 846 825 uchunk = ubytes; ··· 858 837 buf += mchunk; 859 838 mbytes -= mchunk; 860 839 } 861 - out: 840 + out: 862 841 return result; 863 842 } 864 843 865 844 static int kimage_load_segment(struct kimage *image, 866 - struct kexec_segment *segment) 845 + struct kexec_segment *segment) 867 846 { 868 847 int result = -ENOMEM; 869 - switch(image->type) { 848 + 849 + switch (image->type) { 870 850 case KEXEC_TYPE_DEFAULT: 871 851 result = kimage_load_normal_segment(image, segment); 872 852 break; ··· 875 853 result = kimage_load_crash_segment(image, segment); 876 854 break; 877 855 } 856 + 878 857 return result; 879 858 } 880 859 ··· 908 885 */ 909 886 static int kexec_lock = 0; 910 887 911 - asmlinkage long sys_kexec_load(unsigned long entry, 912 - unsigned long nr_segments, struct kexec_segment __user *segments, 913 - unsigned long flags) 888 + asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, 889 + struct kexec_segment __user *segments, 890 + unsigned long flags) 914 891 { 915 892 struct kimage **dest_image, *image; 916 893 int locked; ··· 930 907 /* Verify we are on the appropriate architecture */ 931 908 if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) && 932 909 ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT)) 933 - { 934 910 return -EINVAL; 935 - } 936 911 937 912 /* Put an artificial cap on the number 938 913 * of segments passed to kexec_load. ··· 950 929 * KISS: always take the mutex. 951 930 */ 952 931 locked = xchg(&kexec_lock, 1); 953 - if (locked) { 932 + if (locked) 954 933 return -EBUSY; 955 - } 934 + 956 935 dest_image = &kexec_image; 957 - if (flags & KEXEC_ON_CRASH) { 936 + if (flags & KEXEC_ON_CRASH) 958 937 dest_image = &kexec_crash_image; 959 - } 960 938 if (nr_segments > 0) { 961 939 unsigned long i; 940 + 962 941 /* Loading another kernel to reboot into */ 963 - if ((flags & KEXEC_ON_CRASH) == 0) { 964 - result = kimage_normal_alloc(&image, entry, nr_segments, segments); 965 - } 942 + if ((flags & KEXEC_ON_CRASH) == 0) 943 + result = kimage_normal_alloc(&image, entry, 944 + nr_segments, segments); 966 945 /* Loading another kernel to switch to if this one crashes */ 967 946 else if (flags & KEXEC_ON_CRASH) { 968 947 /* Free any current crash dump kernel before 969 948 * we corrupt it. 
970 949 */ 971 950 kimage_free(xchg(&kexec_crash_image, NULL)); 972 - result = kimage_crash_alloc(&image, entry, nr_segments, segments); 951 + result = kimage_crash_alloc(&image, entry, 952 + nr_segments, segments); 973 953 } 974 - if (result) { 954 + if (result) 975 955 goto out; 976 - } 956 + 977 957 result = machine_kexec_prepare(image); 978 - if (result) { 958 + if (result) 979 959 goto out; 980 - } 981 - for(i = 0; i < nr_segments; i++) { 960 + 961 + for (i = 0; i < nr_segments; i++) { 982 962 result = kimage_load_segment(image, &image->segment[i]); 983 - if (result) { 963 + if (result) 984 964 goto out; 985 - } 986 965 } 987 966 result = kimage_terminate(image); 988 - if (result) { 967 + if (result) 989 968 goto out; 990 - } 991 969 } 992 970 /* Install the new kernel, and Uninstall the old */ 993 971 image = xchg(dest_image, image); 994 972 995 - out: 973 + out: 996 974 xchg(&kexec_lock, 0); /* Release the mutex */ 997 975 kimage_free(image); 976 + 998 977 return result; 999 978 } 1000 979 1001 980 #ifdef CONFIG_COMPAT 1002 981 asmlinkage long compat_sys_kexec_load(unsigned long entry, 1003 - unsigned long nr_segments, struct compat_kexec_segment __user *segments, 1004 - unsigned long flags) 982 + unsigned long nr_segments, 983 + struct compat_kexec_segment __user *segments, 984 + unsigned long flags) 1005 985 { 1006 986 struct compat_kexec_segment in; 1007 987 struct kexec_segment out, __user *ksegments; ··· 1011 989 /* Don't allow clients that don't understand the native 1012 990 * architecture to do anything. 1013 991 */ 1014 - if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) { 992 + if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) 1015 993 return -EINVAL; 1016 - } 1017 994 1018 - if (nr_segments > KEXEC_SEGMENT_MAX) { 995 + if (nr_segments > KEXEC_SEGMENT_MAX) 1019 996 return -EINVAL; 1020 - } 1021 997 1022 998 ksegments = compat_alloc_user_space(nr_segments * sizeof(out)); 1023 999 for (i=0; i < nr_segments; i++) { 1024 1000 result = copy_from_user(&in, &segments[i], sizeof(in)); 1025 - if (result) { 1001 + if (result) 1026 1002 return -EFAULT; 1027 - } 1028 1003 1029 1004 out.buf = compat_ptr(in.buf); 1030 1005 out.bufsz = in.bufsz; ··· 1029 1010 out.memsz = in.memsz; 1030 1011 1031 1012 result = copy_to_user(&ksegments[i], &out, sizeof(out)); 1032 - if (result) { 1013 + if (result) 1033 1014 return -EFAULT; 1034 - } 1035 1015 } 1036 1016 1037 1017 return sys_kexec_load(entry, nr_segments, ksegments, flags);