Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
arch/arm/kernel/process.c at v3.12-rc4
/*
 *  linux/arch/arm/kernel/process.c
 *
 *  Copyright (C) 1996-2000 Russell King - Converted to ARM.
 *  Original Copyright (C) 1995  Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <stdarg.h>

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/cpuidle.h>
#include <linux/leds.h>
#include <linux/reboot.h>

#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/processor.h>
#include <asm/thread_notify.h>
#include <asm/stacktrace.h>
#include <asm/mach/time.h>
#include <asm/tls.h>

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

static const char *processor_modes[] = {
        "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
        "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
        "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
        "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
};

static const char *isa_modes[] = {
        "ARM" , "Thumb" , "Jazelle", "ThumbEE"
};

extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
typedef void (*phys_reset_t)(unsigned long);

/*
 * A temporary stack to use for CPU reset. This is static so that we
 * don't clobber it with the identity mapping. When running with this
 * stack, any references to the current task *will not work* so you
 * should really do as little as possible before jumping to your reset
 * code.
 */
static u64 soft_restart_stack[16];

static void __soft_restart(void *addr)
{
        phys_reset_t phys_reset;

        /* Take out a flat memory mapping. */
        setup_mm_for_reboot();

        /* Clean and invalidate caches */
        flush_cache_all();

        /* Turn off caching */
        cpu_proc_fin();

        /* Push out any further dirty data, and ensure cache is empty */
        flush_cache_all();

        /* Switch to the identity mapping. */
        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
        phys_reset((unsigned long)addr);

        /* Should never get here. */
        BUG();
}

void soft_restart(unsigned long addr)
{
        u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);

        /* Disable interrupts first */
        local_irq_disable();
        local_fiq_disable();

        /* Disable the L2 if we're the last man standing. */
        if (num_online_cpus() == 1)
                outer_disable();

        /* Change to the new stack and continue with the reset. */
        call_with_stack(__soft_restart, (void *)addr, (void *)stack);

        /* Should never get here. */
        BUG();
}

static void null_restart(enum reboot_mode reboot_mode, const char *cmd)
{
}

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd) = null_restart;
EXPORT_SYMBOL_GPL(arm_pm_restart);

/*
 * This is our default idle handler.
 */

void (*arm_pm_idle)(void);

static void default_idle(void)
{
        if (arm_pm_idle)
                arm_pm_idle();
        else
                cpu_do_idle();
        local_irq_enable();
}

void arch_cpu_idle_prepare(void)
{
        local_fiq_enable();
}

void arch_cpu_idle_enter(void)
{
        ledtrig_cpu(CPU_LED_IDLE_START);
#ifdef CONFIG_PL310_ERRATA_769419
        wmb();
#endif
}

void arch_cpu_idle_exit(void)
{
        ledtrig_cpu(CPU_LED_IDLE_END);
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
        cpu_die();
}
#endif

/*
 * Called from the core idle loop.
 */
void arch_cpu_idle(void)
{
        if (cpuidle_idle_call())
                default_idle();
}

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() is used to achieve this.
 */
void machine_shutdown(void)
{
        disable_nonboot_cpus();
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
        local_irq_disable();
        smp_send_stop();

        local_irq_disable();
        while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
        local_irq_disable();
        smp_send_stop();

        if (pm_power_off)
                pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with a single CPU can
 * use soft_restart() as their machine descriptor's .restart hook, since that
 * will cause the only available CPU to reset. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
        local_irq_disable();
        smp_send_stop();

        arm_pm_restart(reboot_mode, cmd);

        /* Give a grace period for failure to restart of 1s */
        mdelay(1000);

        /* Whoops - the platform was unable to reboot. Tell the user! */
        printk("Reboot failed -- System halted\n");
        local_irq_disable();
        while (1);
}

void __show_regs(struct pt_regs *regs)
{
        unsigned long flags;
        char buf[64];

        show_regs_print_info(KERN_DEFAULT);

        print_symbol("PC is at %s\n", instruction_pointer(regs));
        print_symbol("LR is at %s\n", regs->ARM_lr);
        printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
               "sp : %08lx  ip : %08lx  fp : %08lx\n",
                regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
                regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
        printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
                regs->ARM_r10, regs->ARM_r9,
                regs->ARM_r8);
        printk("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
                regs->ARM_r7, regs->ARM_r6,
                regs->ARM_r5, regs->ARM_r4);
        printk("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
                regs->ARM_r3, regs->ARM_r2,
                regs->ARM_r1, regs->ARM_r0);

        flags = regs->ARM_cpsr;
        buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
        buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
        buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
        buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
        buf[4] = '\0';

        printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
                buf, interrupts_enabled(regs) ? "n" : "ff",
                fast_interrupts_enabled(regs) ? "n" : "ff",
                processor_modes[processor_mode(regs)],
                isa_modes[isa_mode(regs)],
                get_fs() == get_ds() ? "kernel" : "user");
#ifdef CONFIG_CPU_CP15
        {
                unsigned int ctrl;

                buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
                {
                        unsigned int transbase, dac;
                        asm("mrc p15, 0, %0, c2, c0\n\t"
                            "mrc p15, 0, %1, c3, c0\n"
                                : "=r" (transbase), "=r" (dac));
                        snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
                                transbase, dac);
                }
#endif
                asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));

                printk("Control: %08x%s\n", ctrl, buf);
        }
#endif
}

void show_regs(struct pt_regs * regs)
{
        printk("\n");
        __show_regs(regs);
        dump_stack();
}

ATOMIC_NOTIFIER_HEAD(thread_notify_head);

EXPORT_SYMBOL_GPL(thread_notify_head);

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        thread_notify(THREAD_NOTIFY_EXIT, current_thread_info());
}

void flush_thread(void)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = current;

        flush_ptrace_hw_breakpoint(tsk);

        memset(thread->used_cp, 0, sizeof(thread->used_cp));
        memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
        memset(&thread->fpstate, 0, sizeof(union fp_state));

        thread_notify(THREAD_NOTIFY_FLUSH, thread);
}

void release_thread(struct task_struct *dead_task)
{
}

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

int
copy_thread(unsigned long clone_flags, unsigned long stack_start,
            unsigned long stk_sz, struct task_struct *p)
{
        struct thread_info *thread = task_thread_info(p);
        struct pt_regs *childregs = task_pt_regs(p);

        memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));

        if (likely(!(p->flags & PF_KTHREAD))) {
                *childregs = *current_pt_regs();
                childregs->ARM_r0 = 0;
                if (stack_start)
                        childregs->ARM_sp = stack_start;
        } else {
                memset(childregs, 0, sizeof(struct pt_regs));
                thread->cpu_context.r4 = stk_sz;
                thread->cpu_context.r5 = stack_start;
                childregs->ARM_cpsr = SVC_MODE;
        }
        thread->cpu_context.pc = (unsigned long)ret_from_fork;
        thread->cpu_context.sp = (unsigned long)childregs;

        clear_ptrace_hw_breakpoint(p);

        if (clone_flags & CLONE_SETTLS)
                thread->tp_value[0] = childregs->ARM_r3;
        thread->tp_value[1] = get_tpuser();

        thread_notify(THREAD_NOTIFY_COPY, thread);

        return 0;
}

/*
 * Fill in the task's elfregs structure for a core dump.
 */
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
{
        elf_core_copy_regs(elfregs, task_pt_regs(t));
        return 1;
}

/*
 * fill in the fpe structure for a core dump...
 */
int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
{
        struct thread_info *thread = current_thread_info();
        int used_math = thread->used_cp[1] | thread->used_cp[2];

        if (used_math)
                memcpy(fp, &thread->fpstate.soft, sizeof (*fp));

        return used_math != 0;
}
EXPORT_SYMBOL(dump_fpu);

unsigned long get_wchan(struct task_struct *p)
{
        struct stackframe frame;
        int count = 0;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        frame.fp = thread_saved_fp(p);
        frame.sp = thread_saved_sp(p);
        frame.lr = 0;                   /* recovered from the stack */
        frame.pc = thread_saved_pc(p);
        do {
                int ret = unwind_frame(&frame);
                if (ret < 0)
                        return 0;
                if (!in_sched_functions(frame.pc))
                        return frame.pc;
        } while (count ++ < 16);
        return 0;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long range_end = mm->brk + 0x02000000;
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}

#ifdef CONFIG_MMU
#ifdef CONFIG_KUSER_HELPERS
/*
 * The vectors page is always readable from user space for the
 * atomic helpers. Insert it into the gate_vma so that it is visible
 * through ptrace and /proc/<pid>/mem.
 */
437 */ 438static struct vm_area_struct gate_vma = { 439 .vm_start = 0xffff0000, 440 .vm_end = 0xffff0000 + PAGE_SIZE, 441 .vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC, 442}; 443 444static int __init gate_vma_init(void) 445{ 446 gate_vma.vm_page_prot = PAGE_READONLY_EXEC; 447 return 0; 448} 449arch_initcall(gate_vma_init); 450 451struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 452{ 453 return &gate_vma; 454} 455 456int in_gate_area(struct mm_struct *mm, unsigned long addr) 457{ 458 return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end); 459} 460 461int in_gate_area_no_mm(unsigned long addr) 462{ 463 return in_gate_area(NULL, addr); 464} 465#define is_gate_vma(vma) ((vma) == &gate_vma) 466#else 467#define is_gate_vma(vma) 0 468#endif 469 470const char *arch_vma_name(struct vm_area_struct *vma) 471{ 472 return is_gate_vma(vma) ? "[vectors]" : 473 (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ? 474 "[sigpage]" : NULL; 475} 476 477static struct page *signal_page; 478extern struct page *get_signal_page(void); 479 480int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) 481{ 482 struct mm_struct *mm = current->mm; 483 unsigned long addr; 484 int ret; 485 486 if (!signal_page) 487 signal_page = get_signal_page(); 488 if (!signal_page) 489 return -ENOMEM; 490 491 down_write(&mm->mmap_sem); 492 addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); 493 if (IS_ERR_VALUE(addr)) { 494 ret = addr; 495 goto up_fail; 496 } 497 498 ret = install_special_mapping(mm, addr, PAGE_SIZE, 499 VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, 500 &signal_page); 501 502 if (ret == 0) 503 mm->context.sigpage = addr; 504 505 up_fail: 506 up_write(&mm->mmap_sem); 507 return ret; 508} 509#endif