powerpc: make ARCH=ppc use arch/powerpc/kernel/process.c

Commit 5388fb1025443ec223ba556b10efc4c5f83f8682 made signal_32.c
use discard_lazy_cpu_state(), which broke ARCH=ppc: that build
shares the common signal_32.c but still had its own process.c,
which does not provide discard_lazy_cpu_state().  Make ARCH=ppc
use the common process.c to fix the build and to reduce the
amount of duplicated code.

Signed-off-by: Paul Mackerras <paulus@samba.org>
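For context: on a non-SMP kernel the FP/AltiVec/SPE register contents
are handed over lazily, and last_task_used_math, last_task_used_altivec
and last_task_used_spe track which task's state is live in the
registers.  discard_lazy_cpu_state() simply forgets those owners, so
the state is reloaded from the thread_struct on next use.  A minimal
sketch of the non-SMP version, modelled on the exit_thread() and
flush_thread() bodies removed below; the exact body in
arch/powerpc/kernel/process.c may differ:

	#ifndef CONFIG_SMP
	void discard_lazy_cpu_state(void)
	{
		preempt_disable();
		/* forget lazy ownership; next use traps and reloads */
		if (last_task_used_math == current)
			last_task_used_math = NULL;
	#ifdef CONFIG_ALTIVEC
		if (last_task_used_altivec == current)
			last_task_used_altivec = NULL;
	#endif
	#ifdef CONFIG_SPE
		if (last_task_used_spe == current)
			last_task_used_spe = NULL;
	#endif
		preempt_enable();
	}
	#endif /* !CONFIG_SMP */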

+15 -857
+3 -3
arch/powerpc/kernel/Makefile
@@ -11,7 +11,8 @@
 endif
 
 obj-y				:= semaphore.o cputable.o ptrace.o syscalls.o \
-				   irq.o align.o signal_32.o pmc.o vdso.o
+				   irq.o align.o signal_32.o pmc.o vdso.o \
+				   init_task.o process.o
 obj-y				+= vdso32/
 obj-$(CONFIG_PPC64)		+= setup_64.o binfmt_elf32.o sys_ppc32.o \
 				   signal_64.o ptrace32.o systbl.o \
@@ -45,8 +44,7 @@
 extra-$(CONFIG_8xx)		:= head_8xx.o
 extra-y				+= vmlinux.lds
 
-obj-y				+= process.o init_task.o time.o \
-				   prom.o traps.o setup-common.o udbg.o
+obj-y				+= time.o prom.o traps.o setup-common.o udbg.o
 obj-$(CONFIG_PPC32)		+= entry_32.o setup_32.o misc_32.o systbl.o
 obj-$(CONFIG_PPC64)		+= misc_64.o dma_64.o iommu.o
 obj-$(CONFIG_PPC_MULTIPLATFORM)	+= prom_init.o
+2
arch/powerpc/kernel/process.c
@@ -223,6 +223,7 @@
 }
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_PPC_MERGE		/* XXX for now */
 int set_dabr(unsigned long dabr)
 {
 	if (ppc_md.set_dabr)
@@ -232,6 +233,7 @@
 	mtspr(SPRN_DABR, dabr);
 	return 0;
 }
+#endif
 
 #ifdef CONFIG_PPC64
 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
-1
arch/ppc/kernel/Makefile
@@ -13,7 +13,6 @@
 extra-y += vmlinux.lds
 
 obj-y				:= entry.o traps.o idle.o time.o misc.o \
-					process.o \
 					setup.o \
 					ppc_htab.o
 obj-$(CONFIG_6xx)		+= l2cr.o cpu_setup_6xx.o
-851
arch/ppc/kernel/process.c
@@ -1,851 +0,0 @@
-/*
- *  arch/ppc/kernel/process.c
- *
- *  Derived from "arch/i386/kernel/process.c"
- *    Copyright (C) 1995  Linus Torvalds
- *
- *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
- *  Paul Mackerras (paulus@cs.anu.edu.au)
- *
- *  PowerPC version
- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/config.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/elf.h>
-#include <linux/init.h>
-#include <linux/prctl.h>
-#include <linux/init_task.h>
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/mqueue.h>
-#include <linux/hardirq.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <asm/mmu.h>
-#include <asm/prom.h>
-
-extern unsigned long _get_SP(void);
-
-struct task_struct *last_task_used_math = NULL;
-struct task_struct *last_task_used_altivec = NULL;
-struct task_struct *last_task_used_spe = NULL;
-
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-EXPORT_SYMBOL(init_mm);
-
-/* this is 8kB-aligned so we can get to the thread_info struct
-   at the base of it from the stack pointer with 1 integer instruction. */
-union thread_union init_thread_union
-	__attribute__((__section__(".data.init_task"))) =
-{ INIT_THREAD_INFO(init_task) };
-
-/* initial task structure */
-struct task_struct init_task = INIT_TASK(init_task);
-EXPORT_SYMBOL(init_task);
-
-/* only used to get secondary processor up */
-struct task_struct *current_set[NR_CPUS] = {&init_task, };
-
-#undef SHOW_TASK_SWITCHES
-#undef CHECK_STACK
-
-#if defined(CHECK_STACK)
-unsigned long
-kernel_stack_top(struct task_struct *tsk)
-{
-	return ((unsigned long)tsk) + sizeof(union task_union);
-}
-
-unsigned long
-task_top(struct task_struct *tsk)
-{
-	return ((unsigned long)tsk) + sizeof(struct thread_info);
-}
-
-/* check to make sure the kernel stack is healthy */
-int check_stack(struct task_struct *tsk)
-{
-	unsigned long stack_top = kernel_stack_top(tsk);
-	unsigned long tsk_top = task_top(tsk);
-	int ret = 0;
-
-#if 0
-	/* check thread magic */
-	if ( tsk->thread.magic != THREAD_MAGIC )
-	{
-		ret |= 1;
-		printk("thread.magic bad: %08x\n", tsk->thread.magic);
-	}
-#endif
-
-	if ( !tsk )
-		printk("check_stack(): tsk bad tsk %p\n",tsk);
-
-	/* check if stored ksp is bad */
-	if ( (tsk->thread.ksp > stack_top) || (tsk->thread.ksp < tsk_top) )
-	{
-		printk("stack out of bounds: %s/%d\n"
-		       " tsk_top %08lx ksp %08lx stack_top %08lx\n",
-		       tsk->comm,tsk->pid,
-		       tsk_top, tsk->thread.ksp, stack_top);
-		ret |= 2;
-	}
-
-	/* check if stack ptr RIGHT NOW is bad */
-	if ( (tsk == current) && ((_get_SP() > stack_top ) || (_get_SP() < tsk_top)) )
-	{
-		printk("current stack ptr out of bounds: %s/%d\n"
-		       " tsk_top %08lx sp %08lx stack_top %08lx\n",
-		       current->comm,current->pid,
-		       tsk_top, _get_SP(), stack_top);
-		ret |= 4;
-	}
-
-#if 0
-	/* check amount of free stack */
-	for ( i = (unsigned long *)task_top(tsk) ; i < kernel_stack_top(tsk) ; i++ )
-	{
-		if ( !i )
-			printk("check_stack(): i = %p\n", i);
-		if ( *i != 0 )
-		{
-			/* only notify if it's less than 900 bytes */
-			if ( (i - (unsigned long *)task_top(tsk)) < 900 )
-				printk("%d bytes free on stack\n",
-				       i - task_top(tsk));
-			break;
-		}
-	}
-#endif
-
-	if (ret)
-	{
-		panic("bad kernel stack");
-	}
-	return(ret);
-}
-#endif /* defined(CHECK_STACK) */
-
-/*
- * Make sure the floating-point register state in the
- * the thread_struct is up to date for task tsk.
- */
-void flush_fp_to_thread(struct task_struct *tsk)
-{
-	if (tsk->thread.regs) {
-		/*
-		 * We need to disable preemption here because if we didn't,
-		 * another process could get scheduled after the regs->msr
-		 * test but before we have finished saving the FP registers
-		 * to the thread_struct.  That process could take over the
-		 * FPU, and then when we get scheduled again we would store
-		 * bogus values for the remaining FP registers.
-		 */
-		preempt_disable();
-		if (tsk->thread.regs->msr & MSR_FP) {
-#ifdef CONFIG_SMP
-			/*
-			 * This should only ever be called for current or
-			 * for a stopped child process.  Since we save away
-			 * the FP register state on context switch on SMP,
-			 * there is something wrong if a stopped child appears
-			 * to still have its FP state in the CPU registers.
-			 */
-			BUG_ON(tsk != current);
-#endif
-			giveup_fpu(current);
-		}
-		preempt_enable();
-	}
-}
-
-void enable_kernel_fp(void)
-{
-	WARN_ON(preemptible());
-
-#ifdef CONFIG_SMP
-	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
-		giveup_fpu(current);
-	else
-		giveup_fpu(NULL);	/* just enables FP for kernel */
-#else
-	giveup_fpu(last_task_used_math);
-#endif /* CONFIG_SMP */
-}
-EXPORT_SYMBOL(enable_kernel_fp);
-
-int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
-{
-	preempt_disable();
-	if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
-		giveup_fpu(tsk);
-	preempt_enable();
-	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
-	return 1;
-}
-
-#ifdef CONFIG_ALTIVEC
-void enable_kernel_altivec(void)
-{
-	WARN_ON(preemptible());
-
-#ifdef CONFIG_SMP
-	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
-		giveup_altivec(current);
-	else
-		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
-#else
-	giveup_altivec(last_task_used_altivec);
-#endif /* __SMP __ */
-}
-EXPORT_SYMBOL(enable_kernel_altivec);
-
-/*
- * Make sure the VMX/Altivec register state in the
- * the thread_struct is up to date for task tsk.
- */
-void flush_altivec_to_thread(struct task_struct *tsk)
-{
-	if (tsk->thread.regs) {
-		preempt_disable();
-		if (tsk->thread.regs->msr & MSR_VEC) {
-#ifdef CONFIG_SMP
-			BUG_ON(tsk != current);
-#endif
-			giveup_altivec(current);
-		}
-		preempt_enable();
-	}
-}
-
-int dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
-{
-	if (regs->msr & MSR_VEC)
-		giveup_altivec(current);
-	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
-	return 1;
-}
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_SPE
-void
-enable_kernel_spe(void)
-{
-	WARN_ON(preemptible());
-
-#ifdef CONFIG_SMP
-	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
-		giveup_spe(current);
-	else
-		giveup_spe(NULL);	/* just enable SPE for kernel - force */
-#else
-	giveup_spe(last_task_used_spe);
-#endif /* __SMP __ */
-}
-EXPORT_SYMBOL(enable_kernel_spe);
-
-void flush_spe_to_thread(struct task_struct *tsk)
-{
-	if (tsk->thread.regs) {
-		preempt_disable();
-		if (tsk->thread.regs->msr & MSR_SPE) {
-#ifdef CONFIG_SMP
-			BUG_ON(tsk != current);
-#endif
-			giveup_spe(current);
-		}
-		preempt_enable();
-	}
-}
-
-int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
-{
-	if (regs->msr & MSR_SPE)
-		giveup_spe(current);
-	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
-	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
-	return 1;
-}
-#endif /* CONFIG_SPE */
-
-struct task_struct *__switch_to(struct task_struct *prev,
-	struct task_struct *new)
-{
-	struct thread_struct *new_thread, *old_thread;
-	unsigned long s;
-	struct task_struct *last;
-
-	local_irq_save(s);
-#ifdef CHECK_STACK
-	check_stack(prev);
-	check_stack(new);
-#endif
-
-#ifdef CONFIG_SMP
-	/* avoid complexity of lazy save/restore of fpu
-	 * by just saving it every time we switch out if
-	 * this task used the fpu during the last quantum.
-	 *
-	 * If it tries to use the fpu again, it'll trap and
-	 * reload its fp regs.  So we don't have to do a restore
-	 * every switch, just a save.
-	 *  -- Cort
-	 */
-	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
-		giveup_fpu(prev);
-#ifdef CONFIG_ALTIVEC
-	/*
-	 * If the previous thread used altivec in the last quantum
-	 * (thus changing altivec regs) then save them.
-	 * We used to check the VRSAVE register but not all apps
-	 * set it, so we don't rely on it now (and in fact we need
-	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
-	 *
-	 * On SMP we always save/restore altivec regs just to avoid the
-	 * complexity of changing processors.
-	 *  -- Cort
-	 */
-	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)))
-		giveup_altivec(prev);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-	/*
-	 * If the previous thread used spe in the last quantum
-	 * (thus changing spe regs) then save them.
-	 *
-	 * On SMP we always save/restore spe regs just to avoid the
-	 * complexity of changing processors.
-	 */
-	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
-		giveup_spe(prev);
-#endif /* CONFIG_SPE */
-#endif /* CONFIG_SMP */
-
-#ifdef CONFIG_ALTIVEC
-	/* Avoid the trap.  On smp this this never happens since
-	 * we don't set last_task_used_altivec -- Cort
-	 */
-	if (new->thread.regs && last_task_used_altivec == new)
-		new->thread.regs->msr |= MSR_VEC;
-#endif
-#ifdef CONFIG_SPE
-	/* Avoid the trap.  On smp this this never happens since
-	 * we don't set last_task_used_spe
-	 */
-	if (new->thread.regs && last_task_used_spe == new)
-		new->thread.regs->msr |= MSR_SPE;
-#endif /* CONFIG_SPE */
-	new_thread = &new->thread;
-	old_thread = &current->thread;
-	last = _switch(old_thread, new_thread);
-	local_irq_restore(s);
-	return last;
-}
-
-void show_regs(struct pt_regs * regs)
-{
-	int i, trap;
-
-	printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx %s\n",
-	       regs->nip, regs->link, regs->gpr[1], regs, regs->trap,
-	       print_tainted());
-	printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
-	       regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
-	       regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
-	       regs->msr&MSR_IR ? 1 : 0,
-	       regs->msr&MSR_DR ? 1 : 0);
-	trap = TRAP(regs);
-	if (trap == 0x300 || trap == 0x600)
-		printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
-	printk("TASK = %p[%d] '%s' THREAD: %p\n",
-	       current, current->pid, current->comm, current->thread_info);
-	printk("Last syscall: %ld ", current->thread.last_syscall);
-
-#ifdef CONFIG_SMP
-	printk(" CPU: %d", smp_processor_id());
-#endif /* CONFIG_SMP */
-
-	for (i = 0; i < 32; i++) {
-		long r;
-		if ((i % 8) == 0)
-			printk("\n" KERN_INFO "GPR%02d: ", i);
-		if (__get_user(r, &regs->gpr[i]))
-			break;
-		printk("%08lX ", r);
-		if (i == 12 && !FULL_REGS(regs))
-			break;
-	}
-	printk("\n");
-#ifdef CONFIG_KALLSYMS
-	/*
-	 * Lookup NIP late so we have the best change of getting the
-	 * above info out without failing
-	 */
-	printk("NIP [%08lx] ", regs->nip);
-	print_symbol("%s\n", regs->nip);
-	printk("LR [%08lx] ", regs->link);
-	print_symbol("%s\n", regs->link);
-#endif
-	show_stack(current, (unsigned long *) regs->gpr[1]);
-}
-
-void exit_thread(void)
-{
-	preempt_disable();
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-#ifdef CONFIG_SPE
-	if (last_task_used_spe == current)
-		last_task_used_spe = NULL;
-#endif
-	preempt_enable();
-}
-
-void flush_thread(void)
-{
-	preempt_disable();
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-#ifdef CONFIG_SPE
-	if (last_task_used_spe == current)
-		last_task_used_spe = NULL;
-#endif
-	preempt_enable();
-}
-
-void
-release_thread(struct task_struct *t)
-{
-}
-
-/*
- * This gets called before we allocate a new thread and copy
- * the current task into it.
- */
-void prepare_to_copy(struct task_struct *tsk)
-{
-	struct pt_regs *regs = tsk->thread.regs;
-
-	if (regs == NULL)
-		return;
-	preempt_disable();
-	if (regs->msr & MSR_FP)
-		giveup_fpu(current);
-#ifdef CONFIG_ALTIVEC
-	if (regs->msr & MSR_VEC)
-		giveup_altivec(current);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-	if (regs->msr & MSR_SPE)
-		giveup_spe(current);
-#endif /* CONFIG_SPE */
-	preempt_enable();
-}
-
-/*
- * Copy a thread..
- */
-int
-copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
-	    unsigned long unused,
-	    struct task_struct *p, struct pt_regs *regs)
-{
-	struct pt_regs *childregs, *kregs;
-	extern void ret_from_fork(void);
-	unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
-	unsigned long childframe;
-
-	CHECK_FULL_REGS(regs);
-	/* Copy registers */
-	sp -= sizeof(struct pt_regs);
-	childregs = (struct pt_regs *) sp;
-	*childregs = *regs;
-	if ((childregs->msr & MSR_PR) == 0) {
-		/* for kernel thread, set `current' and stackptr in new task */
-		childregs->gpr[1] = sp + sizeof(struct pt_regs);
-		childregs->gpr[2] = (unsigned long) p;
-		p->thread.regs = NULL;	/* no user register state */
-	} else {
-		childregs->gpr[1] = usp;
-		p->thread.regs = childregs;
-		if (clone_flags & CLONE_SETTLS)
-			childregs->gpr[2] = childregs->gpr[6];
-	}
-	childregs->gpr[3] = 0;  /* Result from fork() */
-	sp -= STACK_FRAME_OVERHEAD;
-	childframe = sp;
-
-	/*
-	 * The way this works is that at some point in the future
-	 * some task will call _switch to switch to the new task.
-	 * That will pop off the stack frame created below and start
-	 * the new task running at ret_from_fork.  The new task will
-	 * do some house keeping and then return from the fork or clone
-	 * system call, using the stack frame created above.
-	 */
-	sp -= sizeof(struct pt_regs);
-	kregs = (struct pt_regs *) sp;
-	sp -= STACK_FRAME_OVERHEAD;
-	p->thread.ksp = sp;
-	kregs->nip = (unsigned long)ret_from_fork;
-
-	p->thread.last_syscall = -1;
-
-	return 0;
-}
-
-/*
- * Set up a thread for executing a new program
- */
-void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
-{
-	set_fs(USER_DS);
-	memset(regs->gpr, 0, sizeof(regs->gpr));
-	regs->ctr = 0;
-	regs->link = 0;
-	regs->xer = 0;
-	regs->ccr = 0;
-	regs->mq = 0;
-	regs->nip = nip;
-	regs->gpr[1] = sp;
-	regs->msr = MSR_USER;
-	preempt_disable();
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-#ifdef CONFIG_SPE
-	if (last_task_used_spe == current)
-		last_task_used_spe = NULL;
-#endif
-	preempt_enable();
-	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
-	current->thread.fpscr.val = 0;
-#ifdef CONFIG_ALTIVEC
-	memset(current->thread.vr, 0, sizeof(current->thread.vr));
-	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
-	current->thread.vrsave = 0;
-	current->thread.used_vr = 0;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-	memset(current->thread.evr, 0, sizeof(current->thread.evr));
-	current->thread.acc = 0;
-	current->thread.spefscr = 0;
-	current->thread.used_spe = 0;
-#endif /* CONFIG_SPE */
-}
-
-#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
-		| PR_FP_EXC_RES | PR_FP_EXC_INV)
-
-int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
-{
-	struct pt_regs *regs = tsk->thread.regs;
-
-	/* This is a bit hairy.  If we are an SPE enabled processor
-	 * (have embedded fp) we store the IEEE exception enable flags in
-	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
-	 * mode (asyn, precise, disabled) for 'Classic' FP. */
-	if (val & PR_FP_EXC_SW_ENABLE) {
-#ifdef CONFIG_SPE
-		tsk->thread.fpexc_mode = val &
-			(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
-#else
-		return -EINVAL;
-#endif
-	} else {
-		/* on a CONFIG_SPE this does not hurt us.  The bits that
-		 * __pack_fe01 use do not overlap with bits used for
-		 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
-		 * on CONFIG_SPE implementations are reserved so writing to
-		 * them does not change anything */
-		if (val > PR_FP_EXC_PRECISE)
-			return -EINVAL;
-		tsk->thread.fpexc_mode = __pack_fe01(val);
-		if (regs != NULL && (regs->msr & MSR_FP) != 0)
-			regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
-				| tsk->thread.fpexc_mode;
-	}
-	return 0;
-}
-
-int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
-{
-	unsigned int val;
-
-	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
-#ifdef CONFIG_SPE
-		val = tsk->thread.fpexc_mode;
-#else
-		return -EINVAL;
-#endif
-	else
-		val = __unpack_fe01(tsk->thread.fpexc_mode);
-	return put_user(val, (unsigned int __user *) adr);
-}
-
-int sys_clone(unsigned long clone_flags, unsigned long usp,
-	      int __user *parent_tidp, void __user *child_threadptr,
-	      int __user *child_tidp, int p6,
-	      struct pt_regs *regs)
-{
-	CHECK_FULL_REGS(regs);
-	if (usp == 0)
-		usp = regs->gpr[1];	/* stack pointer for child */
-	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
-}
-
-int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
-	     unsigned long p4, unsigned long p5, unsigned long p6,
-	     struct pt_regs *regs)
-{
-	CHECK_FULL_REGS(regs);
-	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
-}
-
-int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
-	      unsigned long p4, unsigned long p5, unsigned long p6,
-	      struct pt_regs *regs)
-{
-	CHECK_FULL_REGS(regs);
-	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
-		       regs, 0, NULL, NULL);
-}
-
-int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
-	       unsigned long a3, unsigned long a4, unsigned long a5,
-	       struct pt_regs *regs)
-{
-	int error;
-	char * filename;
-
-	filename = getname((char __user *) a0);
-	error = PTR_ERR(filename);
-	if (IS_ERR(filename))
-		goto out;
-	preempt_disable();
-	if (regs->msr & MSR_FP)
-		giveup_fpu(current);
-#ifdef CONFIG_ALTIVEC
-	if (regs->msr & MSR_VEC)
-		giveup_altivec(current);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-	if (regs->msr & MSR_SPE)
-		giveup_spe(current);
-#endif /* CONFIG_SPE */
-	preempt_enable();
-	error = do_execve(filename, (char __user *__user *) a1,
-			  (char __user *__user *) a2, regs);
-	if (error == 0) {
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
-	}
-	putname(filename);
-out:
-	return error;
-}
-
-void dump_stack(void)
-{
-	show_stack(current, NULL);
-}
-
-EXPORT_SYMBOL(dump_stack);
-
-void show_stack(struct task_struct *tsk, unsigned long *stack)
-{
-	unsigned long sp, stack_top, prev_sp, ret;
-	int count = 0;
-	unsigned long next_exc = 0;
-	struct pt_regs *regs;
-	extern char ret_from_except, ret_from_except_full, ret_from_syscall;
-
-	sp = (unsigned long) stack;
-	if (tsk == NULL)
-		tsk = current;
-	if (sp == 0) {
-		if (tsk == current)
-			asm("mr %0,1" : "=r" (sp));
-		else
-			sp = tsk->thread.ksp;
-	}
-
-	prev_sp = (unsigned long) (tsk->thread_info + 1);
-	stack_top = (unsigned long) tsk->thread_info + THREAD_SIZE;
-	while (count < 16 && sp > prev_sp && sp < stack_top && (sp & 3) == 0) {
-		if (count == 0) {
-			printk("Call trace:");
-#ifdef CONFIG_KALLSYMS
-			printk("\n");
-#endif
-		} else {
-			if (next_exc) {
-				ret = next_exc;
-				next_exc = 0;
-			} else
-				ret = *(unsigned long *)(sp + 4);
-			printk(" [%08lx] ", ret);
-#ifdef CONFIG_KALLSYMS
-			print_symbol("%s", ret);
-			printk("\n");
-#endif
-			if (ret == (unsigned long) &ret_from_except
-			    || ret == (unsigned long) &ret_from_except_full
-			    || ret == (unsigned long) &ret_from_syscall) {
-				/* sp + 16 points to an exception frame */
-				regs = (struct pt_regs *) (sp + 16);
-				if (sp + 16 + sizeof(*regs) <= stack_top)
-					next_exc = regs->nip;
-			}
-		}
-		++count;
-		sp = *(unsigned long *)sp;
-	}
-#ifndef CONFIG_KALLSYMS
-	if (count > 0)
-		printk("\n");
-#endif
-}
-
-#if 0
-/*
- * Low level print for debugging - Cort
- */
-int __init ll_printk(const char *fmt, ...)
-{
-	va_list args;
-	char buf[256];
-	int i;
-
-	va_start(args, fmt);
-	i=vsprintf(buf,fmt,args);
-	ll_puts(buf);
-	va_end(args);
-	return i;
-}
-
-int lines = 24, cols = 80;
-int orig_x = 0, orig_y = 0;
-
-void puthex(unsigned long val)
-{
-	unsigned char buf[10];
-	int i;
-	for (i = 7; i >= 0; i--)
-	{
-		buf[i] = "0123456789ABCDEF"[val & 0x0F];
-		val >>= 4;
-	}
-	buf[8] = '\0';
-	prom_print(buf);
-}
-
-void __init ll_puts(const char *s)
-{
-	int x,y;
-	char *vidmem = (char *)/*(_ISA_MEM_BASE + 0xB8000) */0xD00B8000;
-	char c;
-	extern int mem_init_done;
-
-	if ( mem_init_done ) /* assume this means we can printk */
-	{
-		printk(s);
-		return;
-	}
-
-#if 0
-	if ( have_of )
-	{
-		prom_print(s);
-		return;
-	}
-#endif
-
-	/*
-	 * can't ll_puts on chrp without openfirmware yet.
-	 * vidmem just needs to be setup for it.
-	 *  -- Cort
-	 */
-	if ( _machine != _MACH_prep )
-		return;
-	x = orig_x;
-	y = orig_y;
-
-	while ( ( c = *s++ ) != '\0' ) {
-		if ( c == '\n' ) {
-			x = 0;
-			if ( ++y >= lines ) {
-				/*scroll();*/
-				/*y--;*/
-				y = 0;
-			}
-		} else {
-			vidmem [ ( x + cols * y ) * 2 ] = c;
-			if ( ++x >= cols ) {
-				x = 0;
-				if ( ++y >= lines ) {
-					/*scroll();*/
-					/*y--;*/
-					y = 0;
-				}
-			}
-		}
-	}
-
-	orig_x = x;
-	orig_y = y;
-}
-#endif
-
-unsigned long get_wchan(struct task_struct *p)
-{
-	unsigned long ip, sp;
-	unsigned long stack_page = (unsigned long) p->thread_info;
-	int count = 0;
-	if (!p || p == current || p->state == TASK_RUNNING)
-		return 0;
-	sp = p->thread.ksp;
-	do {
-		sp = *(unsigned long *)sp;
-		if (sp < stack_page || sp >= stack_page + 8188)
-			return 0;
-		if (count > 0) {
-			ip = *(unsigned long *)(sp + 4);
-			if (!in_sched_functions(ip))
-				return ip;
-		}
-	} while (count++ < 16);
-	return 0;
-}
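The deleted __switch_to() above is where the lazy-state policy is
visible, and the common arch/powerpc version keeps the same scheme.
Condensed from the code just removed (FP shown for the SMP case,
AltiVec for the UP shortcut; SPE handling is analogous):

	#ifdef CONFIG_SMP
		/* Eager: save the FP state whenever the outgoing task
		 * used it, since it may next run on another CPU. */
		if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
			giveup_fpu(prev);
	#else
		/* Lazy: if the incoming task still owns the AltiVec
		 * registers, set MSR_VEC so it skips the unavailable trap. */
		if (new->thread.regs && last_task_used_altivec == new)
			new->thread.regs->msr |= MSR_VEC;
	#endif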
+10 -2
include/asm-ppc/system.h
@@ -4,7 +4,6 @@
 #ifndef __PPC_SYSTEM_H
 #define __PPC_SYSTEM_H
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 
 #include <asm/atomic.h>
@@ -39,7 +38,7 @@
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
+#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
 #define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define smp_mb()	barrier()
@@ -74,6 +73,7 @@
 extern void read_rtc_time(void);
 extern void pmac_find_display(void);
 extern void giveup_fpu(struct task_struct *);
+extern void disable_kernel_fp(void);
 extern void enable_kernel_fp(void);
 extern void flush_fp_to_thread(struct task_struct *);
 extern void enable_kernel_altivec(void);
@@ -85,6 +85,14 @@
 extern int fix_alignment(struct pt_regs *);
 extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
 extern void cvt_df(double *from, float *to, struct thread_struct *thread);
+
+#ifndef CONFIG_SMP
+extern void discard_lazy_cpu_state(void);
+#else
+static inline void discard_lazy_cpu_state(void)
+{
+}
+#endif
 
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
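Note the CONFIG_SMP split in the discard_lazy_cpu_state() declaration
above: on SMP the register state is already saved eagerly at context
switch, so discarding lazy state is a no-op and the inline compiles
away; on UP the out-of-line version in the common process.c forgets
the last_task_used_* owners.  A hypothetical caller (signal_32.c is
the real one; the function name below is made up for illustration):

	/* Hypothetical illustration, not part of this patch. */
	static void prepare_signal_frame(struct task_struct *tsk)
	{
		flush_fp_to_thread(tsk);  /* FP state -> thread_struct */
		discard_lazy_cpu_state(); /* forget owner; no-op on SMP */
	}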