···11+/*22+ * linux/arch/unicore32/include/asm/traps.h33+ *44+ * Code specific to PKUnity SoC and UniCore ISA55+ *66+ * Copyright (C) 2001-2010 GUAN Xue-tao77+ *88+ * This program is free software; you can redistribute it and/or modify99+ * it under the terms of the GNU General Public License version 2 as1010+ * published by the Free Software Foundation.1111+ */1212+#ifndef __UNICORE_TRAP_H__1313+#define __UNICORE_TRAP_H__1414+1515+extern void __init early_trap_init(void);1616+extern void dump_backtrace_entry(unsigned long where,1717+ unsigned long from, unsigned long frame);1818+1919+extern void do_DataAbort(unsigned long addr, unsigned int fsr,2020+ struct pt_regs *regs);2121+#endif
+824
arch/unicore32/kernel/entry.S
···11+/*22+ * linux/arch/unicore32/kernel/entry.S33+ *44+ * Code specific to PKUnity SoC and UniCore ISA55+ *66+ * Copyright (C) 2001-2010 GUAN Xue-tao77+ *88+ * This program is free software; you can redistribute it and/or modify99+ * it under the terms of the GNU General Public License version 2 as1010+ * published by the Free Software Foundation.1111+ *1212+ * Low-level vector interface routines1313+ */1414+#include <linux/init.h>1515+#include <linux/linkage.h>1616+#include <asm/assembler.h>1717+#include <asm/errno.h>1818+#include <asm/thread_info.h>1919+#include <asm/memory.h>2020+#include <asm/unistd.h>2121+#include <generated/asm-offsets.h>2222+#include "debug-macro.S"2323+2424+@2525+@ Most of the stack format comes from struct pt_regs, but with2626+@ the addition of 8 bytes for storing syscall args 5 and 6.2727+@2828+#define S_OFF 82929+3030+/*3131+ * The SWI code relies on the fact that R0 is at the bottom of the stack3232+ * (due to slow/fast restore user regs).3333+ */3434+#if S_R0 != 03535+#error "Please fix"3636+#endif3737+3838+ .macro zero_fp3939+#ifdef CONFIG_FRAME_POINTER4040+ mov fp, #04141+#endif4242+ .endm4343+4444+ .macro alignment_trap, rtemp4545+#ifdef CONFIG_ALIGNMENT_TRAP4646+ ldw \rtemp, .LCcralign4747+ ldw \rtemp, [\rtemp]4848+ movc p0.c1, \rtemp, #04949+#endif5050+ .endm5151+5252+ .macro load_user_sp_lr, rd, rtemp, offset = 05353+ mov \rtemp, asr5454+ xor \rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)5555+ mov.a asr, \rtemp @ switch to the SUSR mode5656+5757+ ldw sp, [\rd+], #\offset @ load sp_user5858+ ldw lr, [\rd+], #\offset + 4 @ load lr_user5959+6060+ xor \rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)6161+ mov.a asr, \rtemp @ switch back to the PRIV mode6262+ .endm6363+6464+ .macro priv_exit, rpsr6565+ mov.a bsr, \rpsr6666+ ldm.w (r0 - r15), [sp]+6767+ ldm.b (r16 - pc), [sp]+ @ load r0 - pc, asr6868+ .endm6969+7070+ .macro restore_user_regs, fast = 0, offset = 07171+ ldw r1, [sp+], #\offset + S_PSR @ get calling asr7272+ ldw lr, [sp+], #\offset + 
S_PC @ get pc7373+ mov.a bsr, r1 @ save in bsr_priv7474+ .if \fast7575+ add sp, sp, #\offset + S_R1 @ r0 is syscall return value7676+ ldm.w (r1 - r15), [sp]+ @ get calling r1 - r157777+ ldur (r16 - lr), [sp]+ @ get calling r16 - lr7878+ .else7979+ ldm.w (r0 - r15), [sp]+ @ get calling r0 - r158080+ ldur (r16 - lr), [sp]+ @ get calling r16 - lr8181+ .endif8282+ nop8383+ add sp, sp, #S_FRAME_SIZE - S_R168484+ mov.a pc, lr @ return8585+ @ and move bsr_priv into asr8686+ .endm8787+8888+ .macro get_thread_info, rd8989+ mov \rd, sp >> #139090+ mov \rd, \rd << #139191+ .endm9292+9393+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp9494+ ldw \base, =(io_p2v(PKUNITY_INTC_BASE))9595+ ldw \irqstat, [\base+], #0xC @ INTC_ICIP9696+ ldw \tmp, [\base+], #0x4 @ INTC_ICMR9797+ and.a \irqstat, \irqstat, \tmp9898+ beq 1001f9999+ cntlz \irqnr, \irqstat100100+ rsub \irqnr, \irqnr, #31101101+1001: /* EQ will be set if no irqs pending */102102+ .endm103103+104104+#ifdef CONFIG_DEBUG_LL105105+ .macro printreg, reg, temp106106+ adr \temp, 901f107107+ stm (r0-r3), [\temp]+108108+ stw lr, [\temp+], #0x10109109+ mov r0, \reg110110+ b.l printhex8111111+ mov r0, #':'112112+ b.l printch113113+ mov r0, pc114114+ b.l printhex8115115+ adr r0, 902f116116+ b.l printascii117117+ adr \temp, 901f118118+ ldm (r0-r3), [\temp]+119119+ ldw lr, [\temp+], #0x10120120+ b 903f121121+901: .word 0, 0, 0, 0, 0 @ r0-r3, lr122122+902: .asciz ": epip4d\n"123123+ .align124124+903:125125+ .endm126126+#endif127127+128128+/*129129+ * These are the registers used in the syscall handler, and allow us to130130+ * have in theory up to 7 arguments to a function - r0 to r6.131131+ *132132+ * Note that tbl == why is intentional.133133+ *134134+ * We must set at least "tsk" and "why" when calling ret_with_reschedule.135135+ */136136+scno .req r21 @ syscall number137137+tbl .req r22 @ syscall table pointer138138+why .req r22 @ Linux syscall (!= 0)139139+tsk .req r23 @ current thread_info140140+141141+/*142142+ * Interrupt 
handling. Preserves r17, r18, r19143143+ */144144+ .macro intr_handler145145+1: get_irqnr_and_base r0, r6, r5, lr146146+ beq 2f147147+ mov r1, sp148148+ @149149+ @ routine called with r0 = irq number, r1 = struct pt_regs *150150+ @151151+ adr lr, 1b152152+ b asm_do_IRQ153153+2:154154+ .endm155155+156156+/*157157+ * PRIV mode handlers158158+ */159159+ .macro priv_entry160160+ sub sp, sp, #(S_FRAME_SIZE - 4)161161+ stm (r1 - r15), [sp]+162162+ add r5, sp, #S_R15163163+ stm (r16 - r28), [r5]+164164+165165+ ldm (r1 - r3), [r0]+166166+ add r5, sp, #S_SP - 4 @ here for interlock avoidance167167+ mov r4, #-1 @ "" "" "" ""168168+ add r0, sp, #(S_FRAME_SIZE - 4)169169+ stw.w r1, [sp+], #-4 @ save the "real" r0 copied170170+ @ from the exception stack171171+172172+ mov r1, lr173173+174174+ @175175+ @ We are now ready to fill in the remaining blanks on the stack:176176+ @177177+ @ r0 - sp_priv178178+ @ r1 - lr_priv179179+ @ r2 - lr_<exception>, already fixed up for correct return/restart180180+ @ r3 - bsr_<exception>181181+ @ r4 - orig_r0 (see pt_regs definition in ptrace.h)182182+ @183183+ stm (r0 - r4), [r5]+184184+ .endm185185+186186+/*187187+ * User mode handlers188188+ *189189+ */190190+ .macro user_entry191191+ sub sp, sp, #S_FRAME_SIZE192192+ stm (r1 - r15), [sp+]193193+ add r4, sp, #S_R16194194+ stm (r16 - r28), [r4]+195195+196196+ ldm (r1 - r3), [r0]+197197+ add r0, sp, #S_PC @ here for interlock avoidance198198+ mov r4, #-1 @ "" "" "" ""199199+200200+ stw r1, [sp] @ save the "real" r0 copied201201+ @ from the exception stack202202+203203+ @204204+ @ We are now ready to fill in the remaining blanks on the stack:205205+ @206206+ @ r2 - lr_<exception>, already fixed up for correct return/restart207207+ @ r3 - bsr_<exception>208208+ @ r4 - orig_r0 (see pt_regs definition in ptrace.h)209209+ @210210+ @ Also, separately save sp_user and lr_user211211+ @212212+ stm (r2 - r4), [r0]+213213+ stur (sp, lr), [r0-]214214+215215+ @216216+ @ Enable the alignment trap while in 
kernel mode217217+ @218218+ alignment_trap r0219219+220220+ @221221+ @ Clear FP to mark the first stack frame222222+ @223223+ zero_fp224224+ .endm225225+226226+ .text227227+228228+@229229+@ __invalid - generic code for failed exception230230+@ (re-entrant version of handlers)231231+@232232+__invalid:233233+ sub sp, sp, #S_FRAME_SIZE234234+ stm (r1 - r15), [sp+]235235+ add r1, sp, #S_R16236236+ stm (r16 - r28, sp, lr), [r1]+237237+238238+ zero_fp239239+240240+ ldm (r4 - r6), [r0]+241241+ add r0, sp, #S_PC @ here for interlock avoidance242242+ mov r7, #-1 @ "" "" "" ""243243+ stw r4, [sp] @ save preserved r0244244+ stm (r5 - r7), [r0]+ @ lr_<exception>,245245+ @ asr_<exception>, "old_r0"246246+247247+ mov r0, sp248248+ mov r1, asr249249+ b bad_mode250250+ENDPROC(__invalid)251251+252252+ .align 5253253+__dabt_priv:254254+ priv_entry255255+256256+ @257257+ @ get ready to re-enable interrupts if appropriate258258+ @259259+ mov r17, asr260260+ cand.a r3, #PSR_I_BIT261261+ bne 1f262262+ andn r17, r17, #PSR_I_BIT263263+1:264264+265265+ @266266+ @ Call the processor-specific abort handler:267267+ @268268+ @ r2 - aborted context pc269269+ @ r3 - aborted context asr270270+ @271271+ @ The abort handler must return the aborted address in r0, and272272+ @ the fault status register in r1.273273+ @274274+ movc r1, p0.c3, #0 @ get FSR275275+ movc r0, p0.c4, #0 @ get FAR276276+277277+ @278278+ @ set desired INTR state, then call main handler279279+ @280280+ mov.a asr, r17281281+ mov r2, sp282282+ b.l do_DataAbort283283+284284+ @285285+ @ INTRs off again before pulling preserved data off the stack286286+ @287287+ disable_irq r0288288+289289+ @290290+ @ restore BSR and restart the instruction291291+ @292292+ ldw r2, [sp+], #S_PSR293293+ priv_exit r2 @ return from exception294294+ENDPROC(__dabt_priv)295295+296296+ .align 5297297+__intr_priv:298298+ priv_entry299299+300300+ intr_handler301301+302302+ mov r0, #0 @ epip4d303303+ movc p0.c5, r0, #14304304+ nop; nop; nop; nop; nop; nop; 
nop; nop305305+306306+ ldw r4, [sp+], #S_PSR @ irqs are already disabled307307+308308+ priv_exit r4 @ return from exception309309+ENDPROC(__intr_priv)310310+311311+ .ltorg312312+313313+ .align 5314314+__extn_priv:315315+ priv_entry316316+317317+ mov r0, sp @ struct pt_regs *regs318318+ mov r1, asr319319+ b bad_mode @ not supported320320+ENDPROC(__extn_priv)321321+322322+ .align 5323323+__pabt_priv:324324+ priv_entry325325+326326+ @327327+ @ re-enable interrupts if appropriate328328+ @329329+ mov r17, asr330330+ cand.a r3, #PSR_I_BIT331331+ bne 1f332332+ andn r17, r17, #PSR_I_BIT333333+1:334334+335335+ @336336+ @ set args, then call main handler337337+ @338338+ @ r0 - address of faulting instruction339339+ @ r1 - pointer to registers on stack340340+ @341341+ mov r0, r2 @ pass address of aborted instruction342342+ mov r1, #5343343+ mov.a asr, r17344344+ mov r2, sp @ regs345345+ b.l do_PrefetchAbort @ call abort handler346346+347347+ @348348+ @ INTRs off again before pulling preserved data off the stack349349+ @350350+ disable_irq r0351351+352352+ @353353+ @ restore BSR and restart the instruction354354+ @355355+ ldw r2, [sp+], #S_PSR356356+ priv_exit r2 @ return from exception357357+ENDPROC(__pabt_priv)358358+359359+ .align 5360360+.LCcralign:361361+ .word cr_alignment362362+363363+ .align 5364364+__dabt_user:365365+ user_entry366366+367367+#ifdef CONFIG_UNICORE_FPU_F64368368+ cff ip, s31369369+ cand.a ip, #0x08000000 @ FPU execption traps?370370+ beq 209f371371+372372+ ldw ip, [sp+], #S_PC373373+ add ip, ip, #4374374+ stw ip, [sp+], #S_PC375375+ @376376+ @ fall through to the emulation code, which returns using r19 if377377+ @ it has emulated the instruction, or the more conventional lr378378+ @ if we are to treat this as a real extended instruction379379+ @380380+ @ r0 - instruction381381+ @382382+1: ldw.u r0, [r2]383383+ adr r19, ret_from_exception384384+ adr lr, 209f385385+ @386386+ @ fallthrough to call do_uc_f64387387+ @388388+/*389389+ * Check whether the 
instruction is a co-processor instruction.390390+ * If yes, we need to call the relevant co-processor handler.391391+ *392392+ * Note that we don't do a full check here for the co-processor393393+ * instructions; all instructions with bit 27 set are well394394+ * defined. The only instructions that should fault are the395395+ * co-processor instructions.396396+ *397397+ * Emulators may wish to make use of the following registers:398398+ * r0 = instruction opcode.399399+ * r2 = PC400400+ * r19 = normal "successful" return address401401+ * r20 = this threads thread_info structure.402402+ * lr = unrecognised instruction return address403403+ */404404+ get_thread_info r20 @ get current thread405405+ and r8, r0, #0x00003c00 @ mask out CP number406406+ mov r7, #1407407+ stb r7, [r20+], #TI_USED_CP + 2 @ set appropriate used_cp[]408408+409409+ @ F64 hardware support entry point.410410+ @ r0 = faulted instruction411411+ @ r19 = return address412412+ @ r20 = fp_state413413+ enable_irq r4414414+ add r20, r20, #TI_FPSTATE @ r20 = workspace415415+ cff r1, s31 @ get fpu FPSCR416416+ andn r2, r1, #0x08000000417417+ ctf r2, s31 @ clear 27 bit418418+ mov r2, sp @ nothing stacked - regdump is at TOS419419+ mov lr, r19 @ setup for a return to the user code420420+421421+ @ Now call the C code to package up the bounce to the support code422422+ @ r0 holds the trigger instruction423423+ @ r1 holds the FPSCR value424424+ @ r2 pointer to register dump425425+ b ucf64_exchandler426426+209:427427+#endif428428+ @429429+ @ Call the processor-specific abort handler:430430+ @431431+ @ r2 - aborted context pc432432+ @ r3 - aborted context asr433433+ @434434+ @ The abort handler must return the aborted address in r0, and435435+ @ the fault status register in r1.436436+ @437437+ movc r1, p0.c3, #0 @ get FSR438438+ movc r0, p0.c4, #0 @ get FAR439439+440440+ @441441+ @ INTRs on, then call the main handler442442+ @443443+ enable_irq r2444444+ mov r2, sp445445+ adr lr, ret_from_exception446446+ b 
do_DataAbort447447+ENDPROC(__dabt_user)448448+449449+ .align 5450450+__intr_user:451451+ user_entry452452+453453+ get_thread_info tsk454454+455455+ intr_handler456456+457457+ mov why, #0458458+ b ret_to_user459459+ENDPROC(__intr_user)460460+461461+ .ltorg462462+463463+ .align 5464464+__extn_user:465465+ user_entry466466+467467+ mov r0, sp468468+ mov r1, asr469469+ b bad_mode470470+ENDPROC(__extn_user)471471+472472+ .align 5473473+__pabt_user:474474+ user_entry475475+476476+ mov r0, r2 @ pass address of aborted instruction.477477+ mov r1, #5478478+ enable_irq r1 @ Enable interrupts479479+ mov r2, sp @ regs480480+ b.l do_PrefetchAbort @ call abort handler481481+ /* fall through */482482+/*483483+ * This is the return code to user mode for abort handlers484484+ */485485+ENTRY(ret_from_exception)486486+ get_thread_info tsk487487+ mov why, #0488488+ b ret_to_user489489+ENDPROC(__pabt_user)490490+ENDPROC(ret_from_exception)491491+492492+/*493493+ * Register switch for UniCore V2 processors494494+ * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info495495+ * previous and next are guaranteed not to be the same.496496+ */497497+ENTRY(__switch_to)498498+ add ip, r1, #TI_CPU_SAVE499499+ stm.w (r4 - r15), [ip]+500500+ stm.w (r16 - r27, sp, lr), [ip]+501501+502502+#ifdef CONFIG_UNICORE_FPU_F64503503+ add ip, r1, #TI_FPSTATE504504+ sfm.w (f0 - f7 ), [ip]+505505+ sfm.w (f8 - f15), [ip]+506506+ sfm.w (f16 - f23), [ip]+507507+ sfm.w (f24 - f31), [ip]+508508+ cff r4, s31509509+ stw r4, [ip]510510+511511+ add ip, r2, #TI_FPSTATE512512+ lfm.w (f0 - f7 ), [ip]+513513+ lfm.w (f8 - f15), [ip]+514514+ lfm.w (f16 - f23), [ip]+515515+ lfm.w (f24 - f31), [ip]+516516+ ldw r4, [ip]517517+ ctf r4, s31518518+#endif519519+ add ip, r2, #TI_CPU_SAVE520520+ ldm.w (r4 - r15), [ip]+521521+ ldm (r16 - r27, sp, pc), [ip]+ @ Load all regs saved previously522522+ENDPROC(__switch_to)523523+524524+ .align 5525525+/*526526+ * This is the fast syscall return path. 
We do as little as527527+ * possible here, and this includes saving r0 back into the PRIV528528+ * stack.529529+ */530530+ret_fast_syscall:531531+ disable_irq r1 @ disable interrupts532532+ ldw r1, [tsk+], #TI_FLAGS533533+ cand.a r1, #_TIF_WORK_MASK534534+ bne fast_work_pending535535+536536+ @ fast_restore_user_regs537537+ restore_user_regs fast = 1, offset = S_OFF538538+539539+/*540540+ * Ok, we need to do extra processing, enter the slow path.541541+ */542542+fast_work_pending:543543+ stw.w r0, [sp+], #S_R0+S_OFF @ returned r0544544+work_pending:545545+ cand.a r1, #_TIF_NEED_RESCHED546546+ bne work_resched547547+ cand.a r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME548548+ beq no_work_pending549549+ mov r0, sp @ 'regs'550550+ mov r2, why @ 'syscall'551551+ cand.a r1, #_TIF_SIGPENDING @ delivering a signal?552552+ cmovne why, #0 @ prevent further restarts553553+ b.l do_notify_resume554554+ b ret_slow_syscall @ Check work again555555+556556+work_resched:557557+ b.l schedule558558+/*559559+ * "slow" syscall return path. 
"why" tells us if this was a real syscall.560560+ */561561+ENTRY(ret_to_user)562562+ret_slow_syscall:563563+ disable_irq r1 @ disable interrupts564564+ get_thread_info tsk @ epip4d, one path error?!565565+ ldw r1, [tsk+], #TI_FLAGS566566+ cand.a r1, #_TIF_WORK_MASK567567+ bne work_pending568568+no_work_pending:569569+ @ slow_restore_user_regs570570+ restore_user_regs fast = 0, offset = 0571571+ENDPROC(ret_to_user)572572+573573+/*574574+ * This is how we return from a fork.575575+ */576576+ENTRY(ret_from_fork)577577+ b.l schedule_tail578578+ get_thread_info tsk579579+ ldw r1, [tsk+], #TI_FLAGS @ check for syscall tracing580580+ mov why, #1581581+ cand.a r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?582582+ beq ret_slow_syscall583583+ mov r1, sp584584+ mov r0, #1 @ trace exit [IP = 1]585585+ b.l syscall_trace586586+ b ret_slow_syscall587587+ENDPROC(ret_from_fork)588588+589589+/*=============================================================================590590+ * SWI handler591591+ *-----------------------------------------------------------------------------592592+ */593593+ .align 5594594+ENTRY(vector_swi)595595+ sub sp, sp, #S_FRAME_SIZE596596+ stm (r0 - r15), [sp]+ @ Calling r0 - r15597597+ add r8, sp, #S_R16598598+ stm (r16 - r28), [r8]+ @ Calling r16 - r28599599+ add r8, sp, #S_PC600600+ stur (sp, lr), [r8-] @ Calling sp, lr601601+ mov r8, bsr @ called from non-REAL mode602602+ stw lr, [sp+], #S_PC @ Save calling PC603603+ stw r8, [sp+], #S_PSR @ Save ASR604604+ stw r0, [sp+], #S_OLD_R0 @ Save OLD_R0605605+ zero_fp606606+607607+ /*608608+ * Get the system call number.609609+ */610610+ sub ip, lr, #4611611+ ldw.u scno, [ip] @ get SWI instruction612612+613613+#ifdef CONFIG_ALIGNMENT_TRAP614614+ ldw ip, __cr_alignment615615+ ldw ip, [ip]616616+ movc p0.c1, ip, #0 @ update control register617617+#endif618618+ enable_irq ip619619+620620+ get_thread_info tsk621621+ ldw tbl, =sys_call_table @ load syscall table pointer622622+623623+ andn scno, scno, #0xff000000 @ 
mask off SWI op-code624624+ andn scno, scno, #0x00ff0000 @ mask off SWI op-code625625+626626+ stm.w (r4, r5), [sp-] @ push fifth and sixth args627627+ ldw ip, [tsk+], #TI_FLAGS @ check for syscall tracing628628+ cand.a ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?629629+ bne __sys_trace630630+631631+ csub.a scno, #__NR_syscalls @ check upper syscall limit632632+ adr lr, ret_fast_syscall @ return address633633+ bea 1f634634+ ldw pc, [tbl+], scno << #2 @ call sys_* routine635635+1:636636+ add r1, sp, #S_OFF637637+2: mov why, #0 @ no longer a real syscall638638+ b sys_ni_syscall @ not private func639639+640640+ /*641641+ * This is the really slow path. We're going to be doing642642+ * context switches, and waiting for our parent to respond.643643+ */644644+__sys_trace:645645+ mov r2, scno646646+ add r1, sp, #S_OFF647647+ mov r0, #0 @ trace entry [IP = 0]648648+ b.l syscall_trace649649+650650+ adr lr, __sys_trace_return @ return address651651+ mov scno, r0 @ syscall number (possibly new)652652+ add r1, sp, #S_R0 + S_OFF @ pointer to regs653653+ csub.a scno, #__NR_syscalls @ check upper syscall limit654654+ bea 2b655655+ ldm (r0 - r3), [r1]+ @ have to reload r0 - r3656656+ ldw pc, [tbl+], scno << #2 @ call sys_* routine657657+658658+__sys_trace_return:659659+ stw.w r0, [sp+], #S_R0 + S_OFF @ save returned r0660660+ mov r2, scno661661+ mov r1, sp662662+ mov r0, #1 @ trace exit [IP = 1]663663+ b.l syscall_trace664664+ b ret_slow_syscall665665+666666+ .align 5667667+#ifdef CONFIG_ALIGNMENT_TRAP668668+ .type __cr_alignment, #object669669+__cr_alignment:670670+ .word cr_alignment671671+#endif672672+ .ltorg673673+674674+ENTRY(sys_execve)675675+ add r3, sp, #S_OFF676676+ b __sys_execve677677+ENDPROC(sys_execve)678678+679679+ENTRY(sys_clone)680680+ add ip, sp, #S_OFF681681+ stw ip, [sp+], #4682682+ b __sys_clone683683+ENDPROC(sys_clone)684684+685685+ENTRY(sys_rt_sigreturn)686686+ add r0, sp, #S_OFF687687+ mov why, #0 @ prevent syscall restart handling688688+ b 
__sys_rt_sigreturn689689+ENDPROC(sys_rt_sigreturn)690690+691691+ENTRY(sys_sigaltstack)692692+ ldw r2, [sp+], #S_OFF + S_SP693693+ b do_sigaltstack694694+ENDPROC(sys_sigaltstack)695695+696696+ __INIT697697+698698+/*699699+ * Vector stubs.700700+ *701701+ * This code is copied to 0xffff0200 so we can use branches in the702702+ * vectors, rather than ldr's. Note that this code must not703703+ * exceed 0x300 bytes.704704+ *705705+ * Common stub entry macro:706706+ * Enter in INTR mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC707707+ *708708+ * SP points to a minimal amount of processor-private memory, the address709709+ * of which is copied into r0 for the mode specific abort handler.710710+ */711711+ .macro vector_stub, name, mode712712+ .align 5713713+714714+vector_\name:715715+ @716716+ @ Save r0, lr_<exception> (parent PC) and bsr_<exception>717717+ @ (parent ASR)718718+ @719719+ stw r0, [sp]720720+ stw lr, [sp+], #4 @ save r0, lr721721+ mov lr, bsr722722+ stw lr, [sp+], #8 @ save bsr723723+724724+ @725725+ @ Prepare for PRIV mode. 
INTRs remain disabled.726726+ @727727+ mov r0, asr728728+ xor r0, r0, #(\mode ^ PRIV_MODE)729729+ mov.a bsr, r0730730+731731+ @732732+ @ the branch table must immediately follow this code733733+ @734734+ and lr, lr, #0x03735735+ add lr, lr, #1736736+ mov r0, sp737737+ ldw lr, [pc+], lr << #2738738+ mov.a pc, lr @ branch to handler in PRIV mode739739+ENDPROC(vector_\name)740740+ .align 2741741+ @ handler addresses follow this label742742+ .endm743743+744744+ .globl __stubs_start745745+__stubs_start:746746+/*747747+ * Interrupt dispatcher748748+ */749749+ vector_stub intr, INTR_MODE750750+751751+ .long __intr_user @ 0 (USER)752752+ .long __invalid @ 1753753+ .long __invalid @ 2754754+ .long __intr_priv @ 3 (PRIV)755755+756756+/*757757+ * Data abort dispatcher758758+ * Enter in ABT mode, bsr = USER ASR, lr = USER PC759759+ */760760+ vector_stub dabt, ABRT_MODE761761+762762+ .long __dabt_user @ 0 (USER)763763+ .long __invalid @ 1764764+ .long __invalid @ 2 (INTR)765765+ .long __dabt_priv @ 3 (PRIV)766766+767767+/*768768+ * Prefetch abort dispatcher769769+ * Enter in ABT mode, bsr = USER ASR, lr = USER PC770770+ */771771+ vector_stub pabt, ABRT_MODE772772+773773+ .long __pabt_user @ 0 (USER)774774+ .long __invalid @ 1775775+ .long __invalid @ 2 (INTR)776776+ .long __pabt_priv @ 3 (PRIV)777777+778778+/*779779+ * Undef instr entry dispatcher780780+ * Enter in EXTN mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC781781+ */782782+ vector_stub extn, EXTN_MODE783783+784784+ .long __extn_user @ 0 (USER)785785+ .long __invalid @ 1786786+ .long __invalid @ 2 (INTR)787787+ .long __extn_priv @ 3 (PRIV)788788+789789+/*790790+ * We group all the following data together to optimise791791+ * for CPUs with separate I & D caches.792792+ */793793+ .align 5794794+795795+.LCvswi:796796+ .word vector_swi797797+798798+ .globl __stubs_end799799+__stubs_end:800800+801801+ .equ stubs_offset, __vectors_start + 0x200 - __stubs_start802802+803803+ .globl __vectors_start804804+__vectors_start:805805+ 
jepriv SYS_ERROR0806806+ b vector_extn + stubs_offset807807+ ldw pc, .LCvswi + stubs_offset808808+ b vector_pabt + stubs_offset809809+ b vector_dabt + stubs_offset810810+ jepriv SYS_ERROR0811811+ b vector_intr + stubs_offset812812+ jepriv SYS_ERROR0813813+814814+ .globl __vectors_end815815+__vectors_end:816816+817817+ .data818818+819819+ .globl cr_alignment820820+ .globl cr_no_alignment821821+cr_alignment:822822+ .space 4823823+cr_no_alignment:824824+ .space 4
+252
arch/unicore32/kernel/head.S
···11+/*22+ * linux/arch/unicore32/kernel/head.S33+ *44+ * Code specific to PKUnity SoC and UniCore ISA55+ *66+ * Copyright (C) 2001-2010 GUAN Xue-tao77+ *88+ * This program is free software; you can redistribute it and/or modify99+ * it under the terms of the GNU General Public License version 2 as1010+ * published by the Free Software Foundation.1111+ */1212+#include <linux/linkage.h>1313+#include <linux/init.h>1414+1515+#include <asm/assembler.h>1616+#include <asm/ptrace.h>1717+#include <generated/asm-offsets.h>1818+#include <asm/memory.h>1919+#include <asm/thread_info.h>2020+#include <asm/system.h>2121+#include <asm/pgtable-hwdef.h>2222+2323+#if (PHYS_OFFSET & 0x003fffff)2424+#error "PHYS_OFFSET must be at an even 4MiB boundary!"2525+#endif2626+2727+#define KERNEL_RAM_VADDR (PAGE_OFFSET + KERNEL_IMAGE_START)2828+#define KERNEL_RAM_PADDR (PHYS_OFFSET + KERNEL_IMAGE_START)2929+3030+#define KERNEL_PGD_PADDR (KERNEL_RAM_PADDR - 0x1000)3131+#define KERNEL_PGD_VADDR (KERNEL_RAM_VADDR - 0x1000)3232+3333+#define KERNEL_START KERNEL_RAM_VADDR3434+#define KERNEL_END _end3535+3636+/*3737+ * swapper_pg_dir is the virtual address of the initial page table.3838+ * We place the page tables 4K below KERNEL_RAM_VADDR. Therefore, we must3939+ * make sure that KERNEL_RAM_VADDR is correctly set. Currently, we expect4040+ * the least significant 16 bits to be 0x8000, but we could probably4141+ * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x1000.4242+ */4343+#if (KERNEL_RAM_VADDR & 0xffff) != 0x80004444+#error KERNEL_RAM_VADDR must start at 0xXXXX80004545+#endif4646+4747+ .globl swapper_pg_dir4848+ .equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x10004949+5050+/*5151+ * Kernel startup entry point.5252+ * ---------------------------5353+ *5454+ * This is normally called from the decompressor code. 
The requirements5555+ * are: MMU = off, D-cache = off, I-cache = dont care5656+ *5757+ * This code is mostly position independent, so if you link the kernel at5858+ * 0xc0008000, you call this at __pa(0xc0008000).5959+ */6060+ __HEAD6161+ENTRY(stext)6262+ @ set asr6363+ mov r0, #PRIV_MODE @ ensure priv mode6464+ or r0, #PSR_R_BIT | PSR_I_BIT @ disable irqs6565+ mov.a asr, r06666+6767+ @ process identify6868+ movc r0, p0.c0, #0 @ cpuid6969+ movl r1, 0xff00ffff @ mask7070+ movl r2, 0x4d000863 @ value7171+ and r0, r1, r07272+ cxor.a r0, r27373+ bne __error_p @ invalid processor id7474+7575+ /*7676+ * Clear the 4K level 1 swapper page table7777+ */7878+ movl r0, #KERNEL_PGD_PADDR @ page table address7979+ mov r1, #08080+ add r2, r0, #0x10008181+101: stw.w r1, [r0]+, #48282+ stw.w r1, [r0]+, #48383+ stw.w r1, [r0]+, #48484+ stw.w r1, [r0]+, #48585+ cxor.a r0, r28686+ bne 101b8787+8888+ movl r4, #KERNEL_PGD_PADDR @ page table address8989+ mov r7, #PMD_TYPE_SECT | PMD_PRESENT @ page size: section9090+ or r7, r7, #PMD_SECT_CACHEABLE @ cacheable9191+ or r7, r7, #PMD_SECT_READ | PMD_SECT_WRITE | PMD_SECT_EXEC9292+9393+ /*9494+ * Create identity mapping for first 4MB of kernel to9595+ * cater for the MMU enable. This identity mapping9696+ * will be removed by paging_init(). 
We use our current program9797+ * counter to determine corresponding section base address.9898+ */9999+ mov r6, pc100100+ mov r6, r6 >> #22 @ start of kernel section101101+ or r1, r7, r6 << #22 @ flags + kernel base102102+ stw r1, [r4+], r6 << #2 @ identity mapping103103+104104+ /*105105+ * Now setup the pagetables for our kernel direct106106+ * mapped region.107107+ */108108+ add r0, r4, #(KERNEL_START & 0xff000000) >> 20109109+ stw.w r1, [r0+], #(KERNEL_START & 0x00c00000) >> 20110110+ movl r6, #(KERNEL_END - 1)111111+ add r0, r0, #4112112+ add r6, r4, r6 >> #20113113+102: csub.a r0, r6114114+ add r1, r1, #1 << 22115115+ bua 103f116116+ stw.w r1, [r0]+, #4117117+ b 102b118118+103:119119+ /*120120+ * Then map first 4MB of ram in case it contains our boot params.121121+ */122122+ add r0, r4, #PAGE_OFFSET >> 20123123+ or r6, r7, #(PHYS_OFFSET & 0xffc00000)124124+ stw r6, [r0]125125+126126+ ldw r15, __switch_data @ address to jump to after127127+128128+ /*129129+ * Initialise TLB, Caches, and MMU state ready to switch the MMU130130+ * on.131131+ */132132+ mov r0, #0133133+ movc p0.c5, r0, #28 @ cache invalidate all134134+ nop8135135+ movc p0.c6, r0, #6 @ TLB invalidate all136136+ nop8137137+138138+ /*139139+ * ..V. .... ..TB IDAM140140+ * ..1. .... ..01 1111141141+ */142142+ movl r0, #0x201f @ control register setting143143+144144+ /*145145+ * Setup common bits before finally enabling the MMU. Essentially146146+ * this is just loading the page table pointer and domain access147147+ * registers.148148+ */149149+ #ifndef CONFIG_ALIGNMENT_TRAP150150+ andn r0, r0, #CR_A151151+ #endif152152+ #ifdef CONFIG_CPU_DCACHE_DISABLE153153+ andn r0, r0, #CR_D154154+ #endif155155+ #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH156156+ andn r0, r0, #CR_B157157+ #endif158158+ #ifdef CONFIG_CPU_ICACHE_DISABLE159159+ andn r0, r0, #CR_I160160+ #endif161161+162162+ movc p0.c2, r4, #0 @ set pgd163163+ b __turn_mmu_on164164+ENDPROC(stext)165165+166166+/*167167+ * Enable the MMU. 
This completely changes the stucture of the visible168168+ * memory space. You will not be able to trace execution through this.169169+ *170170+ * r0 = cp#0 control register171171+ * r15 = *virtual* address to jump to upon completion172172+ */173173+ .align 5174174+__turn_mmu_on:175175+ mov r0, r0176176+ movc p0.c1, r0, #0 @ write control reg177177+ nop @ fetch inst by phys addr178178+ mov pc, r15179179+ nop8 @ fetch inst by phys addr180180+ENDPROC(__turn_mmu_on)181181+182182+/*183183+ * Setup the initial page tables. We only setup the barest184184+ * amount which are required to get the kernel running, which185185+ * generally means mapping in the kernel code.186186+ *187187+ * r9 = cpuid188188+ * r10 = procinfo189189+ *190190+ * Returns:191191+ * r0, r3, r6, r7 corrupted192192+ * r4 = physical page table address193193+ */194194+ .ltorg195195+196196+ .align 2197197+ .type __switch_data, %object198198+__switch_data:199199+ .long __mmap_switched200200+ .long __bss_start @ r6201201+ .long _end @ r7202202+ .long cr_alignment @ r8203203+ .long init_thread_union + THREAD_START_SP @ sp204204+205205+/*206206+ * The following fragment of code is executed with the MMU on in MMU mode,207207+ * and uses absolute addresses; this is not position independent.208208+ *209209+ * r0 = cp#0 control register210210+ */211211+__mmap_switched:212212+ adr r3, __switch_data + 4213213+214214+ ldm.w (r6, r7, r8), [r3]+215215+ ldw sp, [r3]216216+217217+ mov fp, #0 @ Clear BSS (and zero fp)218218+203: csub.a r6, r7219219+ bea 204f220220+ stw.w fp, [r6]+,#4221221+ b 203b222222+204:223223+ andn r1, r0, #CR_A @ Clear 'A' bit224224+ stm (r0, r1), [r8]+ @ Save control register values225225+ b start_kernel226226+ENDPROC(__mmap_switched)227227+228228+/*229229+ * Exception handling. Something went wrong and we can't proceed. 
We230230+ * ought to tell the user, but since we don't have any guarantee that231231+ * we're even running on the right architecture, we do virtually nothing.232232+ *233233+ * If CONFIG_DEBUG_LL is set we try to print out something about the error234234+ * and hope for the best (useful if bootloader fails to pass a proper235235+ * machine ID for example).236236+ */237237+__error_p:238238+#ifdef CONFIG_DEBUG_LL239239+ adr r0, str_p1240240+ b.l printascii241241+ mov r0, r9242242+ b.l printhex8243243+ adr r0, str_p2244244+ b.l printascii245245+901: nop8246246+ b 901b247247+str_p1: .asciz "\nError: unrecognized processor variant (0x"248248+str_p2: .asciz ").\n"249249+ .align250250+#endif251251+ENDPROC(__error_p)252252+
+360
arch/unicore32/kernel/setup.c
···11+/*22+ * linux/arch/unicore32/kernel/setup.c33+ *44+ * Code specific to PKUnity SoC and UniCore ISA55+ *66+ * Copyright (C) 2001-2010 GUAN Xue-tao77+ *88+ * This program is free software; you can redistribute it and/or modify99+ * it under the terms of the GNU General Public License version 2 as1010+ * published by the Free Software Foundation.1111+ */1212+#include <linux/module.h>1313+#include <linux/kernel.h>1414+#include <linux/stddef.h>1515+#include <linux/ioport.h>1616+#include <linux/delay.h>1717+#include <linux/utsname.h>1818+#include <linux/initrd.h>1919+#include <linux/console.h>2020+#include <linux/bootmem.h>2121+#include <linux/seq_file.h>2222+#include <linux/screen_info.h>2323+#include <linux/init.h>2424+#include <linux/root_dev.h>2525+#include <linux/cpu.h>2626+#include <linux/interrupt.h>2727+#include <linux/smp.h>2828+#include <linux/fs.h>2929+#include <linux/proc_fs.h>3030+#include <linux/memblock.h>3131+#include <linux/elf.h>3232+#include <linux/io.h>3333+3434+#include <asm/cputype.h>3535+#include <asm/sections.h>3636+#include <asm/setup.h>3737+#include <asm/cacheflush.h>3838+#include <asm/tlbflush.h>3939+#include <asm/traps.h>4040+4141+#include "setup.h"4242+4343+#ifndef MEM_SIZE4444+#define MEM_SIZE (16*1024*1024)4545+#endif4646+4747+struct stack {4848+ u32 irq[3];4949+ u32 abt[3];5050+ u32 und[3];5151+} ____cacheline_aligned;5252+5353+static struct stack stacks[NR_CPUS];5454+5555+char elf_platform[ELF_PLATFORM_SIZE];5656+EXPORT_SYMBOL(elf_platform);5757+5858+static char __initdata cmd_line[COMMAND_LINE_SIZE];5959+6060+static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;6161+6262+/*6363+ * Standard memory resources6464+ */6565+static struct resource mem_res[] = {6666+ {6767+ .name = "Video RAM",6868+ .start = 0,6969+ .end = 0,7070+ .flags = IORESOURCE_MEM7171+ },7272+ {7373+ .name = "Kernel text",7474+ .start = 0,7575+ .end = 0,7676+ .flags = IORESOURCE_MEM7777+ },7878+ {7979+ .name = "Kernel data",8080+ .start = 
0,8181+ .end = 0,8282+ .flags = IORESOURCE_MEM8383+ }8484+};8585+8686+#define video_ram mem_res[0]8787+#define kernel_code mem_res[1]8888+#define kernel_data mem_res[2]8989+9090+/*9191+ * These functions re-use the assembly code in head.S, which9292+ * already provide the required functionality.9393+ */9494+static void __init setup_processor(void)9595+{9696+ printk(KERN_DEFAULT "CPU: UniCore-II [%08x] revision %d, cr=%08lx\n",9797+ uc32_cpuid, (int)(uc32_cpuid >> 16) & 15, cr_alignment);9898+9999+ sprintf(init_utsname()->machine, "puv3");100100+ sprintf(elf_platform, "ucv2");101101+}102102+103103+/*104104+ * cpu_init - initialise one CPU.105105+ *106106+ * cpu_init sets up the per-CPU stacks.107107+ */108108+void cpu_init(void)109109+{110110+ unsigned int cpu = smp_processor_id();111111+ struct stack *stk = &stacks[cpu];112112+113113+ /*114114+ * setup stacks for re-entrant exception handlers115115+ */116116+ __asm__ (117117+ "mov.a asr, %1\n\t"118118+ "add sp, %0, %2\n\t"119119+ "mov.a asr, %3\n\t"120120+ "add sp, %0, %4\n\t"121121+ "mov.a asr, %5\n\t"122122+ "add sp, %0, %6\n\t"123123+ "mov.a asr, %7"124124+ :125125+ : "r" (stk),126126+ "r" (PSR_R_BIT | PSR_I_BIT | INTR_MODE),127127+ "I" (offsetof(struct stack, irq[0])),128128+ "r" (PSR_R_BIT | PSR_I_BIT | ABRT_MODE),129129+ "I" (offsetof(struct stack, abt[0])),130130+ "r" (PSR_R_BIT | PSR_I_BIT | EXTN_MODE),131131+ "I" (offsetof(struct stack, und[0])),132132+ "r" (PSR_R_BIT | PSR_I_BIT | PRIV_MODE)133133+ : "r30", "cc");134134+}135135+136136+static int __init uc32_add_memory(unsigned long start, unsigned long size)137137+{138138+ struct membank *bank = &meminfo.bank[meminfo.nr_banks];139139+140140+ if (meminfo.nr_banks >= NR_BANKS) {141141+ printk(KERN_CRIT "NR_BANKS too low, "142142+ "ignoring memory at %#lx\n", start);143143+ return -EINVAL;144144+ }145145+146146+ /*147147+ * Ensure that start/size are aligned to a page boundary.148148+ * Size is appropriately rounded down, start is rounded up.149149+ 
*/150150+ size -= start & ~PAGE_MASK;151151+152152+ bank->start = PAGE_ALIGN(start);153153+ bank->size = size & PAGE_MASK;154154+155155+ /*156156+ * Check whether this memory region has non-zero size or157157+ * invalid node number.158158+ */159159+ if (bank->size == 0)160160+ return -EINVAL;161161+162162+ meminfo.nr_banks++;163163+ return 0;164164+}165165+166166+/*167167+ * Pick out the memory size. We look for mem=size@start,168168+ * where start and size are "size[KkMm]"169169+ */170170+static int __init early_mem(char *p)171171+{172172+ static int usermem __initdata = 1;173173+ unsigned long size, start;174174+ char *endp;175175+176176+ /*177177+ * If the user specifies memory size, we178178+ * blow away any automatically generated179179+ * size.180180+ */181181+ if (usermem) {182182+ usermem = 0;183183+ meminfo.nr_banks = 0;184184+ }185185+186186+ start = PHYS_OFFSET;187187+ size = memparse(p, &endp);188188+ if (*endp == '@')189189+ start = memparse(endp + 1, NULL);190190+191191+ uc32_add_memory(start, size);192192+193193+ return 0;194194+}195195+early_param("mem", early_mem);196196+197197+static void __init198198+request_standard_resources(struct meminfo *mi)199199+{200200+ struct resource *res;201201+ int i;202202+203203+ kernel_code.start = virt_to_phys(_stext);204204+ kernel_code.end = virt_to_phys(_etext - 1);205205+ kernel_data.start = virt_to_phys(_sdata);206206+ kernel_data.end = virt_to_phys(_end - 1);207207+208208+ for (i = 0; i < mi->nr_banks; i++) {209209+ if (mi->bank[i].size == 0)210210+ continue;211211+212212+ res = alloc_bootmem_low(sizeof(*res));213213+ res->name = "System RAM";214214+ res->start = mi->bank[i].start;215215+ res->end = mi->bank[i].start + mi->bank[i].size - 1;216216+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;217217+218218+ request_resource(&iomem_resource, res);219219+220220+ if (kernel_code.start >= res->start &&221221+ kernel_code.end <= res->end)222222+ request_resource(res, &kernel_code);223223+ if (kernel_data.start >= 
res->start &&224224+ kernel_data.end <= res->end)225225+ request_resource(res, &kernel_data);226226+ }227227+228228+ video_ram.start = PKUNITY_UNIGFX_MMAP_BASE;229229+ video_ram.end = PKUNITY_UNIGFX_MMAP_BASE + PKUNITY_UNIGFX_MMAP_SIZE;230230+ request_resource(&iomem_resource, &video_ram);231231+}232232+233233+static void (*init_machine)(void) __initdata;234234+235235+static int __init customize_machine(void)236236+{237237+ /* customizes platform devices, or adds new ones */238238+ if (init_machine)239239+ init_machine();240240+ return 0;241241+}242242+arch_initcall(customize_machine);243243+244244+void __init setup_arch(char **cmdline_p)245245+{246246+ char *from = default_command_line;247247+248248+ setup_processor();249249+250250+ init_mm.start_code = (unsigned long) _stext;251251+ init_mm.end_code = (unsigned long) _etext;252252+ init_mm.end_data = (unsigned long) _edata;253253+ init_mm.brk = (unsigned long) _end;254254+255255+ /* parse_early_param needs a boot_command_line */256256+ strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);257257+258258+ /* populate cmd_line too for later use, preserving boot_command_line */259259+ strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);260260+ *cmdline_p = cmd_line;261261+262262+ parse_early_param();263263+264264+ uc32_memblock_init(&meminfo);265265+266266+ paging_init();267267+ request_standard_resources(&meminfo);268268+269269+ cpu_init();270270+271271+ /*272272+ * Set up various architecture-specific pointers273273+ */274274+ init_machine = puv3_core_init;275275+276276+#ifdef CONFIG_VT277277+#if defined(CONFIG_VGA_CONSOLE)278278+ conswitchp = &vga_con;279279+#elif defined(CONFIG_DUMMY_CONSOLE)280280+ conswitchp = &dummy_con;281281+#endif282282+#endif283283+ early_trap_init();284284+}285285+286286+static struct cpu cpuinfo_unicore;287287+288288+static int __init topology_init(void)289289+{290290+ int i;291291+292292+ for_each_possible_cpu(i)293293+ register_cpu(&cpuinfo_unicore, i);294294+295295+ return 
0;296296+}297297+subsys_initcall(topology_init);298298+299299+#ifdef CONFIG_HAVE_PROC_CPU300300+static int __init proc_cpu_init(void)301301+{302302+ struct proc_dir_entry *res;303303+304304+ res = proc_mkdir("cpu", NULL);305305+ if (!res)306306+ return -ENOMEM;307307+ return 0;308308+}309309+fs_initcall(proc_cpu_init);310310+#endif311311+312312+static int c_show(struct seq_file *m, void *v)313313+{314314+ seq_printf(m, "Processor\t: UniCore-II rev %d (%s)\n",315315+ (int)(uc32_cpuid >> 16) & 15, elf_platform);316316+317317+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",318318+ loops_per_jiffy / (500000/HZ),319319+ (loops_per_jiffy / (5000/HZ)) % 100);320320+321321+ /* dump out the processor features */322322+ seq_puts(m, "Features\t: CMOV UC-F64");323323+324324+ seq_printf(m, "\nCPU implementer\t: 0x%02x\n", uc32_cpuid >> 24);325325+ seq_printf(m, "CPU architecture: 2\n");326326+ seq_printf(m, "CPU revision\t: %d\n", (uc32_cpuid >> 16) & 15);327327+328328+ seq_printf(m, "Cache type\t: write-back\n"329329+ "Cache clean\t: cp0 c5 ops\n"330330+ "Cache lockdown\t: not support\n"331331+ "Cache format\t: Harvard\n");332332+333333+ seq_puts(m, "\n");334334+335335+ seq_printf(m, "Hardware\t: PKUnity v3\n");336336+337337+ return 0;338338+}339339+340340+static void *c_start(struct seq_file *m, loff_t *pos)341341+{342342+ return *pos < 1 ? (void *)1 : NULL;343343+}344344+345345+static void *c_next(struct seq_file *m, void *v, loff_t *pos)346346+{347347+ ++*pos;348348+ return NULL;349349+}350350+351351+static void c_stop(struct seq_file *m, void *v)352352+{353353+}354354+355355+const struct seq_operations cpuinfo_op = {356356+ .start = c_start,357357+ .next = c_next,358358+ .stop = c_stop,359359+ .show = c_show360360+};
+30
arch/unicore32/kernel/setup.h
/*
 * linux/arch/unicore32/kernel/setup.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_KERNEL_SETUP_H__
#define __UNICORE_KERNEL_SETUP_H__

/* called from setup_arch() to bring up the kernel page tables */
extern void paging_init(void);
/* PKUnity-v3 device hook; assigned to init_machine in setup_arch()
 * and run at arch_initcall time via customize_machine() */
extern void puv3_core_init(void);

extern void puv3_ps2_init(void);
extern void pci_puv3_preinit(void);
extern void __init puv3_init_gpio(void);

/* presumably prepares an identity mapping before reset — defined in mm code */
extern void setup_mm_for_reboot(char mode);

/* linker-provided bounds of the exception vector and stub code that
 * early_trap_init() copies to the vector page at VECTORS_BASE */
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];

extern void kernel_thread_helper(void);

/* called from early_trap_init() after the vectors are installed */
extern void __init early_signal_init(void);
#endif
+333
arch/unicore32/kernel/traps.c
···11+/*22+ * linux/arch/unicore32/kernel/traps.c33+ *44+ * Code specific to PKUnity SoC and UniCore ISA55+ *66+ * Copyright (C) 2001-2010 GUAN Xue-tao77+ *88+ * This program is free software; you can redistribute it and/or modify99+ * it under the terms of the GNU General Public License version 2 as1010+ * published by the Free Software Foundation.1111+ *1212+ * 'traps.c' handles hardware exceptions after we have saved some state.1313+ * Mostly a debugging aid, but will probably kill the offending process.1414+ */1515+#include <linux/module.h>1616+#include <linux/signal.h>1717+#include <linux/spinlock.h>1818+#include <linux/personality.h>1919+#include <linux/kallsyms.h>2020+#include <linux/kdebug.h>2121+#include <linux/uaccess.h>2222+#include <linux/delay.h>2323+#include <linux/hardirq.h>2424+#include <linux/init.h>2525+#include <linux/uaccess.h>2626+#include <linux/atomic.h>2727+#include <linux/unistd.h>2828+2929+#include <asm/cacheflush.h>3030+#include <asm/system.h>3131+#include <asm/traps.h>3232+3333+#include "setup.h"3434+3535+static void dump_mem(const char *, const char *, unsigned long, unsigned long);3636+3737+void dump_backtrace_entry(unsigned long where,3838+ unsigned long from, unsigned long frame)3939+{4040+#ifdef CONFIG_KALLSYMS4141+ printk(KERN_DEFAULT "[<%08lx>] (%pS) from [<%08lx>] (%pS)\n",4242+ where, (void *)where, from, (void *)from);4343+#else4444+ printk(KERN_DEFAULT "Function entered at [<%08lx>] from [<%08lx>]\n",4545+ where, from);4646+#endif4747+}4848+4949+/*5050+ * Stack pointers should always be within the kernels view of5151+ * physical memory. 
If it is not there, then we can't dump5252+ * out any information relating to the stack.5353+ */5454+static int verify_stack(unsigned long sp)5555+{5656+ if (sp < PAGE_OFFSET ||5757+ (sp > (unsigned long)high_memory && high_memory != NULL))5858+ return -EFAULT;5959+6060+ return 0;6161+}6262+6363+/*6464+ * Dump out the contents of some memory nicely...6565+ */6666+static void dump_mem(const char *lvl, const char *str, unsigned long bottom,6767+ unsigned long top)6868+{6969+ unsigned long first;7070+ mm_segment_t fs;7171+ int i;7272+7373+ /*7474+ * We need to switch to kernel mode so that we can use __get_user7575+ * to safely read from kernel space. Note that we now dump the7676+ * code first, just in case the backtrace kills us.7777+ */7878+ fs = get_fs();7979+ set_fs(KERNEL_DS);8080+8181+ printk(KERN_DEFAULT "%s%s(0x%08lx to 0x%08lx)\n",8282+ lvl, str, bottom, top);8383+8484+ for (first = bottom & ~31; first < top; first += 32) {8585+ unsigned long p;8686+ char str[sizeof(" 12345678") * 8 + 1];8787+8888+ memset(str, ' ', sizeof(str));8989+ str[sizeof(str) - 1] = '\0';9090+9191+ for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {9292+ if (p >= bottom && p < top) {9393+ unsigned long val;9494+ if (__get_user(val, (unsigned long *)p) == 0)9595+ sprintf(str + i * 9, " %08lx", val);9696+ else9797+ sprintf(str + i * 9, " ????????");9898+ }9999+ }100100+ printk(KERN_DEFAULT "%s%04lx:%s\n", lvl, first & 0xffff, str);101101+ }102102+103103+ set_fs(fs);104104+}105105+106106+static void dump_instr(const char *lvl, struct pt_regs *regs)107107+{108108+ unsigned long addr = instruction_pointer(regs);109109+ const int width = 8;110110+ mm_segment_t fs;111111+ char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;112112+ int i;113113+114114+ /*115115+ * We need to switch to kernel mode so that we can use __get_user116116+ * to safely read from kernel space. 
Note that we now dump the117117+ * code first, just in case the backtrace kills us.118118+ */119119+ fs = get_fs();120120+ set_fs(KERNEL_DS);121121+122122+ for (i = -4; i < 1; i++) {123123+ unsigned int val, bad;124124+125125+ bad = __get_user(val, &((u32 *)addr)[i]);126126+127127+ if (!bad)128128+ p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",129129+ width, val);130130+ else {131131+ p += sprintf(p, "bad PC value");132132+ break;133133+ }134134+ }135135+ printk(KERN_DEFAULT "%sCode: %s\n", lvl, str);136136+137137+ set_fs(fs);138138+}139139+140140+static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)141141+{142142+ unsigned int fp, mode;143143+ int ok = 1;144144+145145+ printk(KERN_DEFAULT "Backtrace: ");146146+147147+ if (!tsk)148148+ tsk = current;149149+150150+ if (regs) {151151+ fp = regs->UCreg_fp;152152+ mode = processor_mode(regs);153153+ } else if (tsk != current) {154154+ fp = thread_saved_fp(tsk);155155+ mode = 0x10;156156+ } else {157157+ asm("mov %0, fp" : "=r" (fp) : : "cc");158158+ mode = 0x10;159159+ }160160+161161+ if (!fp) {162162+ printk("no frame pointer");163163+ ok = 0;164164+ } else if (verify_stack(fp)) {165165+ printk("invalid frame pointer 0x%08x", fp);166166+ ok = 0;167167+ } else if (fp < (unsigned long)end_of_stack(tsk))168168+ printk("frame pointer underflow");169169+ printk("\n");170170+171171+ if (ok)172172+ c_backtrace(fp, mode);173173+}174174+175175+void dump_stack(void)176176+{177177+ dump_backtrace(NULL, NULL);178178+}179179+EXPORT_SYMBOL(dump_stack);180180+181181+void show_stack(struct task_struct *tsk, unsigned long *sp)182182+{183183+ dump_backtrace(NULL, tsk);184184+ barrier();185185+}186186+187187+static int __die(const char *str, int err, struct thread_info *thread,188188+ struct pt_regs *regs)189189+{190190+ struct task_struct *tsk = thread->task;191191+ static int die_counter;192192+ int ret;193193+194194+ printk(KERN_EMERG "Internal error: %s: %x [#%d]\n",195195+ str, err, ++die_counter);196196+ 
sysfs_printk_last_file();197197+198198+ /* trap and error numbers are mostly meaningless on UniCore */199199+ ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, \200200+ SIGSEGV);201201+ if (ret == NOTIFY_STOP)202202+ return ret;203203+204204+ print_modules();205205+ __show_regs(regs);206206+ printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",207207+ TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);208208+209209+ if (!user_mode(regs) || in_interrupt()) {210210+ dump_mem(KERN_EMERG, "Stack: ", regs->UCreg_sp,211211+ THREAD_SIZE + (unsigned long)task_stack_page(tsk));212212+ dump_backtrace(regs, tsk);213213+ dump_instr(KERN_EMERG, regs);214214+ }215215+216216+ return ret;217217+}218218+219219+DEFINE_SPINLOCK(die_lock);220220+221221+/*222222+ * This function is protected against re-entrancy.223223+ */224224+void die(const char *str, struct pt_regs *regs, int err)225225+{226226+ struct thread_info *thread = current_thread_info();227227+ int ret;228228+229229+ oops_enter();230230+231231+ spin_lock_irq(&die_lock);232232+ console_verbose();233233+ bust_spinlocks(1);234234+ ret = __die(str, err, thread, regs);235235+236236+ bust_spinlocks(0);237237+ add_taint(TAINT_DIE);238238+ spin_unlock_irq(&die_lock);239239+ oops_exit();240240+241241+ if (in_interrupt())242242+ panic("Fatal exception in interrupt");243243+ if (panic_on_oops)244244+ panic("Fatal exception");245245+ if (ret != NOTIFY_STOP)246246+ do_exit(SIGSEGV);247247+}248248+249249+void uc32_notify_die(const char *str, struct pt_regs *regs,250250+ struct siginfo *info, unsigned long err, unsigned long trap)251251+{252252+ if (user_mode(regs)) {253253+ current->thread.error_code = err;254254+ current->thread.trap_no = trap;255255+256256+ force_sig_info(info->si_signo, info, current);257257+ } else258258+ die(str, regs, err);259259+}260260+261261+/*262262+ * bad_mode handles the impossible case in the vectors. 
If you see one of263263+ * these, then it's extremely serious, and could mean you have buggy hardware.264264+ * It never returns, and never tries to sync. We hope that we can at least265265+ * dump out some state information...266266+ */267267+asmlinkage void bad_mode(struct pt_regs *regs, unsigned int reason)268268+{269269+ console_verbose();270270+271271+ printk(KERN_CRIT "Bad mode detected with reason 0x%x\n", reason);272272+273273+ die("Oops - bad mode", regs, 0);274274+ local_irq_disable();275275+ panic("bad mode");276276+}277277+278278+void __pte_error(const char *file, int line, unsigned long val)279279+{280280+ printk(KERN_DEFAULT "%s:%d: bad pte %08lx.\n", file, line, val);281281+}282282+283283+void __pmd_error(const char *file, int line, unsigned long val)284284+{285285+ printk(KERN_DEFAULT "%s:%d: bad pmd %08lx.\n", file, line, val);286286+}287287+288288+void __pgd_error(const char *file, int line, unsigned long val)289289+{290290+ printk(KERN_DEFAULT "%s:%d: bad pgd %08lx.\n", file, line, val);291291+}292292+293293+asmlinkage void __div0(void)294294+{295295+ printk(KERN_DEFAULT "Division by zero in kernel.\n");296296+ dump_stack();297297+}298298+EXPORT_SYMBOL(__div0);299299+300300+void abort(void)301301+{302302+ BUG();303303+304304+ /* if that doesn't kill us, halt */305305+ panic("Oops failed to kill thread");306306+}307307+EXPORT_SYMBOL(abort);308308+309309+void __init trap_init(void)310310+{311311+ return;312312+}313313+314314+void __init early_trap_init(void)315315+{316316+ unsigned long vectors = VECTORS_BASE;317317+318318+ /*319319+ * Copy the vectors, stubs (in entry-unicore.S)320320+ * into the vector page, mapped at 0xffff0000, and ensure these321321+ * are visible to the instruction stream.322322+ */323323+ memcpy((void *)vectors,324324+ __vectors_start,325325+ __vectors_end - __vectors_start);326326+ memcpy((void *)vectors + 0x200,327327+ __stubs_start,328328+ __stubs_end - __stubs_start);329329+330330+ early_signal_init();331331+332332+ 
flush_icache_range(vectors, vectors + PAGE_SIZE);333333+}