[PATCH] ppc: Fix powersave code on arch/ppc

Fix asm_offsets.c and entry.S to work with the new power save code.
Changes in arch/powerpc needed to exist in arch/ppc as well since the
idle code is shared by both ppc and powerpc.

Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>

authored by Becky Bruce and committed by Paul Mackerras ea1e847c 23b2527d

+17 -17
+1
arch/ppc/kernel/asm-offsets.c
··· 134 134 DEFINE(TI_TASK, offsetof(struct thread_info, task)); 135 135 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain)); 136 136 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 137 + DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); 137 138 DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); 138 139 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); 139 140
+16 -17
arch/ppc/kernel/entry.S
··· 128 128 stw r12,4(r11) 129 129 #endif 130 130 b 3f 131 + 131 132 2: /* if from kernel, check interrupted DOZE/NAP mode and 132 133 * check for stack overflow 133 134 */ 135 + lwz r9,THREAD_INFO-THREAD(r12) 136 + cmplw r1,r9 /* if r1 <= current->thread_info */ 137 + ble- stack_ovf /* then the kernel stack overflowed */ 138 + 5: 134 139 #ifdef CONFIG_6xx 135 - mfspr r11,SPRN_HID0 136 - mtcr r11 137 - BEGIN_FTR_SECTION 138 - bt- 8,4f /* Check DOZE */ 139 - END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) 140 - BEGIN_FTR_SECTION 141 - bt- 9,4f /* Check NAP */ 142 - END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) 140 + tophys(r9,r9) /* check local flags */ 141 + lwz r12,TI_LOCAL_FLAGS(r9) 142 + mtcrf 0x01,r12 143 + bt- 31-TLF_NAPPING,4f 143 144 #endif /* CONFIG_6xx */ 144 145 .globl transfer_to_handler_cont 145 146 transfer_to_handler_cont: 146 - lwz r11,THREAD_INFO-THREAD(r12) 147 - cmplw r1,r11 /* if r1 <= current->thread_info */ 148 - ble- stack_ovf /* then the kernel stack overflowed */ 149 147 3: 150 148 mflr r9 151 149 lwz r11,0(r9) /* virtual address of handler */ 152 150 lwz r9,4(r9) /* where to go when done */ 153 - FIX_SRR1(r10,r12) 154 151 mtspr SPRN_SRR0,r11 155 152 mtspr SPRN_SRR1,r10 156 153 mtlr r9 ··· 155 158 RFI /* jump to handler, enable MMU */ 156 159 157 160 #ifdef CONFIG_6xx 158 - 4: b power_save_6xx_restore 161 + 4: rlwinm r12,r12,0,~_TLF_NAPPING 162 + stw r12,TI_LOCAL_FLAGS(r9) 163 + b power_save_6xx_restore 159 164 #endif 160 165 161 166 /* ··· 166 167 */ 167 168 stack_ovf: 168 169 /* sometimes we use a statically-allocated stack, which is OK. */ 169 - lis r11,_end@h 170 - ori r11,r11,_end@l 171 - cmplw r1,r11 172 - ble 3b /* r1 <= &_end is OK */ 170 + lis r12,_end@h 171 + ori r12,r12,_end@l 172 + cmplw r1,r12 173 + ble 5b /* r1 <= &_end is OK */ 173 174 SAVE_NVGPRS(r11) 174 175 addi r3,r1,STACK_FRAME_OVERHEAD 175 176 lis r1,init_thread_union@ha