Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc-merge

Fix up delete/modify conflict of arch/ppc/kernel/process.c by hand (it's
gone, gone, gone).

Signed-off-by: Linus Torvalds <torvalds@osdl.org>

+298 -1155
+3 -3
arch/powerpc/kernel/Makefile
··· 11 11 endif 12 12 13 13 obj-y := semaphore.o cputable.o ptrace.o syscalls.o \ 14 - irq.o align.o signal_32.o pmc.o vdso.o 14 + irq.o align.o signal_32.o pmc.o vdso.o \ 15 + init_task.o process.o 15 16 obj-y += vdso32/ 16 17 obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ 17 18 signal_64.o ptrace32.o systbl.o \ ··· 45 44 extra-$(CONFIG_8xx) := head_8xx.o 46 45 extra-y += vmlinux.lds 47 46 48 - obj-y += process.o init_task.o time.o \ 49 - prom.o traps.o setup-common.o udbg.o 47 + obj-y += time.o prom.o traps.o setup-common.o udbg.o 50 48 obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o systbl.o 51 49 obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o 52 50 obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
+3 -2
arch/powerpc/kernel/pci_64.c
··· 53 53 #ifdef CONFIG_PPC_MULTIPLATFORM 54 54 static void fixup_resource(struct resource *res, struct pci_dev *dev); 55 55 static void do_bus_setup(struct pci_bus *bus); 56 + static void phbs_remap_io(void); 56 57 #endif 57 58 58 59 /* pci_io_base -- the base address from which io bars are offsets. ··· 252 251 kfree(phb); 253 252 } 254 253 254 + #ifndef CONFIG_PPC_ISERIES 255 255 void __devinit pcibios_claim_one_bus(struct pci_bus *b) 256 256 { 257 257 struct pci_dev *dev; ··· 277 275 EXPORT_SYMBOL_GPL(pcibios_claim_one_bus); 278 276 #endif 279 277 280 - #ifndef CONFIG_PPC_ISERIES 281 278 static void __init pcibios_claim_of_setup(void) 282 279 { 283 280 struct pci_bus *b; ··· 1219 1218 } 1220 1219 EXPORT_SYMBOL(remap_bus_range); 1221 1220 1222 - void phbs_remap_io(void) 1221 + static void phbs_remap_io(void) 1223 1222 { 1224 1223 struct pci_controller *hose, *tmp; 1225 1224
+5 -3
arch/powerpc/kernel/process.c
··· 201 201 } 202 202 #endif /* CONFIG_SPE */ 203 203 204 + #ifndef CONFIG_SMP 204 205 /* 205 206 * If we are doing lazy switching of CPU state (FP, altivec or SPE), 206 207 * and the current task has some state, discard it. 207 208 */ 208 - static inline void discard_lazy_cpu_state(void) 209 + void discard_lazy_cpu_state(void) 209 210 { 210 - #ifndef CONFIG_SMP 211 211 preempt_disable(); 212 212 if (last_task_used_math == current) 213 213 last_task_used_math = NULL; ··· 220 220 last_task_used_spe = NULL; 221 221 #endif 222 222 preempt_enable(); 223 - #endif /* CONFIG_SMP */ 224 223 } 224 + #endif /* CONFIG_SMP */ 225 225 226 + #ifdef CONFIG_PPC_MERGE /* XXX for now */ 226 227 int set_dabr(unsigned long dabr) 227 228 { 228 229 if (ppc_md.set_dabr) ··· 232 231 mtspr(SPRN_DABR, dabr); 233 232 return 0; 234 233 } 234 + #endif 235 235 236 236 #ifdef CONFIG_PPC64 237 237 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
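The process.c hunks move discard_lazy_cpu_state() out of line and compile it only for UP kernels: lazy FP/Altivec/SPE switching is not used on SMP (state is saved at every context switch), so the !CONFIG_SMP guard now wraps the whole function rather than just its body, and the signal-restore paths below can call this one shared copy instead of open-coding the pointer clearing. The matching declaration is not part of this diff; a plausible sketch of what callers see, assuming an empty stub for SMP builds:

/*
 * Sketch only -- the real declaration lives in a header that this
 * merge does not touch.  On SMP there is no lazy state to discard,
 * so a no-op stub keeps the call sites unconditional.
 */
#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
#else
static inline void discard_lazy_cpu_state(void)
{
}
#endif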
+24 -4
arch/powerpc/kernel/prom.c
··· 1100 1100 1101 1101 static void __init early_reserve_mem(void) 1102 1102 { 1103 - unsigned long base, size; 1104 - unsigned long *reserve_map; 1103 + u64 base, size; 1104 + u64 *reserve_map; 1105 1105 1106 - reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) + 1106 + reserve_map = (u64 *)(((unsigned long)initial_boot_params) + 1107 1107 initial_boot_params->off_mem_rsvmap); 1108 + #ifdef CONFIG_PPC32 1109 + /* 1110 + * Handle the case where we might be booting from an old kexec 1111 + * image that setup the mem_rsvmap as pairs of 32-bit values 1112 + */ 1113 + if (*reserve_map > 0xffffffffull) { 1114 + u32 base_32, size_32; 1115 + u32 *reserve_map_32 = (u32 *)reserve_map; 1116 + 1117 + while (1) { 1118 + base_32 = *(reserve_map_32++); 1119 + size_32 = *(reserve_map_32++); 1120 + if (size_32 == 0) 1121 + break; 1122 + DBG("reserving: %lx -> %lx\n", base_32, size_32); 1123 + lmb_reserve(base_32, size_32); 1124 + } 1125 + return; 1126 + } 1127 + #endif 1108 1128 while (1) { 1109 1129 base = *(reserve_map++); 1110 1130 size = *(reserve_map++); 1111 1131 if (size == 0) 1112 1132 break; 1113 - DBG("reserving: %lx -> %lx\n", base, size); 1133 + DBG("reserving: %llx -> %llx\n", base, size); 1114 1134 lmb_reserve(base, size); 1115 1135 } 1116 1136
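The new CONFIG_PPC32 block in early_reserve_mem() detects a legacy kexec reserve map by reading the first entry as a u64: an old image stores (base, size) as pairs of 32-bit values, so a 64-bit read sees the 32-bit base in its high word, and any non-zero base pushes the value past 0xffffffff, which a genuine 64-bit entry cannot do on a 32-bit machine whose physical addresses all fit in the low word. A worked example with hypothetical values:

/* Illustrative only: a legacy pair base=0x01c00000, size=0x00400000
 * stored as two consecutive 32-bit big-endian words reads back as
 *
 *	(u64)0x01c00000 << 32 | 0x00400000 = 0x01c0000000400000
 *
 * which is > 0xffffffffull -- the tell-tale the code above checks for.
 * A well-formed 64-bit entry on the same machine would have its base
 * below 4GB, leaving the high word zero.
 */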
+4 -4
arch/powerpc/kernel/prom_init.c
··· 137 137 }; 138 138 139 139 struct mem_map_entry { 140 - unsigned long base; 141 - unsigned long size; 140 + u64 base; 141 + u64 size; 142 142 }; 143 143 144 144 typedef u32 cell_t; ··· 897 897 * If problems seem to show up, it would be a good start to track 898 898 * them down. 899 899 */ 900 - static void reserve_mem(unsigned long base, unsigned long size) 900 + static void reserve_mem(u64 base, u64 size) 901 901 { 902 - unsigned long top = base + size; 902 + u64 top = base + size; 903 903 unsigned long cnt = RELOC(mem_reserve_cnt); 904 904 905 905 if (size == 0)
+9 -12
arch/powerpc/kernel/signal_32.c
··· 497 497 if (err) 498 498 return 1; 499 499 500 + /* 501 + * Do this before updating the thread state in 502 + * current->thread.fpr/vr/evr. That way, if we get preempted 503 + * and another task grabs the FPU/Altivec/SPE, it won't be 504 + * tempted to save the current CPU state into the thread_struct 505 + * and corrupt what we are writing there. 506 + */ 507 + discard_lazy_cpu_state(); 508 + 500 509 /* force the process to reload the FP registers from 501 510 current->thread when it next does FP instructions */ 502 511 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1); ··· 547 538 return 1; 548 539 #endif /* CONFIG_SPE */ 549 540 550 - #ifndef CONFIG_SMP 551 - preempt_disable(); 552 - if (last_task_used_math == current) 553 - last_task_used_math = NULL; 554 - if (last_task_used_altivec == current) 555 - last_task_used_altivec = NULL; 556 - #ifdef CONFIG_SPE 557 - if (last_task_used_spe == current) 558 - last_task_used_spe = NULL; 559 - #endif 560 - preempt_enable(); 561 - #endif 562 541 return 0; 563 542 } 564 543
+11 -9
arch/powerpc/kernel/signal_64.c
··· 207 207 208 208 if (!sig) 209 209 regs->gpr[13] = save_r13; 210 - err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE); 211 210 if (set != NULL) 212 211 err |= __get_user(set->sig[0], &sc->oldmask); 212 + 213 + /* 214 + * Do this before updating the thread state in 215 + * current->thread.fpr/vr. That way, if we get preempted 216 + * and another task grabs the FPU/Altivec, it won't be 217 + * tempted to save the current CPU state into the thread_struct 218 + * and corrupt what we are writing there. 219 + */ 220 + discard_lazy_cpu_state(); 221 + 222 + err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE); 213 223 214 224 #ifdef CONFIG_ALTIVEC 215 225 err |= __get_user(v_regs, &sc->v_regs); ··· 239 229 current->thread.vrsave = 0; 240 230 #endif /* CONFIG_ALTIVEC */ 241 231 242 - #ifndef CONFIG_SMP 243 - preempt_disable(); 244 - if (last_task_used_math == current) 245 - last_task_used_math = NULL; 246 - if (last_task_used_altivec == current) 247 - last_task_used_altivec = NULL; 248 - preempt_enable(); 249 - #endif 250 232 /* Force reload of FP/VEC */ 251 233 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC); 252 234
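Both signal-restore paths now call discard_lazy_cpu_state() before copying saved register state into current->thread.fpr/vr/evr, for the reason the carried comment gives. An illustrative interleaving that the old ordering permitted on a preemptible UP kernel with lazy FP switching (not code from this merge):

/*
 * restore path                            another task
 * ------------                            ------------
 * __copy_from_user(&current->thread.fpr,
 *                  &sc->fp_regs, ...)     <- restored values land
 *      ... preempted here ...
 *                                         touches the FPU; giveup_fpu()
 *                                         saves the stale live registers
 *                                         back into current->thread.fpr
 *      ... resumes ...                    <- restored values clobbered
 *
 * Clearing last_task_used_math (and friends) first means no later
 * giveup_fpu() can write into thread.fpr behind the restore code.
 */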
+1 -1
arch/powerpc/platforms/cell/pervasive.c
··· 142 142 } 143 143 } 144 144 145 - int cbe_system_reset_exception(struct pt_regs *regs) 145 + static int cbe_system_reset_exception(struct pt_regs *regs) 146 146 { 147 147 switch (regs->msr & SRR1_WAKEMASK) { 148 148 case SRR1_WAKEEE:
+1 -1
arch/powerpc/platforms/cell/setup.c
··· 57 57 #define DBG(fmt...) 58 58 #endif 59 59 60 - void cell_show_cpuinfo(struct seq_file *m) 60 + static void cell_show_cpuinfo(struct seq_file *m) 61 61 { 62 62 struct device_node *root; 63 63 const char *model = "";
+3 -1
arch/powerpc/platforms/cell/spufs/syscalls.c
··· 29 29 * value of the spu_status register after the SPU has stopped. 30 30 * 31 31 */ 32 - long do_spu_run(struct file *filp, __u32 __user *unpc, __u32 __user *ustatus) 32 + static long do_spu_run(struct file *filp, 33 + __u32 __user *unpc, 34 + __u32 __user *ustatus) 33 35 { 34 36 long ret; 35 37 struct spufs_inode_info *i;
+2 -2
arch/powerpc/platforms/iseries/Makefile
··· 1 1 EXTRA_CFLAGS += -mno-minimal-toc 2 2 3 3 obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o mf.o lpevents.o \ 4 - hvcall.o proc.o htab.o iommu.o misc.o 5 - obj-$(CONFIG_PCI) += pci.o irq.o vpdinfo.o 4 + hvcall.o proc.o htab.o iommu.o misc.o irq.o 5 + obj-$(CONFIG_PCI) += pci.o vpdinfo.o 6 6 obj-$(CONFIG_IBMVIO) += vio.o 7 7 obj-$(CONFIG_SMP) += smp.o 8 8 obj-$(CONFIG_VIOPATH) += viopath.o
+2
arch/powerpc/platforms/iseries/iommu.c
··· 34 34 #include <asm/pci-bridge.h> 35 35 #include <asm/iseries/hv_call_xm.h> 36 36 37 + #include "iommu.h" 38 + 37 39 extern struct list_head iSeries_Global_Device_List; 38 40 39 41
+35
arch/powerpc/platforms/iseries/iommu.h
··· 1 + #ifndef _PLATFORMS_ISERIES_IOMMU_H 2 + #define _PLATFORMS_ISERIES_IOMMU_H 3 + 4 + /* 5 + * Copyright (C) 2005 Stephen Rothwell, IBM Corporation 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation; either version 2 of the License, or 10 + * (at your option) any later version. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + * You should have received a copy of the GNU General Public License 18 + * along with this program; if not, write to the: 19 + * Free Software Foundation, Inc., 20 + * 59 Temple Place, Suite 330, 21 + * Boston, MA 02111-1307 USA 22 + */ 23 + 24 + struct device_node; 25 + struct iommu_table; 26 + 27 + /* Creates table for an individual device node */ 28 + extern void iommu_devnode_init_iSeries(struct device_node *dn); 29 + 30 + /* Get table parameters from HV */ 31 + extern void iommu_table_getparms_iSeries(unsigned long busno, 32 + unsigned char slotno, unsigned char virtbus, 33 + struct iommu_table *tbl); 34 + 35 + #endif /* _PLATFORMS_ISERIES_IOMMU_H */
+8 -11
arch/powerpc/platforms/iseries/irq.c
··· 48 48 extern void iSeries_smp_message_recv(struct pt_regs *); 49 49 #endif 50 50 51 + #ifdef CONFIG_PCI 52 + 51 53 enum pci_event_type { 52 54 pe_bus_created = 0, /* PHB has been created */ 53 55 pe_bus_error = 1, /* PHB has failed */ ··· 149 147 static void pci_event_handler(struct HvLpEvent *event, struct pt_regs *regs) 150 148 { 151 149 if (event && (event->xType == HvLpEvent_Type_PciIo)) { 152 - switch (event->xFlags.xFunction) { 153 - case HvLpEvent_Function_Int: 150 + if (hvlpevent_is_int(event)) 154 151 int_received((struct pci_event *)event, regs); 155 - break; 156 - case HvLpEvent_Function_Ack: 152 + else 157 153 printk(KERN_ERR 158 154 "pci_event_handler: unexpected ack received\n"); 159 - break; 160 - default: 161 - printk(KERN_ERR 162 - "pci_event_handler: unexpected event function %d\n", 163 - (int)event->xFlags.xFunction); 164 - break; 165 - } 166 155 } else if (event) 167 156 printk(KERN_ERR 168 157 "pci_event_handler: Unrecognized PCI event type 0x%x\n", ··· 327 334 return virtirq; 328 335 } 329 336 337 + #endif /* CONFIG_PCI */ 338 + 330 339 /* 331 340 * Get the next pending IRQ. 332 341 */ ··· 348 353 if (hvlpevent_is_pending()) 349 354 process_hvlpevents(regs); 350 355 356 + #ifdef CONFIG_PCI 351 357 if (num_pending_irqs) { 352 358 spin_lock(&pending_irqs_lock); 353 359 for (irq = 0; irq < NR_IRQS; irq++) { ··· 362 366 if (irq >= NR_IRQS) 363 367 irq = -2; 364 368 } 369 + #endif 365 370 366 371 return irq; 367 372 }
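This hunk is the first of many below that replace open-coded tests of the old event->xFlags bitfields (xValid, xFunction, xAckInd, xAckType) with hvlpevent_is_int(), hvlpevent_is_ack(), hvlpevent_need_ack(), hvlpevent_is_valid() and hvlpevent_invalidate(). The helpers' definitions live in include/asm-powerpc/iseries/hv_lp_event.h, which this merge does not show; judging from the flag constants used later in the diff (HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK | HV_LP_EVENT_INT, HV_LP_EVENT_DEFERRED_ACK), they plausibly reduce to single-bit tests on the new flags byte, along these lines:

/* Sketch under the assumption that the old big-endian bitfield layout
 * (xValid:1, reserved:4, xAckType:1, xAckInd:1, xFunction:1) now maps
 * onto a plain u8 flags field, most significant bit first.
 */
#define HV_LP_EVENT_VALID		0x80	/* was xFlags.xValid */
#define HV_LP_EVENT_DEFERRED_ACK	0x04	/* was xFlags.xAckType */
#define HV_LP_EVENT_DO_ACK		0x02	/* was xFlags.xAckInd */
#define HV_LP_EVENT_INT			0x01	/* was xFlags.xFunction */

static inline int hvlpevent_is_valid(struct HvLpEvent *h)
{
	return h->flags & HV_LP_EVENT_VALID;
}

static inline void hvlpevent_invalidate(struct HvLpEvent *h)
{
	h->flags &= ~HV_LP_EVENT_VALID;
}

static inline int hvlpevent_is_int(struct HvLpEvent *h)
{
	return h->flags & HV_LP_EVENT_INT;
}

static inline int hvlpevent_is_ack(struct HvLpEvent *h)
{
	return !hvlpevent_is_int(h);
}

static inline int hvlpevent_need_ack(struct HvLpEvent *h)
{
	return h->flags & HV_LP_EVENT_DO_ACK;
}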
+1 -4
arch/powerpc/platforms/iseries/lpardata.c
··· 93 93 .xPirEnvironMode = 0, /* Piranha stuff */ 94 94 .xPirConsoleMode = 0, 95 95 .xPirDasdMode = 0, 96 - .xLparInstalled = 0, 97 - .xSysPartitioned = 0, 98 - .xHwSyncedTBs = 0, 99 - .xIntProcUtilHmt = 0, 96 + .flags = 0, 100 97 .xSpVpdFormat = 0, 101 98 .xIntProcRatio = 0, 102 99 .xPlicVrmIndex = 0, /* VRM index of PLIC */
+6 -6
arch/powerpc/platforms/iseries/lpevents.c
··· 53 53 struct HvLpEvent * event; 54 54 event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr; 55 55 56 - if (event->xFlags.xValid) { 56 + if (hvlpevent_is_valid(event)) { 57 57 /* rmb() needed only for weakly consistent machines (regatta) */ 58 58 rmb(); 59 59 /* Set pointer to next potential event */ ··· 84 84 85 85 next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr; 86 86 87 - return next_event->xFlags.xValid | 87 + return hvlpevent_is_valid(next_event) || 88 88 hvlpevent_queue.xPlicOverflowIntPending; 89 89 } 90 90 ··· 101 101 switch (extra) { 102 102 case 3: 103 103 tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign); 104 - tmp->xFlags.xValid = 0; 104 + hvlpevent_invalidate(tmp); 105 105 case 2: 106 106 tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign); 107 - tmp->xFlags.xValid = 0; 107 + hvlpevent_invalidate(tmp); 108 108 case 1: 109 109 tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign); 110 - tmp->xFlags.xValid = 0; 110 + hvlpevent_invalidate(tmp); 111 111 } 112 112 113 113 mb(); 114 114 115 - event->xFlags.xValid = 0; 115 + hvlpevent_invalidate(event); 116 116 } 117 117 118 118 void process_hvlpevents(struct pt_regs *regs)
+3 -13
arch/powerpc/platforms/iseries/mf.c
··· 251 251 } 252 252 memset(ev, 0, sizeof(struct pending_event)); 253 253 hev = &ev->event.hp_lp_event; 254 - hev->xFlags.xValid = 1; 255 - hev->xFlags.xAckType = HvLpEvent_AckType_ImmediateAck; 256 - hev->xFlags.xAckInd = HvLpEvent_AckInd_DoAck; 257 - hev->xFlags.xFunction = HvLpEvent_Function_Int; 254 + hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK | HV_LP_EVENT_INT; 258 255 hev->xType = HvLpEvent_Type_MachineFac; 259 256 hev->xSourceLp = HvLpConfig_getLpIndex(); 260 257 hev->xTargetLp = primary_lp; ··· 515 518 static void hv_handler(struct HvLpEvent *event, struct pt_regs *regs) 516 519 { 517 520 if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) { 518 - switch(event->xFlags.xFunction) { 519 - case HvLpEvent_Function_Ack: 521 + if (hvlpevent_is_ack(event)) 520 522 handle_ack((struct io_mf_lp_event *)event); 521 - break; 522 - case HvLpEvent_Function_Int: 523 + else 523 524 handle_int((struct io_mf_lp_event *)event); 524 - break; 525 - default: 526 - printk(KERN_ERR "mf.c: non ack/int event received\n"); 527 - break; 528 - } 529 525 } else 530 526 printk(KERN_ERR "mf.c: alien event received\n"); 531 527 }
+1
arch/powerpc/platforms/iseries/pci.c
··· 43 43 #include "irq.h" 44 44 #include "pci.h" 45 45 #include "call_pci.h" 46 + #include "iommu.h" 46 47 47 48 extern unsigned long io_page_mask; 48 49
+2
arch/powerpc/platforms/iseries/vio.c
··· 22 22 #include <asm/iseries/hv_lp_config.h> 23 23 #include <asm/iseries/hv_call_xm.h> 24 24 25 + #include "iommu.h" 26 + 25 27 struct device *iSeries_vio_dev = &vio_bus_device.dev; 26 28 EXPORT_SYMBOL(iSeries_vio_dev); 27 29
+5 -7
arch/powerpc/platforms/iseries/viopath.c
··· 270 270 * First see if this is just a normal monitor message from the 271 271 * other partition 272 272 */ 273 - if (event->xFlags.xFunction == HvLpEvent_Function_Int) { 273 + if (hvlpevent_is_int(event)) { 274 274 remoteLp = event->xSourceLp; 275 275 if (!viopathStatus[remoteLp].isActive) 276 276 sendMonMsg(remoteLp); ··· 331 331 { 332 332 if (!event) 333 333 return; 334 - if (event->xFlags.xFunction == HvLpEvent_Function_Int) { 334 + if (hvlpevent_is_int(event)) { 335 335 printk(VIOPATH_KERN_WARN 336 336 "unexpected config request from partition %d", 337 337 event->xSourceLp); 338 338 339 - if ((event->xFlags.xFunction == HvLpEvent_Function_Int) && 340 - (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) { 339 + if (hvlpevent_need_ack(event)) { 341 340 event->xRc = HvLpEvent_Rc_InvalidSubtype; 342 341 HvCallEvent_ackLpEvent(event); 343 342 } ··· 376 377 int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK) 377 378 >> VIOMAJOR_SUBTYPE_SHIFT; 378 379 379 - if (event->xFlags.xFunction == HvLpEvent_Function_Int) { 380 + if (hvlpevent_is_int(event)) { 380 381 remoteLp = event->xSourceLp; 381 382 /* 382 383 * The isActive is checked because if the hosting partition ··· 435 436 "unexpected virtual io event subtype %d from partition %d\n", 436 437 event->xSubtype, remoteLp); 437 438 /* No handler. Ack if necessary */ 438 - if ((event->xFlags.xFunction == HvLpEvent_Function_Int) && 439 - (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) { 439 + if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) { 440 440 event->xRc = HvLpEvent_Rc_InvalidSubtype; 441 441 HvCallEvent_ackLpEvent(event); 442 442 }
+1 -1
arch/powerpc/platforms/powermac/setup.c
··· 278 278 } 279 279 #endif 280 280 281 - void __init pmac_setup_arch(void) 281 + static void __init pmac_setup_arch(void) 282 282 { 283 283 struct device_node *cpu, *ic; 284 284 int *fp;
+3 -2
arch/powerpc/platforms/pseries/eeh.c
··· 208 208 { 209 209 while (dn) { 210 210 if (PCI_DN(dn)) { 211 - PCI_DN(dn)->eeh_mode |= mode_flag; 212 - 213 211 /* Mark the pci device driver too */ 214 212 struct pci_dev *dev = PCI_DN(dn)->pcidev; 213 + 214 + PCI_DN(dn)->eeh_mode |= mode_flag; 215 + 215 216 if (dev && dev->driver) 216 217 dev->error_state = pci_channel_io_frozen; 217 218
+2 -2
arch/powerpc/platforms/pseries/hvcserver.c
··· 40 40 * functions aren't performance sensitive, so this conversion isn't an 41 41 * issue. 42 42 */ 43 - int hvcs_convert(long to_convert) 43 + static int hvcs_convert(long to_convert) 44 44 { 45 45 switch (to_convert) { 46 46 case H_Success: ··· 91 91 EXPORT_SYMBOL(hvcs_free_partner_info); 92 92 93 93 /* Helper function for hvcs_get_partner_info */ 94 - int hvcs_next_partner(uint32_t unit_address, 94 + static int hvcs_next_partner(uint32_t unit_address, 95 95 unsigned long last_p_partition_ID, 96 96 unsigned long last_p_unit_address, unsigned long *pi_buff) 97 97
-2
arch/powerpc/platforms/pseries/iommu.c
··· 51 51 52 52 #define DBG(fmt...) 53 53 54 - extern int is_python(struct device_node *); 55 - 56 54 static void tce_build_pSeries(struct iommu_table *tbl, long index, 57 55 long npages, unsigned long uaddr, 58 56 enum dma_data_direction direction)
+2 -2
arch/powerpc/platforms/pseries/scanlog.c
··· 192 192 .release = scanlog_release, 193 193 }; 194 194 195 - int __init scanlog_init(void) 195 + static int __init scanlog_init(void) 196 196 { 197 197 struct proc_dir_entry *ent; 198 198 ··· 222 222 return 0; 223 223 } 224 224 225 - void __exit scanlog_cleanup(void) 225 + static void __exit scanlog_cleanup(void) 226 226 { 227 227 if (proc_ppc64_scan_log_dump) { 228 228 kfree(proc_ppc64_scan_log_dump->data);
+1 -1
arch/powerpc/platforms/pseries/setup.c
··· 86 86 87 87 struct mpic *pSeries_mpic; 88 88 89 - void pSeries_show_cpuinfo(struct seq_file *m) 89 + static void pSeries_show_cpuinfo(struct seq_file *m) 90 90 { 91 91 struct device_node *root; 92 92 const char *model = "";
+2 -2
arch/powerpc/platforms/pseries/smp.c
··· 93 93 return cpu_status; 94 94 } 95 95 96 - int pSeries_cpu_disable(void) 96 + static int pSeries_cpu_disable(void) 97 97 { 98 98 int cpu = smp_processor_id(); 99 99 ··· 109 109 return 0; 110 110 } 111 111 112 - void pSeries_cpu_die(unsigned int cpu) 112 + static void pSeries_cpu_die(unsigned int cpu) 113 113 { 114 114 int tries; 115 115 int cpu_status;
+1 -1
arch/powerpc/platforms/pseries/xics.c
··· 381 381 382 382 #ifdef CONFIG_SMP 383 383 384 - irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs) 384 + static irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs) 385 385 { 386 386 int cpu = smp_processor_id(); 387 387
-1
arch/powerpc/sysdev/dart_iommu.c
··· 139 139 140 140 *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK); 141 141 142 - rpn++; 143 142 uaddr += DART_PAGE_SIZE; 144 143 } 145 144
+7 -7
arch/powerpc/xmon/xmon.c
··· 311 311 } 312 312 #endif 313 313 314 - int xmon_core(struct pt_regs *regs, int fromipi) 314 + static int xmon_core(struct pt_regs *regs, int fromipi) 315 315 { 316 316 int cmd = 0; 317 317 unsigned long msr; ··· 528 528 return IRQ_HANDLED; 529 529 } 530 530 531 - int xmon_bpt(struct pt_regs *regs) 531 + static int xmon_bpt(struct pt_regs *regs) 532 532 { 533 533 struct bpt *bp; 534 534 unsigned long offset; ··· 554 554 return 1; 555 555 } 556 556 557 - int xmon_sstep(struct pt_regs *regs) 557 + static int xmon_sstep(struct pt_regs *regs) 558 558 { 559 559 if (user_mode(regs)) 560 560 return 0; ··· 562 562 return 1; 563 563 } 564 564 565 - int xmon_dabr_match(struct pt_regs *regs) 565 + static int xmon_dabr_match(struct pt_regs *regs) 566 566 { 567 567 if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF)) 568 568 return 0; ··· 572 572 return 1; 573 573 } 574 574 575 - int xmon_iabr_match(struct pt_regs *regs) 575 + static int xmon_iabr_match(struct pt_regs *regs) 576 576 { 577 577 if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF)) 578 578 return 0; ··· 582 582 return 1; 583 583 } 584 584 585 - int xmon_ipi(struct pt_regs *regs) 585 + static int xmon_ipi(struct pt_regs *regs) 586 586 { 587 587 #ifdef CONFIG_SMP 588 588 if (in_xmon && !cpu_isset(smp_processor_id(), cpus_in_xmon)) ··· 591 591 return 0; 592 592 } 593 593 594 - int xmon_fault_handler(struct pt_regs *regs) 594 + static int xmon_fault_handler(struct pt_regs *regs) 595 595 { 596 596 struct bpt *bp; 597 597 unsigned long offset;
-1
arch/ppc/kernel/Makefile
··· 13 13 extra-y += vmlinux.lds 14 14 15 15 obj-y := entry.o traps.o idle.o time.o misc.o \ 16 - process.o \ 17 16 setup.o \ 18 17 ppc_htab.o 19 18 obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
-851
arch/ppc/kernel/process.c
··· 1 - /* 2 - * arch/ppc/kernel/process.c 3 - * 4 - * Derived from "arch/i386/kernel/process.c" 5 - * Copyright (C) 1995 Linus Torvalds 6 - * 7 - * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and 8 - * Paul Mackerras (paulus@cs.anu.edu.au) 9 - * 10 - * PowerPC version 11 - * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 12 - * 13 - * This program is free software; you can redistribute it and/or 14 - * modify it under the terms of the GNU General Public License 15 - * as published by the Free Software Foundation; either version 16 - * 2 of the License, or (at your option) any later version. 17 - * 18 - */ 19 - 20 - #include <linux/config.h> 21 - #include <linux/errno.h> 22 - #include <linux/sched.h> 23 - #include <linux/kernel.h> 24 - #include <linux/mm.h> 25 - #include <linux/smp.h> 26 - #include <linux/smp_lock.h> 27 - #include <linux/stddef.h> 28 - #include <linux/unistd.h> 29 - #include <linux/ptrace.h> 30 - #include <linux/slab.h> 31 - #include <linux/user.h> 32 - #include <linux/elf.h> 33 - #include <linux/init.h> 34 - #include <linux/prctl.h> 35 - #include <linux/init_task.h> 36 - #include <linux/module.h> 37 - #include <linux/kallsyms.h> 38 - #include <linux/mqueue.h> 39 - #include <linux/hardirq.h> 40 - 41 - #include <asm/pgtable.h> 42 - #include <asm/uaccess.h> 43 - #include <asm/system.h> 44 - #include <asm/io.h> 45 - #include <asm/processor.h> 46 - #include <asm/mmu.h> 47 - #include <asm/prom.h> 48 - 49 - extern unsigned long _get_SP(void); 50 - 51 - struct task_struct *last_task_used_math = NULL; 52 - struct task_struct *last_task_used_altivec = NULL; 53 - struct task_struct *last_task_used_spe = NULL; 54 - 55 - static struct fs_struct init_fs = INIT_FS; 56 - static struct files_struct init_files = INIT_FILES; 57 - static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 58 - static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 59 - struct mm_struct init_mm = INIT_MM(init_mm); 60 - EXPORT_SYMBOL(init_mm); 61 - 62 - /* this is 8kB-aligned so we can get to the thread_info struct 63 - at the base of it from the stack pointer with 1 integer instruction. */ 64 - union thread_union init_thread_union 65 - __attribute__((__section__(".data.init_task"))) = 66 - { INIT_THREAD_INFO(init_task) }; 67 - 68 - /* initial task structure */ 69 - struct task_struct init_task = INIT_TASK(init_task); 70 - EXPORT_SYMBOL(init_task); 71 - 72 - /* only used to get secondary processor up */ 73 - struct task_struct *current_set[NR_CPUS] = {&init_task, }; 74 - 75 - #undef SHOW_TASK_SWITCHES 76 - #undef CHECK_STACK 77 - 78 - #if defined(CHECK_STACK) 79 - unsigned long 80 - kernel_stack_top(struct task_struct *tsk) 81 - { 82 - return ((unsigned long)tsk) + sizeof(union task_union); 83 - } 84 - 85 - unsigned long 86 - task_top(struct task_struct *tsk) 87 - { 88 - return ((unsigned long)tsk) + sizeof(struct thread_info); 89 - } 90 - 91 - /* check to make sure the kernel stack is healthy */ 92 - int check_stack(struct task_struct *tsk) 93 - { 94 - unsigned long stack_top = kernel_stack_top(tsk); 95 - unsigned long tsk_top = task_top(tsk); 96 - int ret = 0; 97 - 98 - #if 0 99 - /* check thread magic */ 100 - if ( tsk->thread.magic != THREAD_MAGIC ) 101 - { 102 - ret |= 1; 103 - printk("thread.magic bad: %08x\n", tsk->thread.magic); 104 - } 105 - #endif 106 - 107 - if ( !tsk ) 108 - printk("check_stack(): tsk bad tsk %p\n",tsk); 109 - 110 - /* check if stored ksp is bad */ 111 - if ( (tsk->thread.ksp > stack_top) || (tsk->thread.ksp < tsk_top) ) 112 - { 113 - printk("stack out of bounds: %s/%d\n" 114 - " tsk_top %08lx ksp %08lx stack_top %08lx\n", 115 - tsk->comm,tsk->pid, 116 - tsk_top, tsk->thread.ksp, stack_top); 117 - ret |= 2; 118 - } 119 - 120 - /* check if stack ptr RIGHT NOW is bad */ 121 - if ( (tsk == current) && ((_get_SP() > stack_top ) || (_get_SP() < tsk_top)) ) 122 - { 123 - printk("current stack ptr out of bounds: %s/%d\n" 124 - " tsk_top %08lx sp %08lx stack_top %08lx\n", 125 - current->comm,current->pid, 126 - tsk_top, _get_SP(), stack_top); 127 - ret |= 4; 128 - } 129 - 130 - #if 0 131 - /* check amount of free stack */ 132 - for ( i = (unsigned long *)task_top(tsk) ; i < kernel_stack_top(tsk) ; i++ ) 133 - { 134 - if ( !i ) 135 - printk("check_stack(): i = %p\n", i); 136 - if ( *i != 0 ) 137 - { 138 - /* only notify if it's less than 900 bytes */ 139 - if ( (i - (unsigned long *)task_top(tsk)) < 900 ) 140 - printk("%d bytes free on stack\n", 141 - i - task_top(tsk)); 142 - break; 143 - } 144 - } 145 - #endif 146 - 147 - if (ret) 148 - { 149 - panic("bad kernel stack"); 150 - } 151 - return(ret); 152 - } 153 - #endif /* defined(CHECK_STACK) */ 154 - 155 - /* 156 - * Make sure the floating-point register state in the 157 - * the thread_struct is up to date for task tsk. 158 - */ 159 - void flush_fp_to_thread(struct task_struct *tsk) 160 - { 161 - if (tsk->thread.regs) { 162 - /* 163 - * We need to disable preemption here because if we didn't, 164 - * another process could get scheduled after the regs->msr 165 - * test but before we have finished saving the FP registers 166 - * to the thread_struct. That process could take over the 167 - * FPU, and then when we get scheduled again we would store 168 - * bogus values for the remaining FP registers. 169 - */ 170 - preempt_disable(); 171 - if (tsk->thread.regs->msr & MSR_FP) { 172 - #ifdef CONFIG_SMP 173 - /* 174 - * This should only ever be called for current or 175 - * for a stopped child process. Since we save away 176 - * the FP register state on context switch on SMP, 177 - * there is something wrong if a stopped child appears 178 - * to still have its FP state in the CPU registers. 179 - */ 180 - BUG_ON(tsk != current); 181 - #endif 182 - giveup_fpu(current); 183 - } 184 - preempt_enable(); 185 - } 186 - } 187 - 188 - void enable_kernel_fp(void) 189 - { 190 - WARN_ON(preemptible()); 191 - 192 - #ifdef CONFIG_SMP 193 - if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) 194 - giveup_fpu(current); 195 - else 196 - giveup_fpu(NULL); /* just enables FP for kernel */ 197 - #else 198 - giveup_fpu(last_task_used_math); 199 - #endif /* CONFIG_SMP */ 200 - } 201 - EXPORT_SYMBOL(enable_kernel_fp); 202 - 203 - int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs) 204 - { 205 - preempt_disable(); 206 - if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP)) 207 - giveup_fpu(tsk); 208 - preempt_enable(); 209 - memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs)); 210 - return 1; 211 - } 212 - 213 - #ifdef CONFIG_ALTIVEC 214 - void enable_kernel_altivec(void) 215 - { 216 - WARN_ON(preemptible()); 217 - 218 - #ifdef CONFIG_SMP 219 - if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) 220 - giveup_altivec(current); 221 - else 222 - giveup_altivec(NULL); /* just enable AltiVec for kernel - force */ 223 - #else 224 - giveup_altivec(last_task_used_altivec); 225 - #endif /* __SMP __ */ 226 - } 227 - EXPORT_SYMBOL(enable_kernel_altivec); 228 - 229 - /* 230 - * Make sure the VMX/Altivec register state in the 231 - * the thread_struct is up to date for task tsk. 232 - */ 233 - void flush_altivec_to_thread(struct task_struct *tsk) 234 - { 235 - if (tsk->thread.regs) { 236 - preempt_disable(); 237 - if (tsk->thread.regs->msr & MSR_VEC) { 238 - #ifdef CONFIG_SMP 239 - BUG_ON(tsk != current); 240 - #endif 241 - giveup_altivec(current); 242 - } 243 - preempt_enable(); 244 - } 245 - } 246 - 247 - int dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs) 248 - { 249 - if (regs->msr & MSR_VEC) 250 - giveup_altivec(current); 251 - memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs)); 252 - return 1; 253 - } 254 - #endif /* CONFIG_ALTIVEC */ 255 - 256 - #ifdef CONFIG_SPE 257 - void 258 - enable_kernel_spe(void) 259 - { 260 - WARN_ON(preemptible()); 261 - 262 - #ifdef CONFIG_SMP 263 - if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) 264 - giveup_spe(current); 265 - else 266 - giveup_spe(NULL); /* just enable SPE for kernel - force */ 267 - #else 268 - giveup_spe(last_task_used_spe); 269 - #endif /* __SMP __ */ 270 - } 271 - EXPORT_SYMBOL(enable_kernel_spe); 272 - 273 - void flush_spe_to_thread(struct task_struct *tsk) 274 - { 275 - if (tsk->thread.regs) { 276 - preempt_disable(); 277 - if (tsk->thread.regs->msr & MSR_SPE) { 278 - #ifdef CONFIG_SMP 279 - BUG_ON(tsk != current); 280 - #endif 281 - giveup_spe(current); 282 - } 283 - preempt_enable(); 284 - } 285 - } 286 - 287 - int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs) 288 - { 289 - if (regs->msr & MSR_SPE) 290 - giveup_spe(current); 291 - /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */ 292 - memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35); 293 - return 1; 294 - } 295 - #endif /* CONFIG_SPE */ 296 - 297 - struct task_struct *__switch_to(struct task_struct *prev, 298 - struct task_struct *new) 299 - { 300 - struct thread_struct *new_thread, *old_thread; 301 - unsigned long s; 302 - struct task_struct *last; 303 - 304 - local_irq_save(s); 305 - #ifdef CHECK_STACK 306 - check_stack(prev); 307 - check_stack(new); 308 - #endif 309 - 310 - #ifdef CONFIG_SMP 311 - /* avoid complexity of lazy save/restore of fpu 312 - * by just saving it every time we switch out if 313 - * this task used the fpu during the last quantum. 314 - * 315 - * If it tries to use the fpu again, it'll trap and 316 - * reload its fp regs. So we don't have to do a restore 317 - * every switch, just a save. 318 - * -- Cort 319 - */ 320 - if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP)) 321 - giveup_fpu(prev); 322 - #ifdef CONFIG_ALTIVEC 323 - /* 324 - * If the previous thread used altivec in the last quantum 325 - * (thus changing altivec regs) then save them. 326 - * We used to check the VRSAVE register but not all apps 327 - * set it, so we don't rely on it now (and in fact we need 328 - * to save & restore VSCR even if VRSAVE == 0). -- paulus 329 - * 330 - * On SMP we always save/restore altivec regs just to avoid the 331 - * complexity of changing processors. 332 - * -- Cort 333 - */ 334 - if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))) 335 - giveup_altivec(prev); 336 - #endif /* CONFIG_ALTIVEC */ 337 - #ifdef CONFIG_SPE 338 - /* 339 - * If the previous thread used spe in the last quantum 340 - * (thus changing spe regs) then save them. 341 - * 342 - * On SMP we always save/restore spe regs just to avoid the 343 - * complexity of changing processors. 344 - */ 345 - if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))) 346 - giveup_spe(prev); 347 - #endif /* CONFIG_SPE */ 348 - #endif /* CONFIG_SMP */ 349 - 350 - #ifdef CONFIG_ALTIVEC 351 - /* Avoid the trap. On smp this this never happens since 352 - * we don't set last_task_used_altivec -- Cort 353 - */ 354 - if (new->thread.regs && last_task_used_altivec == new) 355 - new->thread.regs->msr |= MSR_VEC; 356 - #endif 357 - #ifdef CONFIG_SPE 358 - /* Avoid the trap. On smp this this never happens since 359 - * we don't set last_task_used_spe 360 - */ 361 - if (new->thread.regs && last_task_used_spe == new) 362 - new->thread.regs->msr |= MSR_SPE; 363 - #endif /* CONFIG_SPE */ 364 - new_thread = &new->thread; 365 - old_thread = &current->thread; 366 - last = _switch(old_thread, new_thread); 367 - local_irq_restore(s); 368 - return last; 369 - } 370 - 371 - void show_regs(struct pt_regs * regs) 372 - { 373 - int i, trap; 374 - 375 - printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx %s\n", 376 - regs->nip, regs->link, regs->gpr[1], regs, regs->trap, 377 - print_tainted()); 378 - printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n", 379 - regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0, 380 - regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0, 381 - regs->msr&MSR_IR ? 1 : 0, 382 - regs->msr&MSR_DR ? 1 : 0); 383 - trap = TRAP(regs); 384 - if (trap == 0x300 || trap == 0x600) 385 - printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr); 386 - printk("TASK = %p[%d] '%s' THREAD: %p\n", 387 - current, current->pid, current->comm, task_thread_info(current)); 388 - printk("Last syscall: %ld ", current->thread.last_syscall); 389 - 390 - #ifdef CONFIG_SMP 391 - printk(" CPU: %d", smp_processor_id()); 392 - #endif /* CONFIG_SMP */ 393 - 394 - for (i = 0; i < 32; i++) { 395 - long r; 396 - if ((i % 8) == 0) 397 - printk("\n" KERN_INFO "GPR%02d: ", i); 398 - if (__get_user(r, &regs->gpr[i])) 399 - break; 400 - printk("%08lX ", r); 401 - if (i == 12 && !FULL_REGS(regs)) 402 - break; 403 - } 404 - printk("\n"); 405 - #ifdef CONFIG_KALLSYMS 406 - /* 407 - * Lookup NIP late so we have the best change of getting the 408 - * above info out without failing 409 - */ 410 - printk("NIP [%08lx] ", regs->nip); 411 - print_symbol("%s\n", regs->nip); 412 - printk("LR [%08lx] ", regs->link); 413 - print_symbol("%s\n", regs->link); 414 - #endif 415 - show_stack(current, (unsigned long *) regs->gpr[1]); 416 - } 417 - 418 - void exit_thread(void) 419 - { 420 - preempt_disable(); 421 - if (last_task_used_math == current) 422 - last_task_used_math = NULL; 423 - if (last_task_used_altivec == current) 424 - last_task_used_altivec = NULL; 425 - #ifdef CONFIG_SPE 426 - if (last_task_used_spe == current) 427 - last_task_used_spe = NULL; 428 - #endif 429 - preempt_enable(); 430 - } 431 - 432 - void flush_thread(void) 433 - { 434 - preempt_disable(); 435 - if (last_task_used_math == current) 436 - last_task_used_math = NULL; 437 - if (last_task_used_altivec == current) 438 - last_task_used_altivec = NULL; 439 - #ifdef CONFIG_SPE 440 - if (last_task_used_spe == current) 441 - last_task_used_spe = NULL; 442 - #endif 443 - preempt_enable(); 444 - } 445 - 446 - void 447 - release_thread(struct task_struct *t) 448 - { 449 - } 450 - 451 - /* 452 - * This gets called before we allocate a new thread and copy 453 - * the current task into it. 454 - */ 455 - void prepare_to_copy(struct task_struct *tsk) 456 - { 457 - struct pt_regs *regs = tsk->thread.regs; 458 - 459 - if (regs == NULL) 460 - return; 461 - preempt_disable(); 462 - if (regs->msr & MSR_FP) 463 - giveup_fpu(current); 464 - #ifdef CONFIG_ALTIVEC 465 - if (regs->msr & MSR_VEC) 466 - giveup_altivec(current); 467 - #endif /* CONFIG_ALTIVEC */ 468 - #ifdef CONFIG_SPE 469 - if (regs->msr & MSR_SPE) 470 - giveup_spe(current); 471 - #endif /* CONFIG_SPE */ 472 - preempt_enable(); 473 - } 474 - 475 - /* 476 - * Copy a thread.. 477 - */ 478 - int 479 - copy_thread(int nr, unsigned long clone_flags, unsigned long usp, 480 - unsigned long unused, 481 - struct task_struct *p, struct pt_regs *regs) 482 - { 483 - struct pt_regs *childregs, *kregs; 484 - extern void ret_from_fork(void); 485 - unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; 486 - unsigned long childframe; 487 - 488 - CHECK_FULL_REGS(regs); 489 - /* Copy registers */ 490 - sp -= sizeof(struct pt_regs); 491 - childregs = (struct pt_regs *) sp; 492 - *childregs = *regs; 493 - if ((childregs->msr & MSR_PR) == 0) { 494 - /* for kernel thread, set `current' and stackptr in new task */ 495 - childregs->gpr[1] = sp + sizeof(struct pt_regs); 496 - childregs->gpr[2] = (unsigned long) p; 497 - p->thread.regs = NULL; /* no user register state */ 498 - } else { 499 - childregs->gpr[1] = usp; 500 - p->thread.regs = childregs; 501 - if (clone_flags & CLONE_SETTLS) 502 - childregs->gpr[2] = childregs->gpr[6]; 503 - } 504 - childregs->gpr[3] = 0; /* Result from fork() */ 505 - sp -= STACK_FRAME_OVERHEAD; 506 - childframe = sp; 507 - 508 - /* 509 - * The way this works is that at some point in the future 510 - * some task will call _switch to switch to the new task. 511 - * That will pop off the stack frame created below and start 512 - * the new task running at ret_from_fork. The new task will 513 - * do some house keeping and then return from the fork or clone 514 - * system call, using the stack frame created above. 515 - */ 516 - sp -= sizeof(struct pt_regs); 517 - kregs = (struct pt_regs *) sp; 518 - sp -= STACK_FRAME_OVERHEAD; 519 - p->thread.ksp = sp; 520 - kregs->nip = (unsigned long)ret_from_fork; 521 - 522 - p->thread.last_syscall = -1; 523 - 524 - return 0; 525 - } 526 - 527 - /* 528 - * Set up a thread for executing a new program 529 - */ 530 - void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp) 531 - { 532 - set_fs(USER_DS); 533 - memset(regs->gpr, 0, sizeof(regs->gpr)); 534 - regs->ctr = 0; 535 - regs->link = 0; 536 - regs->xer = 0; 537 - regs->ccr = 0; 538 - regs->mq = 0; 539 - regs->nip = nip; 540 - regs->gpr[1] = sp; 541 - regs->msr = MSR_USER; 542 - preempt_disable(); 543 - if (last_task_used_math == current) 544 - last_task_used_math = NULL; 545 - if (last_task_used_altivec == current) 546 - last_task_used_altivec = NULL; 547 - #ifdef CONFIG_SPE 548 - if (last_task_used_spe == current) 549 - last_task_used_spe = NULL; 550 - #endif 551 - preempt_enable(); 552 - memset(current->thread.fpr, 0, sizeof(current->thread.fpr)); 553 - current->thread.fpscr.val = 0; 554 - #ifdef CONFIG_ALTIVEC 555 - memset(current->thread.vr, 0, sizeof(current->thread.vr)); 556 - memset(&current->thread.vscr, 0, sizeof(current->thread.vscr)); 557 - current->thread.vrsave = 0; 558 - current->thread.used_vr = 0; 559 - #endif /* CONFIG_ALTIVEC */ 560 - #ifdef CONFIG_SPE 561 - memset(current->thread.evr, 0, sizeof(current->thread.evr)); 562 - current->thread.acc = 0; 563 - current->thread.spefscr = 0; 564 - current->thread.used_spe = 0; 565 - #endif /* CONFIG_SPE */ 566 - } 567 - 568 - #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \ 569 - | PR_FP_EXC_RES | PR_FP_EXC_INV) 570 - 571 - int set_fpexc_mode(struct task_struct *tsk, unsigned int val) 572 - { 573 - struct pt_regs *regs = tsk->thread.regs; 574 - 575 - /* This is a bit hairy. If we are an SPE enabled processor 576 - * (have embedded fp) we store the IEEE exception enable flags in 577 - * fpexc_mode. fpexc_mode is also used for setting FP exception 578 - * mode (asyn, precise, disabled) for 'Classic' FP. */ 579 - if (val & PR_FP_EXC_SW_ENABLE) { 580 - #ifdef CONFIG_SPE 581 - tsk->thread.fpexc_mode = val & 582 - (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); 583 - #else 584 - return -EINVAL; 585 - #endif 586 - } else { 587 - /* on a CONFIG_SPE this does not hurt us. The bits that 588 - * __pack_fe01 use do not overlap with bits used for 589 - * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits 590 - * on CONFIG_SPE implementations are reserved so writing to 591 - * them does not change anything */ 592 - if (val > PR_FP_EXC_PRECISE) 593 - return -EINVAL; 594 - tsk->thread.fpexc_mode = __pack_fe01(val); 595 - if (regs != NULL && (regs->msr & MSR_FP) != 0) 596 - regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1)) 597 - | tsk->thread.fpexc_mode; 598 - } 599 - return 0; 600 - } 601 - 602 - int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) 603 - { 604 - unsigned int val; 605 - 606 - if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) 607 - #ifdef CONFIG_SPE 608 - val = tsk->thread.fpexc_mode; 609 - #else 610 - return -EINVAL; 611 - #endif 612 - else 613 - val = __unpack_fe01(tsk->thread.fpexc_mode); 614 - return put_user(val, (unsigned int __user *) adr); 615 - } 616 - 617 - int sys_clone(unsigned long clone_flags, unsigned long usp, 618 - int __user *parent_tidp, void __user *child_threadptr, 619 - int __user *child_tidp, int p6, 620 - struct pt_regs *regs) 621 - { 622 - CHECK_FULL_REGS(regs); 623 - if (usp == 0) 624 - usp = regs->gpr[1]; /* stack pointer for child */ 625 - return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp); 626 - } 627 - 628 - int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3, 629 - unsigned long p4, unsigned long p5, unsigned long p6, 630 - struct pt_regs *regs) 631 - { 632 - CHECK_FULL_REGS(regs); 633 - return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL); 634 - } 635 - 636 - int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3, 637 - unsigned long p4, unsigned long p5, unsigned long p6, 638 - struct pt_regs *regs) 639 - { 640 - CHECK_FULL_REGS(regs); 641 - return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], 642 - regs, 0, NULL, NULL); 643 - } 644 - 645 - int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, 646 - unsigned long a3, unsigned long a4, unsigned long a5, 647 - struct pt_regs *regs) 648 - { 649 - int error; 650 - char * filename; 651 - 652 - filename = getname((char __user *) a0); 653 - error = PTR_ERR(filename); 654 - if (IS_ERR(filename)) 655 - goto out; 656 - preempt_disable(); 657 - if (regs->msr & MSR_FP) 658 - giveup_fpu(current); 659 - #ifdef CONFIG_ALTIVEC 660 - if (regs->msr & MSR_VEC) 661 - giveup_altivec(current); 662 - #endif /* CONFIG_ALTIVEC */ 663 - #ifdef CONFIG_SPE 664 - if (regs->msr & MSR_SPE) 665 - giveup_spe(current); 666 - #endif /* CONFIG_SPE */ 667 - preempt_enable(); 668 - error = do_execve(filename, (char __user *__user *) a1, 669 - (char __user *__user *) a2, regs); 670 - if (error == 0) { 671 - task_lock(current); 672 - current->ptrace &= ~PT_DTRACE; 673 - task_unlock(current); 674 - } 675 - putname(filename); 676 - out: 677 - return error; 678 - } 679 - 680 - void dump_stack(void) 681 - { 682 - show_stack(current, NULL); 683 - } 684 - 685 - EXPORT_SYMBOL(dump_stack); 686 - 687 - void show_stack(struct task_struct *tsk, unsigned long *stack) 688 - { 689 - unsigned long sp, stack_top, prev_sp, ret; 690 - int count = 0; 691 - unsigned long next_exc = 0; 692 - struct pt_regs *regs; 693 - extern char ret_from_except, ret_from_except_full, ret_from_syscall; 694 - 695 - sp = (unsigned long) stack; 696 - if (tsk == NULL) 697 - tsk = current; 698 - if (sp == 0) { 699 - if (tsk == current) 700 - asm("mr %0,1" : "=r" (sp)); 701 - else 702 - sp = tsk->thread.ksp; 703 - } 704 - 705 - prev_sp = (unsigned long) end_of_stack(tsk); 706 - stack_top = (unsigned long) task_stack_page(tsk) + THREAD_SIZE; 707 - while (count < 16 && sp > prev_sp && sp < stack_top && (sp & 3) == 0) { 708 - if (count == 0) { 709 - printk("Call trace:"); 710 - #ifdef CONFIG_KALLSYMS 711 - printk("\n"); 712 - #endif 713 - } else { 714 - if (next_exc) { 715 - ret = next_exc; 716 - next_exc = 0; 717 - } else 718 - ret = *(unsigned long *)(sp + 4); 719 - printk(" [%08lx] ", ret); 720 - #ifdef CONFIG_KALLSYMS 721 - print_symbol("%s", ret); 722 - printk("\n"); 723 - #endif 724 - if (ret == (unsigned long) &ret_from_except 725 - || ret == (unsigned long) &ret_from_except_full 726 - || ret == (unsigned long) &ret_from_syscall) { 727 - /* sp + 16 points to an exception frame */ 728 - regs = (struct pt_regs *) (sp + 16); 729 - if (sp + 16 + sizeof(*regs) <= stack_top) 730 - next_exc = regs->nip; 731 - } 732 - } 733 - ++count; 734 - sp = *(unsigned long *)sp; 735 - } 736 - #ifndef CONFIG_KALLSYMS 737 - if (count > 0) 738 - printk("\n"); 739 - #endif 740 - } 741 - 742 - #if 0 743 - /* 744 - * Low level print for debugging - Cort 745 - */ 746 - int __init ll_printk(const char *fmt, ...) 747 - { 748 - va_list args; 749 - char buf[256]; 750 - int i; 751 - 752 - va_start(args, fmt); 753 - i=vsprintf(buf,fmt,args); 754 - ll_puts(buf); 755 - va_end(args); 756 - return i; 757 - } 758 - 759 - int lines = 24, cols = 80; 760 - int orig_x = 0, orig_y = 0; 761 - 762 - void puthex(unsigned long val) 763 - { 764 - unsigned char buf[10]; 765 - int i; 766 - for (i = 7; i >= 0; i--) 767 - { 768 - buf[i] = "0123456789ABCDEF"[val & 0x0F]; 769 - val >>= 4; 770 - } 771 - buf[8] = '\0'; 772 - prom_print(buf); 773 - } 774 - 775 - void __init ll_puts(const char *s) 776 - { 777 - int x,y; 778 - char *vidmem = (char *)/*(_ISA_MEM_BASE + 0xB8000) */0xD00B8000; 779 - char c; 780 - extern int mem_init_done; 781 - 782 - if ( mem_init_done ) /* assume this means we can printk */ 783 - { 784 - printk(s); 785 - return; 786 - } 787 - 788 - #if 0 789 - if ( have_of ) 790 - { 791 - prom_print(s); 792 - return; 793 - } 794 - #endif 795 - 796 - /* 797 - * can't ll_puts on chrp without openfirmware yet. 798 - * vidmem just needs to be setup for it. 799 - * -- Cort 800 - */ 801 - if ( _machine != _MACH_prep ) 802 - return; 803 - x = orig_x; 804 - y = orig_y; 805 - 806 - while ( ( c = *s++ ) != '\0' ) { 807 - if ( c == '\n' ) { 808 - x = 0; 809 - if ( ++y >= lines ) { 810 - /*scroll();*/ 811 - /*y--;*/ 812 - y = 0; 813 - } 814 - } else { 815 - vidmem [ ( x + cols * y ) * 2 ] = c; 816 - if ( ++x >= cols ) { 817 - x = 0; 818 - if ( ++y >= lines ) { 819 - /*scroll();*/ 820 - /*y--;*/ 821 - y = 0; 822 - } 823 - } 824 - } 825 - } 826 - 827 - orig_x = x; 828 - orig_y = y; 829 - } 830 - #endif 831 - 832 - unsigned long get_wchan(struct task_struct *p) 833 - { 834 - unsigned long ip, sp; 835 - unsigned long stack_page = (unsigned long) task_stack_page(p); 836 - int count = 0; 837 - if (!p || p == current || p->state == TASK_RUNNING) 838 - return 0; 839 - sp = p->thread.ksp; 840 - do { 841 - sp = *(unsigned long *)sp; 842 - if (sp < stack_page || sp >= stack_page + 8188) 843 - return 0; 844 - if (count > 0) { 845 - ip = *(unsigned long *)(sp + 4); 846 - if (!in_sched_functions(ip)) 847 - return ip; 848 - } 849 - } while (count++ < 16); 850 - return 0; 851 - }
+15 -17
drivers/block/viodasd.c
··· 293 293 u16 viocmd; 294 294 HvLpEvent_Rc hvrc; 295 295 struct vioblocklpevent *bevent; 296 + struct HvLpEvent *hev; 296 297 struct scatterlist sg[VIOMAXBLOCKDMA]; 297 298 int sgindex; 298 299 int statindex; ··· 348 347 * token so we can match the response up later 349 348 */ 350 349 memset(bevent, 0, sizeof(struct vioblocklpevent)); 351 - bevent->event.xFlags.xValid = 1; 352 - bevent->event.xFlags.xFunction = HvLpEvent_Function_Int; 353 - bevent->event.xFlags.xAckInd = HvLpEvent_AckInd_DoAck; 354 - bevent->event.xFlags.xAckType = HvLpEvent_AckType_ImmediateAck; 355 - bevent->event.xType = HvLpEvent_Type_VirtualIo; 356 - bevent->event.xSubtype = viocmd; 357 - bevent->event.xSourceLp = HvLpConfig_getLpIndex(); 358 - bevent->event.xTargetLp = viopath_hostLp; 359 - bevent->event.xSizeMinus1 = 350 + hev = &bevent->event; 351 + hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK | 352 + HV_LP_EVENT_INT; 353 + hev->xType = HvLpEvent_Type_VirtualIo; 354 + hev->xSubtype = viocmd; 355 + hev->xSourceLp = HvLpConfig_getLpIndex(); 356 + hev->xTargetLp = viopath_hostLp; 357 + hev->xSizeMinus1 = 360 358 offsetof(struct vioblocklpevent, u.rw_data.dma_info) + 361 359 (sizeof(bevent->u.rw_data.dma_info[0]) * nsg) - 1; 362 - bevent->event.xSourceInstanceId = 363 - viopath_sourceinst(viopath_hostLp); 364 - bevent->event.xTargetInstanceId = 365 - viopath_targetinst(viopath_hostLp); 366 - bevent->event.xCorrelationToken = (u64)req; 360 + hev->xSourceInstanceId = viopath_sourceinst(viopath_hostLp); 361 + hev->xTargetInstanceId = viopath_targetinst(viopath_hostLp); 362 + hev->xCorrelationToken = (u64)req; 367 363 bevent->version = VIOVERSION; 368 364 bevent->disk = DEVICE_NO(d); 369 365 bevent->u.rw_data.offset = start; ··· 647 649 /* Notification that a partition went away! */ 648 650 return; 649 651 /* First, we should NEVER get an int here...only acks */ 650 - if (event->xFlags.xFunction == HvLpEvent_Function_Int) { 652 + if (hvlpevent_is_int(event)) { 651 653 printk(VIOD_KERN_WARNING 652 654 "Yikes! got an int in viodasd event handler!\n"); 653 - if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) { 655 + if (hvlpevent_need_ack(event)) { 654 656 event->xRc = HvLpEvent_Rc_InvalidSubtype; 655 657 HvCallEvent_ackLpEvent(event); 656 658 } ··· 693 695 694 696 default: 695 697 printk(VIOD_KERN_WARNING "invalid subtype!"); 696 - if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) { 698 + if (hvlpevent_need_ack(event)) { 697 699 event->xRc = HvLpEvent_Rc_InvalidSubtype; 698 700 HvCallEvent_ackLpEvent(event); 699 701 }
+3 -3
drivers/cdrom/viocd.c
··· 542 542 /* Notification that a partition went away! */ 543 543 return; 544 544 /* First, we should NEVER get an int here...only acks */ 545 - if (event->xFlags.xFunction == HvLpEvent_Function_Int) { 545 + if (hvlpevent_is_int(event)) { 546 546 printk(VIOCD_KERN_WARNING 547 547 "Yikes! got an int in viocd event handler!\n"); 548 - if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) { 548 + if (hvlpevent_need_ack(event)) { 549 549 event->xRc = HvLpEvent_Rc_InvalidSubtype; 550 550 HvCallEvent_ackLpEvent(event); 551 551 } ··· 616 616 printk(VIOCD_KERN_WARNING 617 617 "message with invalid subtype %0x04X!\n", 618 618 event->xSubtype & VIOMINOR_SUBTYPE_MASK); 619 - if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) { 619 + if (hvlpevent_need_ack(event)) { 620 620 event->xRc = HvLpEvent_Rc_InvalidSubtype; 621 621 HvCallEvent_ackLpEvent(event); 622 622 }
+4 -4
drivers/char/mem.c
··· 534 534 return virtr + wrote; 535 535 } 536 536 537 - #if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)) 537 + #if defined(CONFIG_ISA) || !defined(__mc68000__) 538 538 static ssize_t read_port(struct file * file, char __user * buf, 539 539 size_t count, loff_t *ppos) 540 540 { ··· 795 795 .write = write_null, 796 796 }; 797 797 798 - #if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)) 798 + #if defined(CONFIG_ISA) || !defined(__mc68000__) 799 799 static struct file_operations port_fops = { 800 800 .llseek = memory_lseek, 801 801 .read = read_port, ··· 865 865 case 3: 866 866 filp->f_op = &null_fops; 867 867 break; 868 - #if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)) 868 + #if defined(CONFIG_ISA) || !defined(__mc68000__) 869 869 case 4: 870 870 filp->f_op = &port_fops; 871 871 break; ··· 912 912 {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops}, 913 913 {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops}, 914 914 {3, "null", S_IRUGO | S_IWUGO, &null_fops}, 915 - #if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)) 915 + #if defined(CONFIG_ISA) || !defined(__mc68000__) 916 916 {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops}, 917 917 #endif 918 918 {5, "zero", S_IRUGO | S_IWUGO, &zero_fops},
+15 -16
drivers/char/viocons.c
··· 476 476 */ 477 477 static void initDataEvent(struct viocharlpevent *viochar, HvLpIndex lp) 478 478 { 479 + struct HvLpEvent *hev = &viochar->event; 480 + 479 481 memset(viochar, 0, sizeof(struct viocharlpevent)); 480 482 481 - viochar->event.xFlags.xValid = 1; 482 - viochar->event.xFlags.xFunction = HvLpEvent_Function_Int; 483 - viochar->event.xFlags.xAckInd = HvLpEvent_AckInd_NoAck; 484 - viochar->event.xFlags.xAckType = HvLpEvent_AckType_DeferredAck; 485 - viochar->event.xType = HvLpEvent_Type_VirtualIo; 486 - viochar->event.xSubtype = viomajorsubtype_chario | viochardata; 487 - viochar->event.xSourceLp = HvLpConfig_getLpIndex(); 488 - viochar->event.xTargetLp = lp; 489 - viochar->event.xSizeMinus1 = sizeof(struct viocharlpevent); 490 - viochar->event.xSourceInstanceId = viopath_sourceinst(lp); 491 - viochar->event.xTargetInstanceId = viopath_targetinst(lp); 483 + hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DEFERRED_ACK | 484 + HV_LP_EVENT_INT; 485 + hev->xType = HvLpEvent_Type_VirtualIo; 486 + hev->xSubtype = viomajorsubtype_chario | viochardata; 487 + hev->xSourceLp = HvLpConfig_getLpIndex(); 488 + hev->xTargetLp = lp; 489 + hev->xSizeMinus1 = sizeof(struct viocharlpevent); 490 + hev->xSourceInstanceId = viopath_sourceinst(lp); 491 + hev->xTargetInstanceId = viopath_targetinst(lp); 492 492 } 493 493 494 494 /* ··· 752 752 struct port_info *pi; 753 753 int reject = 0; 754 754 755 - if (event->xFlags.xFunction == HvLpEvent_Function_Ack) { 755 + if (hvlpevent_is_ack(event)) { 756 756 if (port >= VTTY_PORTS) 757 757 return; 758 758 ··· 788 788 } 789 789 790 790 /* This had better require an ack, otherwise complain */ 791 - if (event->xFlags.xAckInd != HvLpEvent_AckInd_DoAck) { 791 + if (!hvlpevent_need_ack(event)) { 792 792 printk(VIOCONS_KERN_WARN "viocharopen without ack bit!\n"); 793 793 return; 794 794 } ··· 856 856 struct viocharlpevent *cevent = (struct viocharlpevent *)event; 857 857 u8 port = cevent->virtual_device; 858 858 859 - if (event->xFlags.xFunction == HvLpEvent_Function_Int) { 859 + if (hvlpevent_is_int(event)) { 860 860 if (port >= VTTY_PORTS) { 861 861 printk(VIOCONS_KERN_WARN 862 862 "close message from invalid virtual device.\n"); ··· 1056 1056 vioHandleConfig(event); 1057 1057 break; 1058 1058 default: 1059 - if ((event->xFlags.xFunction == HvLpEvent_Function_Int) && 1060 - (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) { 1059 + if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) { 1061 1060 event->xRc = HvLpEvent_Rc_InvalidSubtype; 1062 1061 HvCallEvent_ackLpEvent(event); 1063 1062 }
+2 -2
drivers/net/iseries_veth.c
··· 590 590 { 591 591 struct veth_lpevent *veth_event = (struct veth_lpevent *)event; 592 592 593 - if (event->xFlags.xFunction == HvLpEvent_Function_Ack) 593 + if (hvlpevent_is_ack(event)) 594 594 veth_handle_ack(veth_event); 595 - else if (event->xFlags.xFunction == HvLpEvent_Function_Int) 595 + else 596 596 veth_handle_int(veth_event); 597 597 } 598 598
-19
include/asm-powerpc/iommu.h
··· 64 64 65 65 #endif /* CONFIG_PPC_MULTIPLATFORM */ 66 66 67 - #ifdef CONFIG_PPC_PSERIES 68 - 69 - /* Creates table for an individual device node */ 70 - extern void iommu_devnode_init_pSeries(struct device_node *dn); 71 - 72 - #endif /* CONFIG_PPC_PSERIES */ 73 - 74 - #ifdef CONFIG_PPC_ISERIES 75 - 76 - /* Creates table for an individual device node */ 77 - extern void iommu_devnode_init_iSeries(struct device_node *dn); 78 - /* Get table parameters from HV */ 79 - extern void iommu_table_getparms_iSeries(unsigned long busno, 80 - unsigned char slotno, 81 - unsigned char virtbus, 82 - struct iommu_table* tbl); 83 - 84 - #endif /* CONFIG_PPC_ISERIES */ 85 - 86 67 /* Initializes an iommu_table based in values set in the passed-in 87 68 * structure 88 69 */
+1 -3
include/asm-powerpc/iseries/hv_call.h
··· 1 1 /* 2 - * HvCall.h 3 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation 4 3 * 5 4 * This program is free software; you can redistribute it and/or modify ··· 14 15 * You should have received a copy of the GNU General Public License 15 16 * along with this program; if not, write to the Free Software 16 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 - */ 18 - /* 18 + * 19 19 * This file contains the "hypervisor call" interface which is used to 20 20 * drive the hypervisor from the OS. 21 21 */
+36 -98
include/asm-powerpc/iseries/hv_call_event.h
··· 1 1 /* 2 - * HvCallEvent.h 3 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation 4 3 * 5 4 * This program is free software; you can redistribute it and/or modify ··· 14 15 * You should have received a copy of the GNU General Public License 15 16 * along with this program; if not, write to the Free Software 16 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 - */ 18 - /* 18 + * 19 19 * This file contains the "hypervisor call" interface which is used to 20 20 * drive the hypervisor from the OS. 21 21 */ ··· 31 33 typedef u8 HvLpEvent_AckInd; 32 34 typedef u8 HvLpEvent_AckType; 33 35 34 - struct HvCallEvent_PackedParms { 35 - u8 xAckType:1; 36 - u8 xAckInd:1; 37 - u8 xRsvd:1; 38 - u8 xTargetLp:5; 39 - u8 xType; 40 - u16 xSubtype; 41 - HvLpInstanceId xSourceInstId; 42 - HvLpInstanceId xTargetInstId; 43 - }; 44 - 45 36 typedef u8 HvLpDma_Direction; 46 37 typedef u8 HvLpDma_AddressType; 47 - 48 - struct HvCallEvent_PackedDmaParms { 49 - u8 xDirection:1; 50 - u8 xLocalAddrType:1; 51 - u8 xRemoteAddrType:1; 52 - u8 xRsvd1:5; 53 - HvLpIndex xRemoteLp; 54 - u8 xType; 55 - u8 xRsvd2; 56 - HvLpInstanceId xLocalInstId; 57 - HvLpInstanceId xRemoteInstId; 58 - }; 59 38 60 39 typedef u64 HvLpEvent_Rc; 61 40 typedef u64 HvLpDma_Rc; ··· 67 92 static inline void HvCallEvent_setLpEventStack(u8 queueIndex, 68 93 char *eventStackAddr, u32 eventStackSize) 69 94 { 70 - u64 abs_addr; 71 - 72 - abs_addr = virt_to_abs(eventStackAddr); 73 - HvCall3(HvCallEventSetLpEventStack, queueIndex, abs_addr, 74 - eventStackSize); 95 + HvCall3(HvCallEventSetLpEventStack, queueIndex, 96 + virt_to_abs(eventStackAddr), eventStackSize); 75 97 } 76 98 77 99 static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex, ··· 80 108 81 109 static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent *event) 82 110 { 83 - u64 abs_addr; 84 - 85 - #ifdef DEBUG_SENDEVENT 86 - printk("HvCallEvent_signalLpEvent: *event = %016lx\n ", 87 - (unsigned long)event); 88 - #endif 89 - abs_addr = virt_to_abs(event); 90 - return HvCall1(HvCallEventSignalLpEvent, abs_addr); 111 + return HvCall1(HvCallEventSignalLpEvent, virt_to_abs(event)); 91 112 } 92 113 93 114 static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp, ··· 92 127 { 93 128 /* Pack the misc bits into a single Dword to pass to PLIC */ 94 129 union { 95 - struct HvCallEvent_PackedParms parms; 130 + struct { 131 + u8 ack_and_target; 132 + u8 type; 133 + u16 subtype; 134 + HvLpInstanceId src_inst; 135 + HvLpInstanceId target_inst; 136 + } parms; 96 137 u64 dword; 97 138 } packed; 98 - packed.parms.xAckType = ackType; 99 - packed.parms.xAckInd = ackInd; 100 - packed.parms.xRsvd = 0; 101 - packed.parms.xTargetLp = targetLp; 102 - packed.parms.xType = type; 103 - packed.parms.xSubtype = subtype; 104 - packed.parms.xSourceInstId = sourceInstanceId; 105 - packed.parms.xTargetInstId = targetInstanceId; 139 + 140 + packed.parms.ack_and_target = (ackType << 7) | (ackInd << 6) | targetLp; 141 + packed.parms.type = type; 142 + packed.parms.subtype = subtype; 143 + packed.parms.src_inst = sourceInstanceId; 144 + packed.parms.target_inst = targetInstanceId; 106 145 107 146 return HvCall7(HvCallEventSignalLpEventParms, packed.dword, 108 147 correlationToken, eventData1, eventData2, ··· 115 146 116 147 static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent *event) 117 148 { 118 - u64 abs_addr; 119 - 120 - abs_addr = virt_to_abs(event); 121 - return HvCall1(HvCallEventAckLpEvent, abs_addr); 149 + return 
HvCall1(HvCallEventAckLpEvent, abs_addr); 149 + return HvCall1(HvCallEventAckLpEvent, virt_to_abs(event)); 122 150 } 123 151 124 152 static inline HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent *event) 125 153 { 126 - u64 abs_addr; 127 - 128 - abs_addr = virt_to_abs(event); 129 - return HvCall1(HvCallEventCancelLpEvent, abs_addr); 154 + return HvCall1(HvCallEventCancelLpEvent, virt_to_abs(event)); 130 155 } 131 156 132 157 static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId( ··· 158 195 { 159 196 /* Pack the misc bits into a single Dword to pass to PLIC */ 160 197 union { 161 - struct HvCallEvent_PackedDmaParms parms; 198 + struct { 199 + u8 flags; 200 + HvLpIndex remote; 201 + u8 type; 202 + u8 reserved; 203 + HvLpInstanceId local_inst; 204 + HvLpInstanceId remote_inst; 205 + } parms; 162 206 u64 dword; 163 207 } packed; 164 208 165 - packed.parms.xDirection = direction; 166 - packed.parms.xLocalAddrType = localAddressType; 167 - packed.parms.xRemoteAddrType = remoteAddressType; 168 - packed.parms.xRsvd1 = 0; 169 - packed.parms.xRemoteLp = remoteLp; 170 - packed.parms.xType = type; 171 - packed.parms.xRsvd2 = 0; 172 - packed.parms.xLocalInstId = localInstanceId; 173 - packed.parms.xRemoteInstId = remoteInstanceId; 209 + packed.parms.flags = (direction << 7) | 210 + (localAddressType << 6) | (remoteAddressType << 5); 211 + packed.parms.remote = remoteLp; 212 + packed.parms.type = type; 213 + packed.parms.reserved = 0; 214 + packed.parms.local_inst = localInstanceId; 215 + packed.parms.remote_inst = remoteInstanceId; 174 216 175 217 return HvCall4(HvCallEventDmaBufList, packed.dword, localBufList, 176 218 remoteBufList, transferLength); 177 219 } 178 220 179 - static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type, 180 - HvLpIndex remoteLp, HvLpDma_Direction direction, 181 - HvLpInstanceId localInstanceId, 182 - HvLpInstanceId remoteInstanceId, 183 - HvLpDma_AddressType localAddressType, 184 - HvLpDma_AddressType remoteAddressType, 185 - u64 localAddrOrTce, u64 remoteAddrOrTce, u32 transferLength) 186 - { 187 - /* Pack the misc bits into a single Dword to pass to PLIC */ 188 - union { 189 - struct HvCallEvent_PackedDmaParms parms; 190 - u64 dword; 191 - } packed; 192 - 193 - packed.parms.xDirection = direction; 194 - packed.parms.xLocalAddrType = localAddressType; 195 - packed.parms.xRemoteAddrType = remoteAddressType; 196 - packed.parms.xRsvd1 = 0; 197 - packed.parms.xRemoteLp = remoteLp; 198 - packed.parms.xType = type; 199 - packed.parms.xRsvd2 = 0; 200 - packed.parms.xLocalInstId = localInstanceId; 201 - packed.parms.xRemoteInstId = remoteInstanceId; 202 - 203 - return (HvLpDma_Rc)HvCall4(HvCallEventDmaSingle, packed.dword, 204 - localAddrOrTce, remoteAddrOrTce, transferLength); 205 - } 206 - 207 221 static inline HvLpDma_Rc HvCallEvent_dmaToSp(void *local, u32 remote, 208 222 u32 length, HvLpDma_Direction dir) 209 223 { 210 - u64 abs_addr; 211 - 212 - abs_addr = virt_to_abs(local); 213 - return HvCall4(HvCallEventDmaToSp, abs_addr, remote, length, dir); 224 + return HvCall4(HvCallEventDmaToSp, virt_to_abs(local), remote, 225 + length, dir); 214 226 } 215 227 216 228 #endif /* _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H */
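The packed-parms rewrite above depends on PowerPC's big-endian bitfield layout: the old 1-bit fields were allocated from the most significant bit of the byte downward, so the open-coded shifts produce the identical byte. A minimal stand-alone sketch of the packing (hypothetical test harness, not kernel code; the values are made up):

	#include <stdio.h>

	typedef unsigned char u8;

	int main(void)
	{
		u8 ackType = 1, ackInd = 0, targetLp = 5;	/* 5-bit LP index */

		/* MSB-first, matching the old xAckType/xAckInd/xRsvd/xTargetLp
		 * layout: bit 7 = ack type, bit 6 = ack indicator, bit 5 stays
		 * zero (reserved), bits 4..0 = target LP. */
		u8 ack_and_target = (ackType << 7) | (ackInd << 6) | targetLp;

		printf("0x%02x\n", ack_and_target);	/* prints 0x85 */
		return 0;
	}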
-1
include/asm-powerpc/iseries/hv_call_sc.h
··· 1 1 /* 2 - * HvCallSc.h 3 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation 4 3 * 5 4 * This program is free software; you can redistribute it and/or modify
-1
include/asm-powerpc/iseries/hv_lp_config.h
··· 1 1 /* 2 - * HvLpConfig.h 3 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation 4 3 * 5 4 * This program is free software; you can redistribute it and/or modify
+31 -11
include/asm-powerpc/iseries/hv_lp_event.h
··· 1 1 /* 2 - * HvLpEvent.h 3 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation 4 3 * 5 4 * This program is free software; you can redistribute it and/or modify ··· 31 32 * partitions through PLIC. 32 33 */ 33 34 34 - struct HvEventFlags { 35 - u8 xValid:1; /* Indicates a valid request x00-x00 */ 36 - u8 xRsvd1:4; /* Reserved ... */ 37 - u8 xAckType:1; /* Immediate or deferred ... */ 38 - u8 xAckInd:1; /* Indicates if ACK required ... */ 39 - u8 xFunction:1; /* Interrupt or Acknowledge ... */ 40 - }; 41 - 42 - 43 35 struct HvLpEvent { 44 - struct HvEventFlags xFlags; /* Event flags x00-x00 */ 36 + u8 flags; /* Event flags x00-x00 */ 45 37 u8 xType; /* Type of message x01-x01 */ 46 38 u16 xSubtype; /* Subtype for event x02-x03 */ 47 39 u8 xSourceLp; /* Source LP x04-x04 */ ··· 116 126 #define HvLpEvent_AckType_ImmediateAck 0 117 127 #define HvLpEvent_AckType_DeferredAck 1 118 128 129 + #define HV_LP_EVENT_INT 0x01 130 + #define HV_LP_EVENT_DO_ACK 0x02 131 + #define HV_LP_EVENT_DEFERRED_ACK 0x04 132 + #define HV_LP_EVENT_VALID 0x80 133 + 119 134 #define HvLpDma_Direction_LocalToRemote 0 120 135 #define HvLpDma_Direction_RemoteToLocal 1 121 136 ··· 133 138 #define HvLpDma_Rc_PathClosed 3 134 139 #define HvLpDma_Rc_InvalidAddress 4 135 140 #define HvLpDma_Rc_InvalidLength 5 141 + 142 + static inline int hvlpevent_is_valid(struct HvLpEvent *h) 143 + { 144 + return h->flags & HV_LP_EVENT_VALID; 145 + } 146 + 147 + static inline void hvlpevent_invalidate(struct HvLpEvent *h) 148 + { 149 + h->flags &= ~ HV_LP_EVENT_VALID; 150 + } 151 + 152 + static inline int hvlpevent_is_int(struct HvLpEvent *h) 153 + { 154 + return h->flags & HV_LP_EVENT_INT; 155 + } 156 + 157 + static inline int hvlpevent_is_ack(struct HvLpEvent *h) 158 + { 159 + return !hvlpevent_is_int(h); 160 + } 161 + 162 + static inline int hvlpevent_need_ack(struct HvLpEvent *h) 163 + { 164 + return h->flags & HV_LP_EVENT_DO_ACK; 165 + } 136 166 137 167 #endif /* _ASM_POWERPC_ISERIES_HV_LP_EVENT_H */
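The new HV_LP_EVENT_* masks and inline helpers replace direct reads and writes of the old HvEventFlags bitfields. A sketch of how a call site converts (hypothetical handler; real call sites may differ):

	static void handle_event(struct HvLpEvent *event)
	{
		if (!hvlpevent_is_valid(event))		/* was: event->xFlags.xValid */
			return;
		if (hvlpevent_is_int(event) && hvlpevent_need_ack(event))
			HvCallEvent_ackLpEvent(event);	/* was: xFunction/xAckInd tests */
		hvlpevent_invalidate(event);		/* was: event->xFlags.xValid = 0 */
	}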
-1
include/asm-powerpc/iseries/hv_types.h
··· 1 1 /* 2 - * HvTypes.h 3 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation 4 3 * 5 4 * This program is free software; you can redistribute it and/or modify
+13 -1
include/asm-powerpc/iseries/iseries_io.h
··· 6 6 #ifdef CONFIG_PPC_ISERIES 7 7 #include <linux/types.h> 8 8 /* 9 - * File iSeries_io.h created by Allan Trautman on Thu Dec 28 2000. 9 + * Created by Allan Trautman on Thu Dec 28 2000. 10 10 * 11 11 * Remaps the io.h for the iSeries Io 12 12 * Copyright (C) 2000 Allan H Trautman, IBM Corporation ··· 32 32 * End Change Activity 33 33 */ 34 34 35 + #ifdef CONFIG_PCI 35 36 extern u8 iSeries_Read_Byte(const volatile void __iomem * IoAddress); 36 37 extern u16 iSeries_Read_Word(const volatile void __iomem * IoAddress); 37 38 extern u32 iSeries_Read_Long(const volatile void __iomem * IoAddress); ··· 45 44 size_t n); 46 45 extern void iSeries_memcpy_fromio(void *dest, 47 46 const volatile void __iomem *source, size_t n); 47 + #else 48 + static inline u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress) 49 + { 50 + return 0xff; 51 + } 52 + 53 + static inline void iSeries_Write_Byte(u8 IoData, 54 + volatile void __iomem *IoAddress) 55 + { 56 + } 57 + #endif /* CONFIG_PCI */ 48 58 49 59 #endif /* CONFIG_PPC_ISERIES */ 50 60 #endif /* _ASM_POWERPC_ISERIES_ISERIES_IO_H */
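With the CONFIG_PCI guard, callers can use the accessors unconditionally and CONFIG_PCI=n builds still link; the byte stub returns 0xff, the same all-ones value a read of a non-existent device yields on a real PCI bus. The pattern from the caller's side (hypothetical kernel-style probe helper, assuming an already-mapped addr and <linux/errno.h>):

	static int iseries_probe_byte(const volatile void __iomem *addr)
	{
		u8 val = iSeries_Read_Byte(addr);	/* stub: always 0xff */

		return (val == 0xff) ? -ENODEV : val;
	}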
-1
include/asm-powerpc/iseries/it_exp_vpd_panel.h
··· 1 1 /* 2 - * ItExtVpdPanel.h 3 2 * Copyright (C) 2002 Dave Boutcher IBM Corporation 4 3 * 5 4 * This program is free software; you can redistribute it and/or modify
+11 -11
include/asm-powerpc/iseries/it_lp_naca.h
··· 1 1 /* 2 - * ItLpNaca.h 3 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation 4 3 * 5 4 * This program is free software; you can redistribute it and/or modify ··· 36 37 u8 xLpIndex; // LP Index x0B-x0B 37 38 u16 xMaxLpQueues; // Number of allocated queues x0C-x0D 38 39 u16 xLpQueueOffset; // Offset to start of LP queues x0E-x0F 39 - u8 xPirEnvironMode:8; // Piranha or hardware x10-x10 40 - u8 xPirConsoleMode:8; // Piranha console indicator x11-x11 41 - u8 xPirDasdMode:8; // Piranha dasd indicator x12-x12 40 + u8 xPirEnvironMode; // Piranha or hardware x10-x10 41 + u8 xPirConsoleMode; // Piranha console indicator x11-x11 42 + u8 xPirDasdMode; // Piranha dasd indicator x12-x12 42 43 u8 xRsvd1_0[5]; // Reserved for Piranha related x13-x17 43 - u8 xLparInstalled:1; // Is LPAR installed on system x18-x1F 44 - u8 xSysPartitioned:1; // Is the system partitioned ... 45 - u8 xHwSyncedTBs:1; // Hardware synced TBs ... 46 - u8 xIntProcUtilHmt:1; // Utilize HMT for interrupts ... 47 - u8 xRsvd1_1:4; // Reserved ... 48 - u8 xSpVpdFormat:8; // VPD areas are in CSP format ... 49 - u8 xIntProcRatio:8; // Ratio of int procs to procs ... 44 + u8 flags; // flags, see below x18-x1F 45 + u8 xSpVpdFormat; // VPD areas are in CSP format ... 46 + u8 xIntProcRatio; // Ratio of int procs to procs ... 50 47 u8 xRsvd1_2[5]; // Reserved ... 51 48 u16 xRsvd1_3; // Reserved x20-x21 52 49 u16 xPlicVrmIndex; // VRM index of PLIC x22-x23 ··· 71 76 }; 72 77 73 78 extern struct ItLpNaca itLpNaca; 79 + 80 + #define ITLPNACA_LPAR 0x80 /* Is LPAR installed on the system */ 81 + #define ITLPNACA_PARTITIONED 0x40 /* Is the system partitioned */ 82 + #define ITLPNACA_HWSYNCEDTBS 0x20 /* Hardware synced TBs */ 83 + #define ITLPNACA_HMTINT 0x10 /* Utilize HMT for interrupts */ 74 84 75 85 #endif /* _ASM_POWERPC_ISERIES_IT_LP_NACA_H */
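As with the LP event flags, four 1-bit fields collapse into a single flags byte tested against the masks above, e.g. (hypothetical helper; the byte keeps the old MSB-first bit positions):

	static inline int iseries_is_partitioned(void)
	{
		return (itLpNaca.flags & ITLPNACA_LPAR) &&
			(itLpNaca.flags & ITLPNACA_PARTITIONED);
	}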
-1
include/asm-powerpc/iseries/it_lp_queue.h
··· 1 1 /* 2 - * ItLpQueue.h 3 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation 4 3 * 5 4 * This program is free software; you can redistribute it and/or modify
+1 -2
include/asm-powerpc/iseries/it_lp_reg_save.h
··· 1 1 /* 2 - * ItLpRegSave.h 3 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation 4 3 * 5 4 * This program is free software; you can redistribute it and/or modify ··· 82 83 83 84 extern struct ItLpRegSave iseries_reg_save[]; 84 85 85 - #endif /* _ITLPREGSAVE_H */ 86 + #endif /* _ASM_POWERPC_ISERIES_IT_LP_REG_SAVE_H */
-1
include/asm-powerpc/iseries/lpar_map.h
··· 1 1 /* 2 - * LparMap.h 3 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation 4 3 * 5 4 * This program is free software; you can redistribute it and/or modify
-1
include/asm-powerpc/iseries/mf.h
··· 1 1 /* 2 - * mf.h 3 2 * Copyright (C) 2001 Troy D. Armstrong IBM Corporation 4 3 * Copyright (C) 2004 Stephen Rothwell IBM Corporation 5 4 *
-1
include/asm-powerpc/iseries/vio.h
··· 1 1 /* -*- linux-c -*- 2 - * drivers/char/vio.h 3 2 * 4 3 * iSeries Virtual I/O Message Path header 5 4 *
-2
include/asm-powerpc/pci-bridge.h
··· 142 142 143 143 extern int pcibios_remove_root_bus(struct pci_controller *phb); 144 144 145 - extern void phbs_remap_io(void); 146 - 147 145 static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus) 148 146 { 149 147 struct device_node *busdn = bus->sysdata;
+8
include/asm-powerpc/system.h
··· 133 133 extern void cvt_fd(float *from, double *to, struct thread_struct *thread); 134 134 extern void cvt_df(double *from, float *to, struct thread_struct *thread); 135 135 136 + #ifndef CONFIG_SMP 137 + extern void discard_lazy_cpu_state(void); 138 + #else 139 + static inline void discard_lazy_cpu_state(void) 140 + { 141 + } 142 + #endif 143 + 136 144 #ifdef CONFIG_ALTIVEC 137 145 extern void flush_altivec_to_thread(struct task_struct *); 138 146 #else
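The split declaration keeps call sites free of #ifdefs: lazy FP/Altivec/SPE switching is only done on uniprocessor kernels, so SMP builds get an empty inline that the compiler optimizes away. Shape of an intended caller (hypothetical, e.g. on an exec or signal path):

	static void flush_old_thread_state(void)
	{
		/* UP: drop any lazily-held FP/VMX/SPE ownership;
		 * SMP: expands to nothing. */
		discard_lazy_cpu_state();
	}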
+10 -2
include/asm-ppc/system.h
··· 4 4 #ifndef __PPC_SYSTEM_H 5 5 #define __PPC_SYSTEM_H 6 6 7 - #include <linux/config.h> 8 7 #include <linux/kernel.h> 9 8 10 9 #include <asm/atomic.h> ··· 38 39 #ifdef CONFIG_SMP 39 40 #define smp_mb() mb() 40 41 #define smp_rmb() rmb() 41 - #define smp_wmb() wmb() 42 + #define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory") 42 43 #define smp_read_barrier_depends() read_barrier_depends() 43 44 #else 44 45 #define smp_mb() barrier() ··· 73 74 extern void read_rtc_time(void); 74 75 extern void pmac_find_display(void); 75 76 extern void giveup_fpu(struct task_struct *); 77 + extern void disable_kernel_fp(void); 76 78 extern void enable_kernel_fp(void); 77 79 extern void flush_fp_to_thread(struct task_struct *); 78 80 extern void enable_kernel_altivec(void); ··· 85 85 extern int fix_alignment(struct pt_regs *); 86 86 extern void cvt_fd(float *from, double *to, struct thread_struct *thread); 87 87 extern void cvt_df(double *from, float *to, struct thread_struct *thread); 88 + 89 + #ifndef CONFIG_SMP 90 + extern void discard_lazy_cpu_state(void); 91 + #else 92 + static inline void discard_lazy_cpu_state(void) 93 + { 94 + } 95 + #endif 88 96 89 97 #ifdef CONFIG_ALTIVEC 90 98 extern void flush_altivec_to_thread(struct task_struct *);
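Spelling smp_wmb() as an explicit eieio decouples it from wmb(): ordering cacheable stores as seen by other CPUs only needs the cheaper eieio, while wmb() has to remain strong enough to also order accesses to device memory. Classic use (illustrative producer/consumer, not code from this patch):

	static int data, ready;

	static void producer(void)
	{
		data = 42;
		smp_wmb();	/* eieio: order the two cacheable stores */
		ready = 1;
	}

	static int consumer(void)
	{
		if (!ready)
			return -1;
		smp_rmb();	/* pairs with the producer's smp_wmb() */
		return data;
	}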