Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Remove FW_FEATURE ISERIES from arch code

This is no longer selectable, so just remove all the dependent code.

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

Authored by Stephen Rothwell; committed by Benjamin Herrenschmidt.
f5339277 ec86b45a

+26 -367
+1 -20
arch/powerpc/include/asm/abs_addr.h
··· 17 17 #include <asm/types.h> 18 18 #include <asm/page.h> 19 19 #include <asm/prom.h> 20 - #include <asm/firmware.h> 21 20 22 21 struct mschunks_map { 23 22 unsigned long num_chunks; ··· 45 46 46 47 static inline unsigned long phys_to_abs(unsigned long pa) 47 48 { 48 - unsigned long chunk; 49 - 50 - /* This is a no-op on non-iSeries */ 51 - if (!firmware_has_feature(FW_FEATURE_ISERIES)) 52 - return pa; 53 - 54 - chunk = addr_to_chunk(pa); 55 - 56 - if (chunk < mschunks_map.num_chunks) 57 - chunk = mschunks_map.mapping[chunk]; 58 - 59 - return chunk_to_addr(chunk) + (pa & MSCHUNKS_OFFSET_MASK); 49 + return pa; 60 50 } 61 51 62 52 /* Convenience macros */ 63 53 #define virt_to_abs(va) phys_to_abs(__pa(va)) 64 54 #define abs_to_virt(aa) __va(aa) 65 - 66 - /* 67 - * Converts Virtual Address to Real Address for 68 - * Legacy iSeries Hypervisor calls 69 - */ 70 - #define iseries_hv_addr(virtaddr) \ 71 - (0x8000000000000000UL | virt_to_abs(virtaddr)) 72 55 73 56 #endif /* __KERNEL__ */ 74 57 #endif /* _ASM_POWERPC_ABS_ADDR_H */
-9
arch/powerpc/include/asm/firmware.h
··· 41 41 #define FW_FEATURE_XDABR ASM_CONST(0x0000000000040000) 42 42 #define FW_FEATURE_MULTITCE ASM_CONST(0x0000000000080000) 43 43 #define FW_FEATURE_SPLPAR ASM_CONST(0x0000000000100000) 44 - #define FW_FEATURE_ISERIES ASM_CONST(0x0000000000200000) 45 44 #define FW_FEATURE_LPAR ASM_CONST(0x0000000000400000) 46 45 #define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000) 47 46 #define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000) ··· 64 65 FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR | 65 66 FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO, 66 67 FW_FEATURE_PSERIES_ALWAYS = 0, 67 - FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, 68 - FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, 69 68 FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2, 70 69 FW_FEATURE_POWERNV_ALWAYS = 0, 71 70 FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, ··· 75 78 FW_FEATURE_POSSIBLE = 76 79 #ifdef CONFIG_PPC_PSERIES 77 80 FW_FEATURE_PSERIES_POSSIBLE | 78 - #endif 79 - #ifdef CONFIG_PPC_ISERIES 80 - FW_FEATURE_ISERIES_POSSIBLE | 81 81 #endif 82 82 #ifdef CONFIG_PPC_POWERNV 83 83 FW_FEATURE_POWERNV_POSSIBLE | ··· 92 98 FW_FEATURE_ALWAYS = 93 99 #ifdef CONFIG_PPC_PSERIES 94 100 FW_FEATURE_PSERIES_ALWAYS & 95 - #endif 96 - #ifdef CONFIG_PPC_ISERIES 97 - FW_FEATURE_ISERIES_ALWAYS & 98 101 #endif 99 102 #ifdef CONFIG_PPC_POWERNV 100 103 FW_FEATURE_POWERNV_ALWAYS &
-15
arch/powerpc/include/asm/time.h
··· 18 18 #include <linux/percpu.h> 19 19 20 20 #include <asm/processor.h> 21 - #ifdef CONFIG_PPC_ISERIES 22 - #include <asm/paca.h> 23 - #include <asm/firmware.h> 24 - #include <asm/iseries/hv_call.h> 25 - #endif 26 21 27 22 /* time.c */ 28 23 extern unsigned long tb_ticks_per_jiffy; ··· 162 167 #ifndef CONFIG_BOOKE 163 168 --val; 164 169 #endif 165 - #ifdef CONFIG_PPC_ISERIES 166 - if (firmware_has_feature(FW_FEATURE_ISERIES) && 167 - get_lppaca()->shared_proc) { 168 - get_lppaca()->virtual_decr = val; 169 - if (get_dec() > val) 170 - HvCall_setVirtualDecr(); 171 - return; 172 - } 173 - #endif 174 170 mtspr(SPRN_DEC, val); 175 171 #endif /* not 40x or 8xx_CPU6 */ 176 172 } ··· 203 217 #endif 204 218 205 219 extern void secondary_cpu_time_init(void); 206 - extern void iSeries_time_init_early(void); 207 220 208 221 DECLARE_PER_CPU(u64, decrementers_next_tb); 209 222
-14
arch/powerpc/kernel/irq.c
··· 211 211 * External interrupt events on non-iseries will have caused 212 212 * interrupts to be hard-disabled, so there is no problem, we 213 213 * cannot have preempted. 214 - * 215 - * That leaves us with EEs on iSeries or decrementer interrupts, 216 - * which I decided to safely ignore. The preemption would have 217 - * itself been the result of an interrupt, upon which return we 218 - * will have checked for pending events on the old CPU. 219 214 */ 220 215 irq_happened = get_irq_happened(); 221 216 if (!irq_happened) ··· 452 457 453 458 irq_exit(); 454 459 set_irq_regs(old_regs); 455 - 456 - #ifdef CONFIG_PPC_ISERIES 457 - if (firmware_has_feature(FW_FEATURE_ISERIES) && 458 - get_lppaca()->int_dword.fields.decr_int) { 459 - get_lppaca()->int_dword.fields.decr_int = 0; 460 - /* Signal a fake decrementer interrupt */ 461 - timer_interrupt(regs); 462 - } 463 - #endif 464 460 465 461 trace_irq_exit(regs); 466 462 }
-3
arch/powerpc/kernel/isa-bridge.c
··· 29 29 #include <asm/pci-bridge.h> 30 30 #include <asm/machdep.h> 31 31 #include <asm/ppc-pci.h> 32 - #include <asm/firmware.h> 33 32 34 33 unsigned long isa_io_base; /* NULL if no ISA bus */ 35 34 EXPORT_SYMBOL(isa_io_base); ··· 260 261 */ 261 262 static int __init isa_bridge_init(void) 262 263 { 263 - if (firmware_has_feature(FW_FEATURE_ISERIES)) 264 - return 0; 265 264 bus_register_notifier(&pci_bus_type, &isa_bridge_notifier); 266 265 return 0; 267 266 }
+7 -101
arch/powerpc/kernel/lparcfg.c
··· 26 26 #include <linux/seq_file.h> 27 27 #include <linux/slab.h> 28 28 #include <asm/uaccess.h> 29 - #include <asm/iseries/hv_lp_config.h> 30 29 #include <asm/lppaca.h> 31 30 #include <asm/hvcall.h> 32 31 #include <asm/firmware.h> ··· 54 55 int cpu; 55 56 56 57 for_each_possible_cpu(cpu) { 57 - if (firmware_has_feature(FW_FEATURE_ISERIES)) 58 - sum_purr += lppaca_of(cpu).emulated_time_base; 59 - else { 60 - struct cpu_usage *cu; 58 + struct cpu_usage *cu; 61 59 62 - cu = &per_cpu(cpu_usage_array, cpu); 63 - sum_purr += cu->current_tb; 64 - } 60 + cu = &per_cpu(cpu_usage_array, cpu); 61 + sum_purr += cu->current_tb; 65 62 } 66 63 return sum_purr; 67 64 } 68 65 69 - #ifdef CONFIG_PPC_ISERIES 70 - 71 - /* 72 - * Methods used to fetch LPAR data when running on an iSeries platform. 73 - */ 74 - static int iseries_lparcfg_data(struct seq_file *m, void *v) 75 - { 76 - unsigned long pool_id; 77 - int shared, entitled_capacity, max_entitled_capacity; 78 - int processors, max_processors; 79 - unsigned long purr = get_purr(); 80 - 81 - shared = (int)(local_paca->lppaca_ptr->shared_proc); 82 - 83 - seq_printf(m, "system_active_processors=%d\n", 84 - (int)HvLpConfig_getSystemPhysicalProcessors()); 85 - 86 - seq_printf(m, "system_potential_processors=%d\n", 87 - (int)HvLpConfig_getSystemPhysicalProcessors()); 88 - 89 - processors = (int)HvLpConfig_getPhysicalProcessors(); 90 - seq_printf(m, "partition_active_processors=%d\n", processors); 91 - 92 - max_processors = (int)HvLpConfig_getMaxPhysicalProcessors(); 93 - seq_printf(m, "partition_potential_processors=%d\n", max_processors); 94 - 95 - if (shared) { 96 - entitled_capacity = HvLpConfig_getSharedProcUnits(); 97 - max_entitled_capacity = HvLpConfig_getMaxSharedProcUnits(); 98 - } else { 99 - entitled_capacity = processors * 100; 100 - max_entitled_capacity = max_processors * 100; 101 - } 102 - seq_printf(m, "partition_entitled_capacity=%d\n", entitled_capacity); 103 - 104 - seq_printf(m, 
"partition_max_entitled_capacity=%d\n", 105 - max_entitled_capacity); 106 - 107 - if (shared) { 108 - pool_id = HvLpConfig_getSharedPoolIndex(); 109 - seq_printf(m, "pool=%d\n", (int)pool_id); 110 - seq_printf(m, "pool_capacity=%d\n", 111 - (int)(HvLpConfig_getNumProcsInSharedPool(pool_id) * 112 - 100)); 113 - seq_printf(m, "purr=%ld\n", purr); 114 - } 115 - 116 - seq_printf(m, "shared_processor_mode=%d\n", shared); 117 - 118 - return 0; 119 - } 120 - 121 - #else /* CONFIG_PPC_ISERIES */ 122 - 123 - static int iseries_lparcfg_data(struct seq_file *m, void *v) 124 - { 125 - return 0; 126 - } 127 - 128 - #endif /* CONFIG_PPC_ISERIES */ 129 - 130 - #ifdef CONFIG_PPC_PSERIES 131 66 /* 132 67 * Methods used to fetch LPAR data when running on a pSeries platform. 133 68 */ ··· 581 648 u8 new_weight, *new_weight_ptr = &new_weight; 582 649 ssize_t retval; 583 650 584 - if (!firmware_has_feature(FW_FEATURE_SPLPAR) || 585 - firmware_has_feature(FW_FEATURE_ISERIES)) 651 + if (!firmware_has_feature(FW_FEATURE_SPLPAR)) 586 652 return -EINVAL; 587 653 588 654 if (count > kbuf_sz) ··· 641 709 return retval; 642 710 } 643 711 644 - #else /* CONFIG_PPC_PSERIES */ 645 - 646 - static int pseries_lparcfg_data(struct seq_file *m, void *v) 647 - { 648 - return 0; 649 - } 650 - 651 - static ssize_t lparcfg_write(struct file *file, const char __user * buf, 652 - size_t count, loff_t * off) 653 - { 654 - return -EINVAL; 655 - } 656 - 657 - #endif /* CONFIG_PPC_PSERIES */ 658 - 659 712 static int lparcfg_data(struct seq_file *m, void *v) 660 713 { 661 714 struct device_node *rootdn; ··· 655 738 rootdn = of_find_node_by_path("/"); 656 739 if (rootdn) { 657 740 tmp = of_get_property(rootdn, "model", NULL); 658 - if (tmp) { 741 + if (tmp) 659 742 model = tmp; 660 - /* Skip "IBM," - see platforms/iseries/dt.c */ 661 - if (firmware_has_feature(FW_FEATURE_ISERIES)) 662 - model += 4; 663 - } 664 743 tmp = of_get_property(rootdn, "system-id", NULL); 665 - if (tmp) { 744 + if (tmp) 666 745 system_id 
= tmp; 667 - /* Skip "IBM," - see platforms/iseries/dt.c */ 668 - if (firmware_has_feature(FW_FEATURE_ISERIES)) 669 - system_id += 4; 670 - } 671 746 lp_index_ptr = of_get_property(rootdn, "ibm,partition-no", 672 747 NULL); 673 748 if (lp_index_ptr) ··· 670 761 seq_printf(m, "system_type=%s\n", model); 671 762 seq_printf(m, "partition_id=%d\n", (int)lp_index); 672 763 673 - if (firmware_has_feature(FW_FEATURE_ISERIES)) 674 - return iseries_lparcfg_data(m, v); 675 764 return pseries_lparcfg_data(m, v); 676 765 } 677 766 ··· 693 786 umode_t mode = S_IRUSR | S_IRGRP | S_IROTH; 694 787 695 788 /* Allow writing if we have FW_FEATURE_SPLPAR */ 696 - if (firmware_has_feature(FW_FEATURE_SPLPAR) && 697 - !firmware_has_feature(FW_FEATURE_ISERIES)) 789 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) 698 790 mode |= S_IWUSR; 699 791 700 792 ent = proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops);
+3 -9
arch/powerpc/kernel/paca.c
··· 11 11 #include <linux/export.h> 12 12 #include <linux/memblock.h> 13 13 14 - #include <asm/firmware.h> 15 14 #include <asm/lppaca.h> 16 15 #include <asm/paca.h> 17 16 #include <asm/sections.h> 18 17 #include <asm/pgtable.h> 19 - #include <asm/iseries/lpar_map.h> 20 - #include <asm/iseries/hv_types.h> 21 18 #include <asm/kexec.h> 22 19 23 20 /* This symbol is provided by the linker - let it fill in the paca ··· 27 30 * The structure which the hypervisor knows about - this structure 28 31 * should not cross a page boundary. The vpa_init/register_vpa call 29 32 * is now known to fail if the lppaca structure crosses a page 30 - * boundary. The lppaca is also used on legacy iSeries and POWER5 31 - * pSeries boxes. The lppaca is 640 bytes long, and cannot readily 33 + * boundary. The lppaca is also used on POWER5 pSeries boxes. 34 + * The lppaca is 640 bytes long, and cannot readily 32 35 * change since the hypervisor knows its layout, so a 1kB alignment 33 36 * will suffice to ensure that it doesn't cross a page boundary. 34 37 */ ··· 180 183 /* 181 184 * We can't take SLB misses on the paca, and we want to access them 182 185 * in real mode, so allocate them within the RMA and also within 183 - * the first segment. On iSeries they must be within the area mapped 184 - * by the HV, which is HvPagesToMap * HVPAGESIZE bytes. 186 + * the first segment. 185 187 */ 186 188 limit = min(0x10000000ULL, ppc64_rma_size); 187 - if (firmware_has_feature(FW_FEATURE_ISERIES)) 188 - limit = min(limit, HvPagesToMap * HVPAGESIZE); 189 189 190 190 paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); 191 191
-15
arch/powerpc/kernel/pci-common.c
··· 38 38 #include <asm/byteorder.h> 39 39 #include <asm/machdep.h> 40 40 #include <asm/ppc-pci.h> 41 - #include <asm/firmware.h> 42 41 #include <asm/eeh.h> 43 42 44 43 static DEFINE_SPINLOCK(hose_spinlock); ··· 217 218 { 218 219 struct of_irq oirq; 219 220 unsigned int virq; 220 - 221 - /* The current device-tree that iSeries generates from the HV 222 - * PCI informations doesn't contain proper interrupt routing, 223 - * and all the fallback would do is print out crap, so we 224 - * don't attempt to resolve the interrupts here at all, some 225 - * iSeries specific fixup does it. 226 - * 227 - * In the long run, we will hopefully fix the generated device-tree 228 - * instead. 229 - */ 230 - #ifdef CONFIG_PPC_ISERIES 231 - if (firmware_has_feature(FW_FEATURE_ISERIES)) 232 - return -1; 233 - #endif 234 221 235 222 pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev)); 236 223
+2 -5
arch/powerpc/kernel/sysfs.c
··· 12 12 #include <asm/current.h> 13 13 #include <asm/processor.h> 14 14 #include <asm/cputable.h> 15 - #include <asm/firmware.h> 16 15 #include <asm/hvcall.h> 17 16 #include <asm/prom.h> 18 17 #include <asm/machdep.h> ··· 340 341 int i, nattrs; 341 342 342 343 #ifdef CONFIG_PPC64 343 - if (!firmware_has_feature(FW_FEATURE_ISERIES) && 344 - cpu_has_feature(CPU_FTR_SMT)) 344 + if (cpu_has_feature(CPU_FTR_SMT)) 345 345 device_create_file(s, &dev_attr_smt_snooze_delay); 346 346 #endif 347 347 ··· 412 414 BUG_ON(!c->hotpluggable); 413 415 414 416 #ifdef CONFIG_PPC64 415 - if (!firmware_has_feature(FW_FEATURE_ISERIES) && 416 - cpu_has_feature(CPU_FTR_SMT)) 417 + if (cpu_has_feature(CPU_FTR_SMT)) 417 418 device_remove_file(s, &dev_attr_smt_snooze_delay); 418 419 #endif 419 420
+3 -105
arch/powerpc/kernel/time.c
··· 17 17 * 18 18 * TODO (not necessarily in this file): 19 19 * - improve precision and reproducibility of timebase frequency 20 - * measurement at boot time. (for iSeries, we calibrate the timebase 21 - * against the Titan chip's clock.) 20 + * measurement at boot time. 22 21 * - for astronomical applications: add a new function to get 23 22 * non ambiguous timestamps even around leap seconds. This needs 24 23 * a new timestamp format and a good name. ··· 69 70 #include <asm/vdso_datapage.h> 70 71 #include <asm/firmware.h> 71 72 #include <asm/cputime.h> 72 - #ifdef CONFIG_PPC_ISERIES 73 - #include <asm/iseries/it_lp_queue.h> 74 - #include <asm/iseries/hv_call_xm.h> 75 - #endif 76 73 77 74 /* powerpc clocksource/clockevent code */ 78 75 ··· 111 116 112 117 DEFINE_PER_CPU(u64, decrementers_next_tb); 113 118 static DEFINE_PER_CPU(struct clock_event_device, decrementers); 114 - 115 - #ifdef CONFIG_PPC_ISERIES 116 - static unsigned long __initdata iSeries_recal_titan; 117 - static signed long __initdata iSeries_recal_tb; 118 - 119 - /* Forward declaration is only needed for iSereis compiles */ 120 - static void __init clocksource_init(void); 121 - #endif 122 119 123 120 #define XSEC_PER_SEC (1024*1024) 124 121 ··· 410 423 EXPORT_SYMBOL(profile_pc); 411 424 #endif 412 425 413 - #ifdef CONFIG_PPC_ISERIES 414 - 415 - /* 416 - * This function recalibrates the timebase based on the 49-bit time-of-day 417 - * value in the Titan chip. The Titan is much more accurate than the value 418 - * returned by the service processor for the timebase frequency. 
419 - */ 420 - 421 - static int __init iSeries_tb_recal(void) 422 - { 423 - unsigned long titan, tb; 424 - 425 - /* Make sure we only run on iSeries */ 426 - if (!firmware_has_feature(FW_FEATURE_ISERIES)) 427 - return -ENODEV; 428 - 429 - tb = get_tb(); 430 - titan = HvCallXm_loadTod(); 431 - if ( iSeries_recal_titan ) { 432 - unsigned long tb_ticks = tb - iSeries_recal_tb; 433 - unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12; 434 - unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec; 435 - unsigned long new_tb_ticks_per_jiffy = 436 - DIV_ROUND_CLOSEST(new_tb_ticks_per_sec, HZ); 437 - long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy; 438 - char sign = '+'; 439 - /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */ 440 - new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ; 441 - 442 - if ( tick_diff < 0 ) { 443 - tick_diff = -tick_diff; 444 - sign = '-'; 445 - } 446 - if ( tick_diff ) { 447 - if ( tick_diff < tb_ticks_per_jiffy/25 ) { 448 - printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n", 449 - new_tb_ticks_per_jiffy, sign, tick_diff ); 450 - tb_ticks_per_jiffy = new_tb_ticks_per_jiffy; 451 - tb_ticks_per_sec = new_tb_ticks_per_sec; 452 - calc_cputime_factors(); 453 - vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; 454 - setup_cputime_one_jiffy(); 455 - } 456 - else { 457 - printk( "Titan recalibrate: FAILED (difference > 4 percent)\n" 458 - " new tb_ticks_per_jiffy = %lu\n" 459 - " old tb_ticks_per_jiffy = %lu\n", 460 - new_tb_ticks_per_jiffy, tb_ticks_per_jiffy ); 461 - } 462 - } 463 - } 464 - iSeries_recal_titan = titan; 465 - iSeries_recal_tb = tb; 466 - 467 - /* Called here as now we know accurate values for the timebase */ 468 - clocksource_init(); 469 - return 0; 470 - } 471 - late_initcall(iSeries_tb_recal); 472 - 473 - /* Called from platform early init */ 474 - void __init iSeries_time_init_early(void) 475 - { 476 - iSeries_recal_tb = get_tb(); 477 - iSeries_recal_titan 
= HvCallXm_loadTod(); 478 - } 479 - #endif /* CONFIG_PPC_ISERIES */ 480 - 481 426 #ifdef CONFIG_IRQ_WORK 482 427 483 428 /* ··· 466 547 #endif /* CONFIG_IRQ_WORK */ 467 548 468 549 /* 469 - * For iSeries shared processors, we have to let the hypervisor 470 - * set the hardware decrementer. We set a virtual decrementer 471 - * in the lppaca and call the hypervisor if the virtual 472 - * decrementer is less than the current value in the hardware 473 - * decrementer. (almost always the new decrementer value will 474 - * be greater than the current hardware decementer so the hypervisor 475 - * call will not be needed) 476 - */ 477 - 478 - /* 479 550 * timer_interrupt - gets called when the decrementer overflows, 480 551 * with interrupts disabled. 481 552 */ ··· 508 599 irq_work_run(); 509 600 } 510 601 511 - #ifdef CONFIG_PPC_ISERIES 512 - if (firmware_has_feature(FW_FEATURE_ISERIES)) 513 - get_lppaca()->int_dword.fields.decr_int = 0; 514 - #endif 515 - 516 602 *next_tb = ~(u64)0; 517 603 if (evt->event_handler) 518 604 evt->event_handler(evt); 519 - 520 - #ifdef CONFIG_PPC_ISERIES 521 - if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) 522 - process_hvlpevents(); 523 - #endif 524 605 525 606 #ifdef CONFIG_PPC64 526 607 /* collect purr register values often, for accurate calculations */ ··· 883 984 */ 884 985 start_cpu_decrementer(); 885 986 886 - /* Register the clocksource, if we're not running on iSeries */ 887 - if (!firmware_has_feature(FW_FEATURE_ISERIES)) 888 - clocksource_init(); 987 + /* Register the clocksource */ 988 + clocksource_init(); 889 989 890 990 init_decrementer_clockevent(); 891 991 }
+5 -19
arch/powerpc/lib/locks.c
··· 19 19 #include <linux/smp.h> 20 20 21 21 /* waiting for a spinlock... */ 22 - #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) 22 + #if defined(CONFIG_PPC_SPLPAR) 23 23 #include <asm/hvcall.h> 24 - #include <asm/iseries/hv_call.h> 25 24 #include <asm/smp.h> 26 - #include <asm/firmware.h> 27 25 28 26 void __spin_yield(arch_spinlock_t *lock) 29 27 { ··· 38 40 rmb(); 39 41 if (lock->slock != lock_value) 40 42 return; /* something has changed */ 41 - if (firmware_has_feature(FW_FEATURE_ISERIES)) 42 - HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, 43 - ((u64)holder_cpu << 32) | yield_count); 44 - #ifdef CONFIG_PPC_SPLPAR 45 - else 46 - plpar_hcall_norets(H_CONFER, 47 - get_hard_smp_processor_id(holder_cpu), yield_count); 48 - #endif 43 + plpar_hcall_norets(H_CONFER, 44 + get_hard_smp_processor_id(holder_cpu), yield_count); 49 45 } 50 46 51 47 /* ··· 63 71 rmb(); 64 72 if (rw->lock != lock_value) 65 73 return; /* something has changed */ 66 - if (firmware_has_feature(FW_FEATURE_ISERIES)) 67 - HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, 68 - ((u64)holder_cpu << 32) | yield_count); 69 - #ifdef CONFIG_PPC_SPLPAR 70 - else 71 - plpar_hcall_norets(H_CONFER, 72 - get_hard_smp_processor_id(holder_cpu), yield_count); 73 - #endif 74 + plpar_hcall_norets(H_CONFER, 75 + get_hard_smp_processor_id(holder_cpu), yield_count); 74 76 } 75 77 #endif 76 78
+3 -6
arch/powerpc/mm/hash_utils_64.c
··· 56 56 #include <asm/udbg.h> 57 57 #include <asm/code-patching.h> 58 58 #include <asm/fadump.h> 59 + #include <asm/firmware.h> 59 60 60 61 #ifdef DEBUG 61 62 #define DBG(fmt...) udbg_printf(fmt) ··· 757 756 */ 758 757 htab_initialize(); 759 758 760 - /* Initialize stab / SLB management except on iSeries 761 - */ 759 + /* Initialize stab / SLB management */ 762 760 if (mmu_has_feature(MMU_FTR_SLB)) 763 761 slb_initialize(); 764 - else if (!firmware_has_feature(FW_FEATURE_ISERIES)) 765 - stab_initialize(get_paca()->stab_real); 766 762 } 767 763 768 764 #ifdef CONFIG_SMP ··· 770 772 mtspr(SPRN_SDR1, _SDR1); 771 773 772 774 /* Initialize STAB/SLB. We use a virtual address as it works 773 - * in real mode on pSeries and we want a virtual address on 774 - * iSeries anyway 775 + * in real mode on pSeries. 775 776 */ 776 777 if (mmu_has_feature(MMU_FTR_SLB)) 777 778 slb_initialize();
-6
arch/powerpc/mm/slb.c
··· 21 21 #include <asm/cputable.h> 22 22 #include <asm/cacheflush.h> 23 23 #include <asm/smp.h> 24 - #include <asm/firmware.h> 25 24 #include <linux/compiler.h> 26 25 #include <asm/udbg.h> 27 26 #include <asm/code-patching.h> ··· 305 306 } 306 307 307 308 get_paca()->stab_rr = SLB_NUM_BOLTED; 308 - 309 - /* On iSeries the bolted entries have already been set up by 310 - * the hypervisor from the lparMap data in head.S */ 311 - if (firmware_has_feature(FW_FEATURE_ISERIES)) 312 - return; 313 309 314 310 lflags = SLB_VSID_KERNEL | linear_llp; 315 311 vflags = SLB_VSID_KERNEL | vmalloc_llp;
-9
arch/powerpc/mm/stab.c
··· 21 21 #include <asm/cputable.h> 22 22 #include <asm/prom.h> 23 23 #include <asm/abs_addr.h> 24 - #include <asm/firmware.h> 25 - #include <asm/iseries/hv_call.h> 26 24 27 25 struct stab_entry { 28 26 unsigned long esid_data; ··· 282 284 283 285 /* Set ASR */ 284 286 stabreal = get_paca()->stab_real | 0x1ul; 285 - 286 - #ifdef CONFIG_PPC_ISERIES 287 - if (firmware_has_feature(FW_FEATURE_ISERIES)) { 288 - HvCall1(HvCallBaseSetASR, stabreal); 289 - return; 290 - } 291 - #endif /* CONFIG_PPC_ISERIES */ 292 287 293 288 mtspr(SPRN_ASR, stabreal); 294 289 }
-3
arch/powerpc/oprofile/common.c
··· 195 195 if (!cur_cpu_spec->oprofile_cpu_type) 196 196 return -ENODEV; 197 197 198 - if (firmware_has_feature(FW_FEATURE_ISERIES)) 199 - return -ENODEV; 200 - 201 198 switch (cur_cpu_spec->oprofile_type) { 202 199 #ifdef CONFIG_PPC_BOOK3S_64 203 200 #ifdef CONFIG_OPROFILE_CELL
+1
arch/powerpc/platforms/powernv/pci.c
··· 31 31 #include <asm/iommu.h> 32 32 #include <asm/tce.h> 33 33 #include <asm/abs_addr.h> 34 + #include <asm/firmware.h> 34 35 35 36 #include "powernv.h" 36 37 #include "pci.h"
+1
arch/powerpc/platforms/pseries/lpar.c
··· 41 41 #include <asm/udbg.h> 42 42 #include <asm/smp.h> 43 43 #include <asm/trace.h> 44 + #include <asm/firmware.h> 44 45 45 46 #include "plpar_wrappers.h" 46 47 #include "pseries.h"
-28
arch/powerpc/xmon/xmon.c
··· 39 39 #include <asm/irq_regs.h> 40 40 #include <asm/spu.h> 41 41 #include <asm/spu_priv1.h> 42 - #include <asm/firmware.h> 43 42 #include <asm/setjmp.h> 44 43 #include <asm/reg.h> 45 44 ··· 1634 1635 mfspr(SPRN_DEC), mfspr(SPRN_SPRG2)); 1635 1636 printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3)); 1636 1637 printf("toc = "REG" dar = "REG"\n", toc, mfspr(SPRN_DAR)); 1637 - #ifdef CONFIG_PPC_ISERIES 1638 - if (firmware_has_feature(FW_FEATURE_ISERIES)) { 1639 - struct paca_struct *ptrPaca; 1640 - struct lppaca *ptrLpPaca; 1641 - 1642 - /* Dump out relevant Paca data areas. */ 1643 - printf("Paca: \n"); 1644 - ptrPaca = local_paca; 1645 - 1646 - printf(" Local Processor Control Area (LpPaca): \n"); 1647 - ptrLpPaca = ptrPaca->lppaca_ptr; 1648 - printf(" Saved Srr0=%.16lx Saved Srr1=%.16lx \n", 1649 - ptrLpPaca->saved_srr0, ptrLpPaca->saved_srr1); 1650 - printf(" Saved Gpr3=%.16lx Saved Gpr4=%.16lx \n", 1651 - ptrLpPaca->saved_gpr3, ptrLpPaca->saved_gpr4); 1652 - printf(" Saved Gpr5=%.16lx \n", 1653 - ptrLpPaca->gpr5_dword.saved_gpr5); 1654 - } 1655 - #endif 1656 1638 1657 1639 return; 1658 1640 } ··· 2836 2856 2837 2857 static void xmon_init(int enable) 2838 2858 { 2839 - #ifdef CONFIG_PPC_ISERIES 2840 - if (firmware_has_feature(FW_FEATURE_ISERIES)) 2841 - return; 2842 - #endif 2843 2859 if (enable) { 2844 2860 __debugger = xmon; 2845 2861 __debugger_ipi = xmon_ipi; ··· 2872 2896 2873 2897 static int __init setup_xmon_sysrq(void) 2874 2898 { 2875 - #ifdef CONFIG_PPC_ISERIES 2876 - if (firmware_has_feature(FW_FEATURE_ISERIES)) 2877 - return 0; 2878 - #endif 2879 2899 register_sysrq_key('x', &sysrq_xmon_op); 2880 2900 return 0; 2881 2901 }