/*
 * 64-bit pSeries and RS/6000 setup code.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Adapted from 'alpha' version by Gary Thomas
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * Modified by PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * bootup setup stuff..
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/adb.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/kexec.h>

#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/time.h>
#include <asm/nvram.h>
#include <asm/pmc.h>
#include <asm/mpic.h>
#include <asm/xics.h>
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/eeh.h>
#include <asm/reg.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"

int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
EXPORT_SYMBOL(CMO_PageSize);

int fwnmi_active; /* TRUE if an FWNMI handler is present */

static struct device_node *pSeries_mpic_node;

static void pSeries_show_cpuinfo(struct seq_file *m)
{
	struct device_node *root;
	const char *model = "";

	root = of_find_node_by_path("/");
	if (root)
		model = of_get_property(root, "model", NULL);
	seq_printf(m, "machine\t\t: CHRP %s\n", model);
	of_node_put(root);
}

/* Initialize firmware assisted non-maskable interrupts if
 * the firmware supports this feature.
 */
static void __init fwnmi_init(void)
{
	unsigned long system_reset_addr, machine_check_addr;

	int ibm_nmi_register = rtas_token("ibm,nmi-register");
	if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
		return;

	/* If the kernel's not linked at zero we point the firmware at low
	 * addresses anyway, and use a trampoline to get to the real code. */
	system_reset_addr = __pa(system_reset_fwnmi) - PHYSICAL_START;
	machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;

	if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
			   machine_check_addr))
		fwnmi_active = 1;
}

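/*
 * Chained handler for the interrupt cascaded from the legacy i8259 PIC:
 * fetch the vector from the 8259, dispatch it, then issue an EOI for the
 * cascade interrupt at the parent controller.
 */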
static void pseries_8259_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq = i8259_irq();

	if (cascade_irq != NO_IRQ)
		generic_handle_irq(cascade_irq);

	chip->irq_eoi(&desc->irq_data);
}

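/*
 * Find the ISA interrupt controller ("chrp,iic"), map its cascade
 * interrupt, look up the PCI-provided 8259 interrupt-acknowledge address
 * and install the chained handler above.
 */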
static void __init pseries_setup_i8259_cascade(void)
{
	struct device_node *np, *old, *found = NULL;
	unsigned int cascade;
	const u32 *addrp;
	unsigned long intack = 0;
	int naddr;

	for_each_node_by_type(np, "interrupt-controller") {
		if (of_device_is_compatible(np, "chrp,iic")) {
			found = np;
			break;
		}
	}

	if (found == NULL) {
		printk(KERN_DEBUG "pic: no ISA interrupt controller\n");
		return;
	}

	cascade = irq_of_parse_and_map(found, 0);
	if (cascade == NO_IRQ) {
		printk(KERN_ERR "pic: failed to map cascade interrupt");
		return;
	}
	pr_debug("pic: cascade mapped to irq %d\n", cascade);

	for (old = of_node_get(found); old != NULL ; old = np) {
		np = of_get_parent(old);
		of_node_put(old);
		if (np == NULL)
			break;
		if (strcmp(np->name, "pci") != 0)
			continue;
		addrp = of_get_property(np, "8259-interrupt-acknowledge", NULL);
		if (addrp == NULL)
			continue;
		naddr = of_n_addr_cells(np);
		intack = addrp[naddr-1];
		if (naddr > 1)
			intack |= ((unsigned long)addrp[naddr-2]) << 32;
	}
	if (intack)
		printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack);
	i8259_init(found, intack);
	of_node_put(found);
	irq_set_chained_handler(cascade, pseries_8259_cascade);
}

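/*
 * Set up the MPIC described by the root node's "platform-open-pic"
 * property: allocate the controller, register its interrupt source units
 * and probe for a cascaded i8259.
 */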
static void __init pseries_mpic_init_IRQ(void)
{
	struct device_node *np;
	const unsigned int *opprop;
	unsigned long openpic_addr = 0;
	int naddr, n, i, opplen;
	struct mpic *mpic;

	np = of_find_node_by_path("/");
	naddr = of_n_addr_cells(np);
	opprop = of_get_property(np, "platform-open-pic", &opplen);
	if (opprop != NULL) {
		openpic_addr = of_read_number(opprop, naddr);
		printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
	}
	of_node_put(np);

	BUG_ON(openpic_addr == 0);

	/* Setup the openpic driver */
	mpic = mpic_alloc(pSeries_mpic_node, openpic_addr,
			  MPIC_NO_RESET, 16, 0, " MPIC ");
	BUG_ON(mpic == NULL);

	/* Add ISUs */
	opplen /= sizeof(u32);
	for (n = 0, i = naddr; i < opplen; i += naddr, n++) {
		unsigned long isuaddr = of_read_number(opprop + i, naddr);
		mpic_assign_isu(mpic, n, isuaddr);
	}

	/* Setup top-level get_irq */
	ppc_md.get_irq = mpic_get_irq;

	/* All ISUs are setup, complete initialization */
	mpic_init(mpic);

	/* Look for cascade */
	pseries_setup_i8259_cascade();
}

static void __init pseries_xics_init_IRQ(void)
{
	xics_init();
	pseries_setup_i8259_cascade();
}

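/* Tell the hypervisor, via the H_PERFMON hcall, to enable use of the
 * performance monitor for this partition.
 */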
static void pseries_lpar_enable_pmcs(void)
{
	unsigned long set, reset;

	set = 1UL << 63;
	reset = 0;
	plpar_hcall_norets(H_PERFMON, set, reset);
}

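/*
 * Work out which interrupt controller the platform provides (OpenPIC or
 * XICS) and hook up the matching init_IRQ, kexec cpu-down and SMP init
 * routines.
 */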
static void __init pseries_discover_pic(void)
{
	struct device_node *np;
	const char *typep;

	for_each_node_by_name(np, "interrupt-controller") {
		typep = of_get_property(np, "compatible", NULL);
		if (!typep)
			continue;
		if (strstr(typep, "open-pic")) {
			pSeries_mpic_node = of_node_get(np);
			ppc_md.init_IRQ = pseries_mpic_init_IRQ;
			setup_kexec_cpu_down_mpic();
			smp_init_pseries_mpic();
			return;
		} else if (strstr(typep, "ppc-xicp")) {
			ppc_md.init_IRQ = pseries_xics_init_IRQ;
			setup_kexec_cpu_down_xics();
			smp_init_pseries_xics();
			return;
		}
	}
	printk(KERN_ERR "pSeries_discover_pic: failed to recognize"
	       " interrupt-controller\n");
}

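/*
 * Device-tree reconfiguration notifier: keep the pci_dn and EEH state in
 * step when PCI device nodes are added or removed (e.g. by DLPAR).
 */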
static int pci_dn_reconfig_notifier(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	struct device_node *parent, *np = rd->dn;
	struct pci_dn *pdn;
	int err = NOTIFY_OK;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		parent = of_get_parent(np);
		pdn = parent ? PCI_DN(parent) : NULL;
		if (pdn) {
			/* Create pdn and EEH device */
			pci_add_device_node_info(pdn->phb, np);
			eeh_dev_init(PCI_DN(np), pdn->phb);
		}

		of_node_put(parent);
		break;
	case OF_RECONFIG_DETACH_NODE:
		pdn = PCI_DN(np);
		if (pdn)
			list_del(&pdn->list);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block pci_dn_reconfig_nb = {
	.notifier_call = pci_dn_reconfig_notifier,
};

struct kmem_cache *dtl_cache;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Allocate space for the dispatch trace log for all possible cpus
 * and register the buffers with the hypervisor. This is used for
 * computing time stolen by the hypervisor.
 */
static int alloc_dispatch_logs(void)
{
	int cpu, ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	if (!dtl_cache)
		return 0;

	for_each_possible_cpu(cpu) {
		pp = &paca[cpu];
		dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
		if (!dtl) {
			pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
				cpu);
			pr_warn("Stolen time statistics will be unreliable\n");
			break;
		}

		pp->dtl_ridx = 0;
		pp->dispatch_log = dtl;
		pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
		pp->dtl_curr = dtl;
	}

	/* Register the DTL for the current (boot) cpu */
	dtl = get_paca()->dispatch_log;
	get_paca()->dtl_ridx = 0;
	get_paca()->dtl_curr = dtl;
	get_paca()->lppaca_ptr->dtl_idx = 0;

	/* hypervisor reads buffer length from this field */
	dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
	ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
	if (ret)
		pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
		       "with %d\n", smp_processor_id(),
		       hard_smp_processor_id(), ret);
	get_paca()->lppaca_ptr->dtl_enable_mask = 2;

	return 0;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline int alloc_dispatch_logs(void)
{
	return 0;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

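/*
 * Create the kmem cache backing the dispatch trace log buffers, then
 * allocate and register a log buffer for each possible cpu.
 */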
static int alloc_dispatch_log_kmem_cache(void)
{
	dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
				      DISPATCH_LOG_BYTES, 0, NULL);
	if (!dtl_cache) {
		pr_warn("Failed to create dispatch trace log buffer cache\n");
		pr_warn("Stolen time statistics will be unreliable\n");
		return 0;
	}

	return alloc_dispatch_logs();
}
machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache);

static void pseries_lpar_idle(void)
{
	/*
	 * Default handler to go into low thread priority and possibly
	 * low power mode by ceding the processor to the hypervisor
	 */

	/* Indicate to hypervisor that we are idle. */
	get_lppaca()->idle = 1;

	/*
	 * Yield the processor to the hypervisor. We return if
	 * an external interrupt occurs (which are driven prior
	 * to returning here) or if a prod occurs from another
	 * processor. When returning here, external interrupts
	 * are enabled.
	 */
	cede_processor();

	get_lppaca()->idle = 0;
}

/*
 * Enable relocation on during exceptions. This has partition wide scope and
 * may take a while to complete; if it takes longer than one second we will
 * just give up rather than wasting any more time on this - if that turns out
 * to ever be a problem in practice we can move this into a kernel thread to
 * finish off the process later in boot.
 */
long pSeries_enable_reloc_on_exc(void)
{
	long rc;
	unsigned int delay, total_delay = 0;

	while (1) {
		rc = enable_reloc_on_exceptions();
		if (!H_IS_LONG_BUSY(rc))
			return rc;

		delay = get_longbusy_msecs(rc);
		total_delay += delay;
		if (total_delay > 1000) {
			pr_warn("Warning: Giving up waiting to enable "
				"relocation on exceptions (%u msec)!\n",
				total_delay);
			return rc;
		}

		mdelay(delay);
	}
}
EXPORT_SYMBOL(pSeries_enable_reloc_on_exc);

long pSeries_disable_reloc_on_exc(void)
{
	long rc;

	while (1) {
		rc = disable_reloc_on_exceptions();
		if (!H_IS_LONG_BUSY(rc))
			return rc;
		mdelay(get_longbusy_msecs(rc));
	}
}
EXPORT_SYMBOL(pSeries_disable_reloc_on_exc);

#ifdef CONFIG_KEXEC
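/*
 * Relocation-on exceptions are a partition-wide setting; make sure they
 * are switched back off before handing control to the new kernel.
 */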
static void pSeries_machine_kexec(struct kimage *image)
{
	long rc;

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		rc = pSeries_disable_reloc_on_exc();
		if (rc != H_SUCCESS)
			pr_warning("Warning: Failed to disable relocation on "
				   "exceptions: %ld\n", rc);
	}

	default_machine_kexec(image);
}
#endif

#ifdef __LITTLE_ENDIAN__
long pseries_big_endian_exceptions(void)
{
	long rc;

	while (1) {
		rc = enable_big_endian_exceptions();
		if (!H_IS_LONG_BUSY(rc))
			return rc;
		mdelay(get_longbusy_msecs(rc));
	}
}

static long pseries_little_endian_exceptions(void)
{
	long rc;

	while (1) {
		rc = enable_little_endian_exceptions();
		if (!H_IS_LONG_BUSY(rc))
			return rc;
		mdelay(get_longbusy_msecs(rc));
	}
}
#endif

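/*
 * Scan the children of the device-tree root for PCI host bridges
 * ("pci"/"pciex" nodes) and set up a pci_controller for each of them.
 */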
static void __init find_and_init_phbs(void)
{
	struct device_node *node;
	struct pci_controller *phb;
	struct device_node *root = of_find_node_by_path("/");

	for_each_child_of_node(root, node) {
		if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
					   strcmp(node->type, "pciex") != 0))
			continue;

		phb = pcibios_alloc_controller(node);
		if (!phb)
			continue;
		rtas_setup_phb(phb);
		pci_process_bridge_OF_ranges(phb, node, 0);
		isa_bridge_find_early(phb);
		phb->controller_ops = pseries_pci_controller_ops;
	}

	of_node_put(root);
	pci_devs_phb_init();

	/*
	 * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
	 * in chosen.
	 */
	of_pci_check_probe_only();
}

static void __init pSeries_setup_arch(void)
{
	set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);

	/* Discover PIC type and setup ppc_md accordingly */
	pseries_discover_pic();

	/* openpic global configuration register (64-bit format). */
	/* openpic Interrupt Source Unit pointer (64-bit format). */
	/* python0 facility area (mmio) (64-bit format) REAL address. */

	/* init to some ~sane value until calibrate_delay() runs */
	loops_per_jiffy = 50000000;

	fwnmi_init();

	/* By default, only probe PCI (can be overridden by rtas_pci) */
	pci_add_flags(PCI_PROBE_ONLY);

	/* Find and initialize PCI host bridges */
	init_pci_config_tokens();
	find_and_init_phbs();
	of_reconfig_notifier_register(&pci_dn_reconfig_nb);

	pSeries_nvram_init();

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		vpa_init(boot_cpuid);
		ppc_md.power_save = pseries_lpar_idle;
		ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
	} else {
		/* No special idle routine */
		ppc_md.enable_pmcs = power4_enable_pmcs;
	}

	ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		long rc;

		rc = pSeries_enable_reloc_on_exc();
		if (rc == H_P2) {
			pr_info("Relocation on exceptions not supported\n");
		} else if (rc != H_SUCCESS) {
			pr_warn("Unable to enable relocation on exceptions: "
				"%ld\n", rc);
		}
	}
}

static int __init pSeries_init_panel(void)
{
	/* Manually leave the kernel version on the panel. */
#ifdef __BIG_ENDIAN__
	ppc_md.progress("Linux ppc64\n", 0);
#else
	ppc_md.progress("Linux ppc64le\n", 0);
#endif
	ppc_md.progress(init_utsname()->version, 0);

	return 0;
}
machine_arch_initcall(pseries, pSeries_init_panel);

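/*
 * Hardware debug register setters: the plain DABR via H_SET_DABR, the
 * extended DABR via H_SET_XDABR, and the DAWR via the H_SET_MODE based
 * watchpoint call.
 */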
static int pseries_set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return plpar_hcall_norets(H_SET_DABR, dabr);
}

static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx)
{
	/* Have to set at least one bit in the DABRX according to PAPR */
	if (dabrx == 0 && dabr == 0)
		dabrx = DABRX_USER;
	/* PAPR says we can only set kernel and user bits */
	dabrx &= DABRX_KERNEL | DABRX_USER;

	return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx);
}

static int pseries_set_dawr(unsigned long dawr, unsigned long dawrx)
{
	/* PAPR says we can't set HYP */
	dawrx &= ~DAWRX_HYP;

	return plapr_set_watchpoint0(dawr, dawrx);
}

#define CMO_CHARACTERISTICS_TOKEN 44
#define CMO_MAXLENGTH 1026

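/*
 * If the hypervisor implements H_GET_MPP_X, extended CMO (page
 * coalescing) statistics are available; record that in the firmware
 * feature bits.
 */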
void pSeries_coalesce_init(void)
{
	struct hvcall_mpp_x_data mpp_x_data;

	if (firmware_has_feature(FW_FEATURE_CMO) && !h_get_mpp_x(&mpp_x_data))
		powerpc_firmware_features |= FW_FEATURE_XCMO;
	else
		powerpc_firmware_features &= ~FW_FEATURE_XCMO;
}

/**
 * pSeries_cmo_feature_init - FW_FEATURE_CMO is not stored in
 * ibm,hypertas-functions, handle that here. (Stolen from parse_system_parameter_string)
 */
static void pSeries_cmo_feature_init(void)
{
	char *ptr, *key, *value, *end;
	int call_status;
	int page_order = IOMMU_PAGE_SHIFT_4K;

	pr_debug(" -> fw_cmo_feature_init()\n");
	spin_lock(&rtas_data_buf_lock);
	memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
	call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
				NULL,
				CMO_CHARACTERISTICS_TOKEN,
				__pa(rtas_data_buf),
				RTAS_DATA_BUF_SIZE);

	if (call_status != 0) {
		spin_unlock(&rtas_data_buf_lock);
		pr_debug("CMO not available\n");
		pr_debug(" <- fw_cmo_feature_init()\n");
		return;
	}

	end = rtas_data_buf + CMO_MAXLENGTH - 2;
	ptr = rtas_data_buf + 2;	/* step over strlen value */
	key = value = ptr;

	while (*ptr && (ptr <= end)) {
		/* Separate the key and value by replacing '=' with '\0' and
		 * point the value at the string after the '='
		 */
		if (ptr[0] == '=') {
			ptr[0] = '\0';
			value = ptr + 1;
		} else if (ptr[0] == '\0' || ptr[0] == ',') {
			/* Terminate the string containing the key/value pair */
			ptr[0] = '\0';

			if (key == value) {
				pr_debug("Malformed key/value pair\n");
				/* Never found a '=', end processing */
				break;
			}

			if (0 == strcmp(key, "CMOPageSize"))
				page_order = simple_strtol(value, NULL, 10);
			else if (0 == strcmp(key, "PrPSP"))
				CMO_PrPSP = simple_strtol(value, NULL, 10);
			else if (0 == strcmp(key, "SecPSP"))
				CMO_SecPSP = simple_strtol(value, NULL, 10);
			value = key = ptr + 1;
		}
		ptr++;
	}

	/* Page size is returned as the power of 2 of the page size,
	 * convert to the page size in bytes before returning
	 */
	CMO_PageSize = 1 << page_order;
	pr_debug("CMO_PageSize = %lu\n", CMO_PageSize);

	if (CMO_PrPSP != -1 || CMO_SecPSP != -1) {
		pr_info("CMO enabled\n");
		pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
			 CMO_SecPSP);
		powerpc_firmware_features |= FW_FEATURE_CMO;
		pSeries_coalesce_init();
	} else
		pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
			 CMO_SecPSP);
	spin_unlock(&rtas_data_buf_lock);
	pr_debug(" <- fw_cmo_feature_init()\n");
}

/*
 * Early initialization. Relocation is on but do not reference unbolted pages
 */
static void __init pSeries_init_early(void)
{
	pr_debug(" -> pSeries_init_early()\n");

#ifdef CONFIG_HVC_CONSOLE
	if (firmware_has_feature(FW_FEATURE_LPAR))
		hvc_vio_init_early();
#endif
	if (firmware_has_feature(FW_FEATURE_XDABR))
		ppc_md.set_dabr = pseries_set_xdabr;
	else if (firmware_has_feature(FW_FEATURE_DABR))
		ppc_md.set_dabr = pseries_set_dabr;

	if (firmware_has_feature(FW_FEATURE_SET_MODE))
		ppc_md.set_dawr = pseries_set_dawr;

	pSeries_cmo_feature_init();
	iommu_init_early_pSeries();

	pr_debug(" <- pSeries_init_early()\n");
}

/**
 * pseries_power_off - tell firmware how to power off the system.
 *
 * This function calls either the power-off rtas token in normal cases
 * or the ibm,power-off-ups token (if present & requested) in case of
 * a power failure. If the power-off token is used, powering back on is
 * only possible with a power button press. If the ibm,power-off-ups
 * token is used, automatic power-on is allowed after power is restored.
 */
static void pseries_power_off(void)
{
	int rc;
	int rtas_poweroff_ups_token = rtas_token("ibm,power-off-ups");

	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_POWER_OFF);

	if (rtas_poweron_auto == 0 ||
	    rtas_poweroff_ups_token == RTAS_UNKNOWN_SERVICE) {
		rc = rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1);
		printk(KERN_INFO "RTAS power-off returned %d\n", rc);
	} else {
		rc = rtas_call(rtas_poweroff_ups_token, 0, 1, NULL);
		printk(KERN_INFO "RTAS ibm,power-off-ups returned %d\n", rc);
	}
	for (;;);
}

/*
 * Called very early, MMU is off, device-tree isn't unflattened
 */

static int __init pseries_probe_fw_features(unsigned long node,
					    const char *uname, int depth,
					    void *data)
{
	const char *prop;
	int len;
	static int hypertas_found;
	static int vec5_found;

	if (depth != 1)
		return 0;

	if (!strcmp(uname, "rtas") || !strcmp(uname, "rtas@0")) {
		prop = of_get_flat_dt_prop(node, "ibm,hypertas-functions",
					   &len);
		if (prop) {
			powerpc_firmware_features |= FW_FEATURE_LPAR;
			fw_hypertas_feature_init(prop, len);
		}

		hypertas_found = 1;
	}

	if (!strcmp(uname, "chosen")) {
		prop = of_get_flat_dt_prop(node, "ibm,architecture-vec-5",
					   &len);
		if (prop)
			fw_vec5_feature_init(prop, len);

		vec5_found = 1;
	}

	return hypertas_found && vec5_found;
}

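/*
 * Identify a pSeries machine from the flattened device tree (device_type
 * "chrp", excluding Cell blades), probe the firmware features and pick
 * the LPAR or native hash MMU backend accordingly.
 */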
static int __init pSeries_probe(void)
{
	unsigned long root = of_get_flat_dt_root();
	const char *dtype = of_get_flat_dt_prop(root, "device_type", NULL);

	if (dtype == NULL)
		return 0;
	if (strcmp(dtype, "chrp"))
		return 0;

	/* Cell blades firmware claims to be chrp while it's not. Until this
	 * is fixed, we need to avoid those here.
	 */
	if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") ||
	    of_flat_dt_is_compatible(root, "IBM,CBEA"))
		return 0;

	pr_debug("pSeries detected, looking for LPAR capability...\n");

	/* Now try to figure out if we are running on LPAR */
	of_scan_flat_dt(pseries_probe_fw_features, NULL);

#ifdef __LITTLE_ENDIAN__
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		long rc;
		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode. If this fails we don't
		 * want to use BUG() because it will trigger an exception.
		 */
		rc = pseries_little_endian_exceptions();
		if (rc) {
			ppc_md.progress("H_SET_MODE LE exception fail", 0);
			panic("Could not enable little endian exceptions");
		}
	}
#endif

	if (firmware_has_feature(FW_FEATURE_LPAR))
		hpte_init_lpar();
	else
		hpte_init_native();

	pm_power_off = pseries_power_off;

	pr_debug("Machine is%s LPAR !\n",
		 (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");

	return 1;
}

static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		return PCI_PROBE_DEVTREE;
	return PCI_PROBE_NORMAL;
}

struct pci_controller_ops pseries_pci_controller_ops = {
	.probe_mode = pSeries_pci_probe_mode,
};

define_machine(pseries) {
	.name = "pSeries",
	.probe = pSeries_probe,
	.setup_arch = pSeries_setup_arch,
	.init_early = pSeries_init_early,
	.show_cpuinfo = pSeries_show_cpuinfo,
	.log_error = pSeries_log_error,
	.pcibios_fixup = pSeries_final_fixup,
	.restart = rtas_restart,
	.halt = rtas_halt,
	.panic = rtas_os_term,
	.get_boot_time = rtas_get_boot_time,
	.get_rtc_time = rtas_get_rtc_time,
	.set_rtc_time = rtas_set_rtc_time,
	.calibrate_decr = generic_calibrate_decr,
	.progress = rtas_progress,
	.system_reset_exception = pSeries_system_reset_exception,
	.machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC
	.machine_kexec = pSeries_machine_kexec,
#endif
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
	.memory_block_size = pseries_memory_block_size,
#endif
};