/*
 * Common boot and setup code for both 32-bit and 64-bit.
 * Extracted from arch/powerpc/kernel/setup_64.c.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/screen_info.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/debugfs.h>
#include <linux/percpu.h>
#include <linux/lmb.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/xmon.h>
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>

#include "setup.h"

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* The main machine-dep calls structure */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);
struct machdep_calls *machine_id;
EXPORT_SYMBOL(machine_id);

unsigned long klimit = (unsigned long) _end;

char cmd_line[COMMAND_LINE_SIZE];
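
/*
 * Illustrative note: generic powerpc code dispatches platform-specific
 * work through the ppc_md hooks declared above.  Every hook is optional,
 * so call sites NULL-check before dispatching, e.g. (sketch only):
 *
 *	if (ppc_md.get_boot_time)
 *		sec = ppc_md.get_boot_time();
 *
 * probe_machine() later in this file fills in ppc_md by copying the
 * matching board descriptor over it.
 */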

/*
 * This still seems to be needed...
 *	-- paulus
 */
struct screen_info screen_info = {
	.orig_x = 0,
	.orig_y = 25,
	.orig_video_cols = 80,
	.orig_video_lines = 25,
	.orig_video_isVGA = 1,
	.orig_video_points = 16
};

#ifdef __DO_IRQ_CANON
/* XXX should go elsewhere eventually */
int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif

/* also used by kexec */
void machine_shutdown(void)
{
	if (ppc_md.machine_shutdown)
		ppc_md.machine_shutdown();
}

void machine_restart(char *cmd)
{
	machine_shutdown();
	if (ppc_md.restart)
		ppc_md.restart(cmd);
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1) ;
}

void machine_power_off(void)
{
	machine_shutdown();
	if (ppc_md.power_off)
		ppc_md.power_off();
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1) ;
}
/* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);

void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

void machine_halt(void)
{
	machine_shutdown();
	if (ppc_md.halt)
		ppc_md.halt();
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1) ;
}

#ifdef CONFIG_TAU
extern u32 cpu_temp(unsigned long cpu);
extern u32 cpu_temp_both(unsigned long cpu);
#endif /* CONFIG_TAU */

#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned int, pvr);
#endif

static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long cpu_id = (unsigned long)v - 1;
	unsigned int pvr;
	unsigned short maj;
	unsigned short min;

	if (cpu_id == NR_CPUS) {
		struct device_node *root;
		const char *model = NULL;
#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
		unsigned long bogosum = 0;
		int i;
		for_each_online_cpu(i)
			bogosum += loops_per_jiffy;
		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
			   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
#endif /* CONFIG_SMP && CONFIG_PPC32 */
		seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
		if (ppc_md.name)
			seq_printf(m, "platform\t: %s\n", ppc_md.name);
		root = of_find_node_by_path("/");
		if (root)
			model = of_get_property(root, "model", NULL);
		if (model)
			seq_printf(m, "model\t\t: %s\n", model);
		of_node_put(root);

		if (ppc_md.show_cpuinfo != NULL)
			ppc_md.show_cpuinfo(m);

#ifdef CONFIG_PPC32
		/* Display the amount of memory */
		seq_printf(m, "Memory\t\t: %d MB\n",
			   (unsigned int)(total_memory / (1024 * 1024)));
#endif

		return 0;
	}
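
	/*
	 * Per-CPU records follow.  The PVR packs the processor version
	 * into the upper 16 bits and the revision into the lower 16;
	 * with illustrative values, pvr 0x00080202 would decode as
	 * version 0x0008 (750) and revision 2.2.  The switches further
	 * down special-case families whose maj/min fields deviate from
	 * the plain (pvr >> 8) & 0xFF / pvr & 0xFF layout.
	 */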

	/* We only show online cpus: disable preempt (overzealous, I
	 * know) to prevent the cpu from going down. */
	preempt_disable();
	if (!cpu_online(cpu_id)) {
		preempt_enable();
		return 0;
	}

#ifdef CONFIG_SMP
	pvr = per_cpu(pvr, cpu_id);
#else
	pvr = mfspr(SPRN_PVR);
#endif
	maj = (pvr >> 8) & 0xFF;
	min = pvr & 0xFF;

	seq_printf(m, "processor\t: %lu\n", cpu_id);
	seq_printf(m, "cpu\t\t: ");

	if (cur_cpu_spec->pvr_mask)
		seq_printf(m, "%s", cur_cpu_spec->cpu_name);
	else
		seq_printf(m, "unknown (%08x)", pvr);

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		seq_printf(m, ", altivec supported");
#endif /* CONFIG_ALTIVEC */

	seq_printf(m, "\n");

#ifdef CONFIG_TAU
	if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
#ifdef CONFIG_TAU_AVERAGE
		/* more straightforward, but potentially misleading */
		seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
			   cpu_temp(cpu_id));
#else
		/* show the actual temp sensor range */
		u32 temp;
		temp = cpu_temp_both(cpu_id);
		seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
			   temp & 0xff, temp >> 16);
#endif
	}
#endif /* CONFIG_TAU */

	/*
	 * Assume here that all clock rates are the same in an
	 * SMP system.  -- Cort
	 */
	if (ppc_proc_freq)
		seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
			   ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);

	if (ppc_md.show_percpuinfo != NULL)
		ppc_md.show_percpuinfo(m, cpu_id);

	/* If we are a Freescale core do a simple check so
	 * we don't have to keep adding cases in the future */
	if (PVR_VER(pvr) & 0x8000) {
		switch (PVR_VER(pvr)) {
		case 0x8000:	/* 7441/7450/7451, Voyager */
		case 0x8001:	/* 7445/7455, Apollo 6 */
		case 0x8002:	/* 7447/7457, Apollo 7 */
		case 0x8003:	/* 7447A, Apollo 7 PM */
		case 0x8004:	/* 7448, Apollo 8 */
		case 0x800c:	/* 7410, Nitro */
			maj = ((pvr >> 8) & 0xF);
			min = PVR_MIN(pvr);
			break;
		default:	/* e500/book-e */
			maj = PVR_MAJ(pvr);
			min = PVR_MIN(pvr);
			break;
		}
	} else {
		switch (PVR_VER(pvr)) {
		case 0x0020:	/* 403 family */
			maj = PVR_MAJ(pvr) + 1;
			min = PVR_MIN(pvr);
			break;
		case 0x1008:	/* 740P/750P ?? */
			maj = ((pvr >> 8) & 0xFF) - 1;
			min = pvr & 0xFF;
			break;
		default:
			maj = (pvr >> 8) & 0xFF;
			min = pvr & 0xFF;
			break;
		}
	}

	seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
		   maj, min, PVR_VER(pvr), PVR_REV(pvr));

#ifdef CONFIG_PPC32
	seq_printf(m, "bogomips\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

#ifdef CONFIG_SMP
	seq_printf(m, "\n");
#endif

	preempt_enable();
	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	unsigned long i = *pos;

	return i <= NR_CPUS ? (void *)(i + 1) : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
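
/*
 * Note on the iterator above: seq_file repeatedly calls start/show/next,
 * stopping when start or next returns NULL.  c_start() therefore hands
 * back *pos + 1 as an opaque cookie (position 0 must still map to a
 * non-NULL value), and show_cpuinfo() undoes the bias, so positions
 * 0..NR_CPUS-1 are per-cpu records and position NR_CPUS is the summary
 * block printed at the end of /proc/cpuinfo.
 */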

void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	DBG(" -> check_for_initrd()  initrd_start=0x%lx  initrd_end=0x%lx\n",
	    initrd_start, initrd_end);

	/* If we were passed an initrd, set the ROOT_DEV properly if the values
	 * look sensible.  If not, clear the initrd reference.
	 */
	if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
	    initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;

	if (initrd_start)
		printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);

	DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
}

#ifdef CONFIG_SMP

int threads_per_core, threads_shift;
cpumask_t threads_core_mask;

static void __init cpu_init_thread_core_maps(int tpc)
{
	int i;

	threads_per_core = tpc;
	threads_core_mask = CPU_MASK_NONE;

	/* This implementation only supports a power-of-2 number of threads
	 * for simplicity and performance.
	 */
	threads_shift = ilog2(tpc);
	BUG_ON(tpc != (1 << threads_shift));

	for (i = 0; i < tpc; i++)
		cpu_set(i, threads_core_mask);

	printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
	       tpc, tpc > 1 ? "s" : "");
	printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
}

/**
 * setup_cpu_maps - initialize the following cpu maps:
 *                  cpu_possible_map
 *                  cpu_present_map
 *
 * Having the possible map set up early allows us to restrict allocations
 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
 *
 * We do not initialize the online map here; cpus set their own bits in
 * cpu_online_map as they come up.
 *
 * This function is valid only for Open Firmware systems.  finish_device_tree
 * must be called before using this.
 *
 * While we're here, we may as well set the "physical" cpu ids in the paca.
 *
 * NOTE: This must match the parsing done in early_init_dt_scan_cpus.
 */
void __init smp_setup_cpu_maps(void)
{
	struct device_node *dn = NULL;
	int cpu = 0;
	int nthreads = 1;

	DBG("smp_setup_cpu_maps()\n");

	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
		const int *intserv;
		int j, len;

		DBG("  * %s...\n", dn->full_name);

		intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
					  &len);
		if (intserv) {
			nthreads = len / sizeof(int);
			DBG("    ibm,ppc-interrupt-server#s -> %d threads\n",
			    nthreads);
		} else {
			DBG("    no ibm,ppc-interrupt-server#s -> 1 thread\n");
			intserv = of_get_property(dn, "reg", NULL);
			if (!intserv)
				intserv = &cpu;	/* assume logical == phys */
		}

		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
			DBG("    thread %d -> cpu %d (hard id %d)\n",
			    j, cpu, intserv[j]);
			cpu_set(cpu, cpu_present_map);
			set_hard_smp_processor_id(cpu, intserv[j]);
			cpu_set(cpu, cpu_possible_map);
			cpu++;
		}
	}

	/* If no SMT is supported, nthreads is forced to 1 */
	if (!cpu_has_feature(CPU_FTR_SMT)) {
		DBG("  SMT disabled ! nthreads forced to 1\n");
		nthreads = 1;
	}
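
	/*
	 * Illustrative example of the walk above (property values made up
	 * for the example): with two dual-threaded cores, e.g.
	 *
	 *	cpu@0: ibm,ppc-interrupt-server#s = <0 1>;
	 *	cpu@2: ibm,ppc-interrupt-server#s = <2 3>;
	 *
	 * logical cpus 0-3 become present and possible, and the hard ids
	 * are taken from the interrupt server numbers.
	 */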

#ifdef CONFIG_PPC64
	/*
	 * On pSeries LPAR, we need to know how many cpus
	 * could possibly be added to this partition.
	 */
	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
	    (dn = of_find_node_by_path("/rtas"))) {
		int num_addr_cell, num_size_cell, maxcpus;
		const unsigned int *ireg;

		num_addr_cell = of_n_addr_cells(dn);
		num_size_cell = of_n_size_cells(dn);

		ireg = of_get_property(dn, "ibm,lrdr-capacity", NULL);

		if (!ireg)
			goto out;

		maxcpus = ireg[num_addr_cell + num_size_cell];

		/* Double maxcpus for processors which have SMT capability */
		if (cpu_has_feature(CPU_FTR_SMT))
			maxcpus *= nthreads;

		if (maxcpus > NR_CPUS) {
			printk(KERN_WARNING
			       "Partition configured for %d cpus, "
			       "operating system maximum is %d.\n",
			       maxcpus, NR_CPUS);
			maxcpus = NR_CPUS;
		} else
			printk(KERN_INFO "Partition configured for %d cpus.\n",
			       maxcpus);

		for (cpu = 0; cpu < maxcpus; cpu++)
			cpu_set(cpu, cpu_possible_map);
	out:
		of_node_put(dn);
	}
	vdso_data->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */

	/* Initialize the CPU <=> thread mapping.
	 *
	 * WARNING: We assume that the number of threads is the same for
	 * every CPU in the system.  If that is not the case, then some code
	 * here will have to be reworked.
	 */
	cpu_init_thread_core_maps(nthreads);
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PCSPKR_PLATFORM
static __init int add_pcspkr(void)
{
	struct device_node *np;
	struct platform_device *pd;
	int ret;

	np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
	of_node_put(np);
	if (!np)
		return -ENODEV;

	pd = platform_device_alloc("pcspkr", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);

	return ret;
}
device_initcall(add_pcspkr);
#endif	/* CONFIG_PCSPKR_PLATFORM */

void probe_machine(void)
{
	extern struct machdep_calls __machine_desc_start;
	extern struct machdep_calls __machine_desc_end;

	/*
	 * Iterate over all ppc_md structures until we find the proper
	 * one for the current machine type.
	 */
	DBG("Probing machine type ...\n");

	for (machine_id = &__machine_desc_start;
	     machine_id < &__machine_desc_end;
	     machine_id++) {
		DBG("  %s ...", machine_id->name);
		memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
		if (ppc_md.probe()) {
			DBG(" match !\n");
			break;
		}
		DBG("\n");
	}
	/* What can we do if we didn't find one? */
	if (machine_id >= &__machine_desc_end) {
		DBG("No suitable machine found !\n");
		for (;;);
	}

	printk(KERN_INFO "Using %s machine description\n", ppc_md.name);
}
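
/*
 * Illustrative note on probe_machine(): __machine_desc_start and
 * __machine_desc_end delimit a linker-script section that platform code
 * populates with the define_machine() macro from <asm/machdep.h>,
 * roughly as in this sketch (the "myboard" names are invented):
 *
 *	define_machine(myboard) {
 *		.name	= "MyBoard",
 *		.probe	= myboard_probe,
 *	};
 *
 * Probe order therefore follows link order.
 */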

/* Match a class of boards, not a specific device configuration. */
int check_legacy_ioport(unsigned long base_port)
{
	struct device_node *parent, *np = NULL;
	int ret = -ENODEV;

	switch (base_port) {
	case I8042_DATA_REG:
		if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303")))
			np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03");
		if (np) {
			parent = of_get_parent(np);
			of_node_put(np);
			np = parent;
			break;
		}
		np = of_find_node_by_type(NULL, "8042");
		/* Pegasos has no device_type on its 8042 node, look for the
		 * name instead */
		if (!np)
			np = of_find_node_by_name(NULL, "8042");
		break;
	case FDC_BASE: /* FDC1 */
		np = of_find_node_by_type(NULL, "fdc");
		break;
#ifdef CONFIG_PPC_PREP
	case _PIDXR:
	case _PNPWRP:
	case PNPBIOS_BASE:
		/* implement me */
#endif
	default:
		/* ipmi is supposed to fail here */
		break;
	}
	if (!np)
		return ret;
	parent = of_get_parent(np);
	if (parent) {
		if (strcmp(parent->type, "isa") == 0)
			ret = 0;
		of_node_put(parent);
	}
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL(check_legacy_ioport);

static int ppc_panic_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	ppc_md.panic(ptr);  /* May not return */
	return NOTIFY_DONE;
}

static struct notifier_block ppc_panic_block = {
	.notifier_call = ppc_panic_event,
	.priority = INT_MIN /* may not return; must be done last */
};

void __init setup_panic(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
}

#ifdef CONFIG_CHECK_CACHE_COHERENCY
/*
 * For platforms that have configurable cache-coherency.  This function
 * checks that the cache coherency setting of the kernel matches the setting
 * left by the firmware, as indicated in the device tree.  Since a mismatch
 * will eventually result in DMA failures, we print an error and call
 * BUG() in that case.
 */

#ifdef CONFIG_NOT_COHERENT_CACHE
#define KERNEL_COHERENCY	0
#else
#define KERNEL_COHERENCY	1
#endif

static int __init check_cache_coherency(void)
{
	struct device_node *np;
	const void *prop;
	int devtree_coherency;

	np = of_find_node_by_path("/");
	prop = of_get_property(np, "coherency-off", NULL);
	of_node_put(np);

	devtree_coherency = prop ? 0 : 1;

	if (devtree_coherency != KERNEL_COHERENCY) {
		printk(KERN_ERR
		       "kernel coherency:%s != device tree coherency:%s\n",
		       KERNEL_COHERENCY ? "on" : "off",
		       devtree_coherency ? "on" : "off");
		BUG();
	}

	return 0;
}

late_initcall(check_cache_coherency);
#endif /* CONFIG_CHECK_CACHE_COHERENCY */

#ifdef CONFIG_DEBUG_FS
struct dentry *powerpc_debugfs_root;

static int powerpc_debugfs_init(void)
{
	powerpc_debugfs_root = debugfs_create_dir("powerpc", NULL);

	return powerpc_debugfs_root == NULL;
}
arch_initcall(powerpc_debugfs_init);
#endif
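
/*
 * Illustrative usage of the debugfs root above (the entry name and fops
 * are invented for the example): other powerpc code hangs its files off
 * this directory, e.g.
 *
 *	debugfs_create_file("some_stat", 0444, powerpc_debugfs_root,
 *			    NULL, &some_stat_fops);
 *
 * which appears as /sys/kernel/debug/powerpc/some_stat once debugfs is
 * mounted.
 */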