Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.21-rc2 1051 lines 26 kB view raw
1/* 2 * Board setup routines for the Sky Computers HDPU Compute Blade. 3 * 4 * Written by Brian Waite <waite@skycomputers.com> 5 * 6 * Based on code done by - Mark A. Greer <mgreer@mvista.com> 7 * Rabeeh Khoury - rabeeh@galileo.co.il 8 * 9 * This program is free software; you can redistribute it and/or modify it 10 * under the terms of the GNU General Public License as published by the 11 * Free Software Foundation; either version 2 of the License, or (at your 12 * option) any later version. 13 */ 14 15 16#include <linux/pci.h> 17#include <linux/delay.h> 18#include <linux/irq.h> 19#include <linux/ide.h> 20#include <linux/seq_file.h> 21#include <linux/platform_device.h> 22 23#include <linux/initrd.h> 24#include <linux/root_dev.h> 25#include <linux/smp.h> 26 27#include <asm/time.h> 28#include <asm/machdep.h> 29#include <asm/todc.h> 30#include <asm/mv64x60.h> 31#include <asm/ppcboot.h> 32#include <platforms/hdpu.h> 33#include <linux/mv643xx.h> 34#include <linux/hdpu_features.h> 35#include <linux/device.h> 36#include <linux/mtd/physmap.h> 37 38#define BOARD_VENDOR "Sky Computers" 39#define BOARD_MACHINE "HDPU-CB-A" 40 41bd_t ppcboot_bd; 42int ppcboot_bd_valid = 0; 43 44static mv64x60_handle_t bh; 45 46extern char cmd_line[]; 47 48unsigned long hdpu_find_end_of_memory(void); 49void hdpu_mpsc_progress(char *s, unsigned short hex); 50void hdpu_heartbeat(void); 51 52static void parse_bootinfo(unsigned long r3, 53 unsigned long r4, unsigned long r5, 54 unsigned long r6, unsigned long r7); 55static void hdpu_set_l1pe(void); 56static void hdpu_cpustate_set(unsigned char new_state); 57#ifdef CONFIG_SMP 58static DEFINE_SPINLOCK(timebase_lock); 59static unsigned int timebase_upper = 0, timebase_lower = 0; 60extern int smp_tb_synchronized; 61 62void __devinit hdpu_tben_give(void); 63void __devinit hdpu_tben_take(void); 64#endif 65 66static int __init 67hdpu_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin) 68{ 69 struct pci_controller *hose = 
pci_bus_to_hose(dev->bus->number); 70 71 if (hose->index == 0) { 72 static char pci_irq_table[][4] = { 73 {HDPU_PCI_0_IRQ, 0, 0, 0}, 74 {HDPU_PCI_0_IRQ, 0, 0, 0}, 75 }; 76 77 const long min_idsel = 1, max_idsel = 2, irqs_per_slot = 4; 78 return PCI_IRQ_TABLE_LOOKUP; 79 } else { 80 static char pci_irq_table[][4] = { 81 {HDPU_PCI_1_IRQ, 0, 0, 0}, 82 }; 83 84 const long min_idsel = 1, max_idsel = 1, irqs_per_slot = 4; 85 return PCI_IRQ_TABLE_LOOKUP; 86 } 87} 88 89static void __init hdpu_intr_setup(void) 90{ 91 mv64x60_write(&bh, MV64x60_GPP_IO_CNTL, 92 (1 | (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5) | 93 (1 << 6) | (1 << 7) | (1 << 12) | (1 << 16) | 94 (1 << 18) | (1 << 19) | (1 << 20) | (1 << 21) | 95 (1 << 22) | (1 << 23) | (1 << 24) | (1 << 25) | 96 (1 << 26) | (1 << 27) | (1 << 28) | (1 << 29))); 97 98 /* XXXX Erranum FEr PCI-#8 */ 99 mv64x60_clr_bits(&bh, MV64x60_PCI0_CMD, (1 << 5) | (1 << 9)); 100 mv64x60_clr_bits(&bh, MV64x60_PCI1_CMD, (1 << 5) | (1 << 9)); 101 102 /* 103 * Dismiss and then enable interrupt on GPP interrupt cause 104 * for CPU #0 105 */ 106 mv64x60_write(&bh, MV64x60_GPP_INTR_CAUSE, ~((1 << 8) | (1 << 13))); 107 mv64x60_set_bits(&bh, MV64x60_GPP_INTR_MASK, (1 << 8) | (1 << 13)); 108 109 /* 110 * Dismiss and then enable interrupt on CPU #0 high cause reg 111 * BIT25 summarizes GPP interrupts 8-15 112 */ 113 mv64x60_set_bits(&bh, MV64360_IC_CPU0_INTR_MASK_HI, (1 << 25)); 114} 115 116static void __init hdpu_setup_peripherals(void) 117{ 118 unsigned int val; 119 120 mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN, 121 HDPU_EMB_FLASH_BASE, HDPU_EMB_FLASH_SIZE, 0); 122 bh.ci->enable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN); 123 124 mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_0_WIN, 125 HDPU_TBEN_BASE, HDPU_TBEN_SIZE, 0); 126 bh.ci->enable_window_32bit(&bh, MV64x60_CPU2DEV_0_WIN); 127 128 mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_1_WIN, 129 HDPU_NEXUS_ID_BASE, HDPU_NEXUS_ID_SIZE, 0); 130 bh.ci->enable_window_32bit(&bh, MV64x60_CPU2DEV_1_WIN); 
131 132 mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN, 133 HDPU_INTERNAL_SRAM_BASE, 134 HDPU_INTERNAL_SRAM_SIZE, 0); 135 bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN); 136 137 bh.ci->disable_window_32bit(&bh, MV64x60_ENET2MEM_4_WIN); 138 mv64x60_set_32bit_window(&bh, MV64x60_ENET2MEM_4_WIN, 0, 0, 0); 139 140 mv64x60_clr_bits(&bh, MV64x60_PCI0_PCI_DECODE_CNTL, (1 << 3)); 141 mv64x60_clr_bits(&bh, MV64x60_PCI1_PCI_DECODE_CNTL, (1 << 3)); 142 mv64x60_clr_bits(&bh, MV64x60_TIMR_CNTR_0_3_CNTL, 143 ((1 << 0) | (1 << 8) | (1 << 16) | (1 << 24))); 144 145 /* Enable pipelining */ 146 mv64x60_set_bits(&bh, MV64x60_CPU_CONFIG, (1 << 13)); 147 /* Enable Snoop Pipelineing */ 148 mv64x60_set_bits(&bh, MV64360_D_UNIT_CONTROL_HIGH, (1 << 24)); 149 150 /* 151 * Change DRAM read buffer assignment. 152 * Assign read buffer 0 dedicated only for CPU, 153 * and the rest read buffer 1. 154 */ 155 val = mv64x60_read(&bh, MV64360_SDRAM_CONFIG); 156 val = val & 0x03ffffff; 157 val = val | 0xf8000000; 158 mv64x60_write(&bh, MV64360_SDRAM_CONFIG, val); 159 160 /* 161 * Configure internal SRAM - 162 * Cache coherent write back, if CONFIG_MV64360_SRAM_CACHE_COHERENT set 163 * Parity enabled. 164 * Parity error propagation 165 * Arbitration not parked for CPU only 166 * Other bits are reserved. 
167 */ 168#ifdef CONFIG_MV64360_SRAM_CACHE_COHERENT 169 mv64x60_write(&bh, MV64360_SRAM_CONFIG, 0x001600b2); 170#else 171 mv64x60_write(&bh, MV64360_SRAM_CONFIG, 0x001600b0); 172#endif 173 174 hdpu_intr_setup(); 175} 176 177static void __init hdpu_setup_bridge(void) 178{ 179 struct mv64x60_setup_info si; 180 int i; 181 182 memset(&si, 0, sizeof(si)); 183 184 si.phys_reg_base = HDPU_BRIDGE_REG_BASE; 185 si.pci_0.enable_bus = 1; 186 si.pci_0.pci_io.cpu_base = HDPU_PCI0_IO_START_PROC_ADDR; 187 si.pci_0.pci_io.pci_base_hi = 0; 188 si.pci_0.pci_io.pci_base_lo = HDPU_PCI0_IO_START_PCI_ADDR; 189 si.pci_0.pci_io.size = HDPU_PCI0_IO_SIZE; 190 si.pci_0.pci_io.swap = MV64x60_CPU2PCI_SWAP_NONE; 191 si.pci_0.pci_mem[0].cpu_base = HDPU_PCI0_MEM_START_PROC_ADDR; 192 si.pci_0.pci_mem[0].pci_base_hi = HDPU_PCI0_MEM_START_PCI_HI_ADDR; 193 si.pci_0.pci_mem[0].pci_base_lo = HDPU_PCI0_MEM_START_PCI_LO_ADDR; 194 si.pci_0.pci_mem[0].size = HDPU_PCI0_MEM_SIZE; 195 si.pci_0.pci_mem[0].swap = MV64x60_CPU2PCI_SWAP_NONE; 196 si.pci_0.pci_cmd_bits = 0; 197 si.pci_0.latency_timer = 0x80; 198 199 si.pci_1.enable_bus = 1; 200 si.pci_1.pci_io.cpu_base = HDPU_PCI1_IO_START_PROC_ADDR; 201 si.pci_1.pci_io.pci_base_hi = 0; 202 si.pci_1.pci_io.pci_base_lo = HDPU_PCI1_IO_START_PCI_ADDR; 203 si.pci_1.pci_io.size = HDPU_PCI1_IO_SIZE; 204 si.pci_1.pci_io.swap = MV64x60_CPU2PCI_SWAP_NONE; 205 si.pci_1.pci_mem[0].cpu_base = HDPU_PCI1_MEM_START_PROC_ADDR; 206 si.pci_1.pci_mem[0].pci_base_hi = HDPU_PCI1_MEM_START_PCI_HI_ADDR; 207 si.pci_1.pci_mem[0].pci_base_lo = HDPU_PCI1_MEM_START_PCI_LO_ADDR; 208 si.pci_1.pci_mem[0].size = HDPU_PCI1_MEM_SIZE; 209 si.pci_1.pci_mem[0].swap = MV64x60_CPU2PCI_SWAP_NONE; 210 si.pci_1.pci_cmd_bits = 0; 211 si.pci_1.latency_timer = 0x80; 212 213 for (i = 0; i < MV64x60_CPU2MEM_WINDOWS; i++) { 214#if defined(CONFIG_NOT_COHERENT_CACHE) 215 si.cpu_prot_options[i] = 0; 216 si.enet_options[i] = MV64360_ENET2MEM_SNOOP_NONE; 217 si.mpsc_options[i] = MV64360_MPSC2MEM_SNOOP_NONE; 218 
si.idma_options[i] = MV64360_IDMA2MEM_SNOOP_NONE; 219 220 si.pci_1.acc_cntl_options[i] = 221 MV64360_PCI_ACC_CNTL_SNOOP_NONE | 222 MV64360_PCI_ACC_CNTL_SWAP_NONE | 223 MV64360_PCI_ACC_CNTL_MBURST_128_BYTES | 224 MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES; 225 226 si.pci_0.acc_cntl_options[i] = 227 MV64360_PCI_ACC_CNTL_SNOOP_NONE | 228 MV64360_PCI_ACC_CNTL_SWAP_NONE | 229 MV64360_PCI_ACC_CNTL_MBURST_128_BYTES | 230 MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES; 231 232#else 233 si.cpu_prot_options[i] = 0; 234 si.enet_options[i] = MV64360_ENET2MEM_SNOOP_WB; /* errata */ 235 si.mpsc_options[i] = MV64360_MPSC2MEM_SNOOP_WB; /* errata */ 236 si.idma_options[i] = MV64360_IDMA2MEM_SNOOP_WB; /* errata */ 237 238 si.pci_0.acc_cntl_options[i] = 239 MV64360_PCI_ACC_CNTL_SNOOP_WB | 240 MV64360_PCI_ACC_CNTL_SWAP_NONE | 241 MV64360_PCI_ACC_CNTL_MBURST_32_BYTES | 242 MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES; 243 244 si.pci_1.acc_cntl_options[i] = 245 MV64360_PCI_ACC_CNTL_SNOOP_WB | 246 MV64360_PCI_ACC_CNTL_SWAP_NONE | 247 MV64360_PCI_ACC_CNTL_MBURST_32_BYTES | 248 MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES; 249#endif 250 } 251 252 hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_INIT_PCI); 253 254 /* Lookup PCI host bridges */ 255 mv64x60_init(&bh, &si); 256 pci_dram_offset = 0; /* System mem at same addr on PCI & cpu bus */ 257 ppc_md.pci_swizzle = common_swizzle; 258 ppc_md.pci_map_irq = hdpu_map_irq; 259 260 mv64x60_set_bus(&bh, 0, 0); 261 bh.hose_a->first_busno = 0; 262 bh.hose_a->last_busno = 0xff; 263 bh.hose_a->last_busno = pciauto_bus_scan(bh.hose_a, 0); 264 265 bh.hose_b->first_busno = bh.hose_a->last_busno + 1; 266 mv64x60_set_bus(&bh, 1, bh.hose_b->first_busno); 267 bh.hose_b->last_busno = 0xff; 268 bh.hose_b->last_busno = pciauto_bus_scan(bh.hose_b, 269 bh.hose_b->first_busno); 270 271 ppc_md.pci_exclude_device = mv64x60_pci_exclude_device; 272 273 hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_INIT_REG); 274 /* 275 * Enabling of PCI internal-vs-external arbitration 276 
* is a platform- and errata-dependent decision. 277 */ 278 return; 279} 280 281#if defined(CONFIG_SERIAL_MPSC_CONSOLE) 282static void __init hdpu_early_serial_map(void) 283{ 284#ifdef CONFIG_KGDB 285 static char first_time = 1; 286 287#if defined(CONFIG_KGDB_TTYS0) 288#define KGDB_PORT 0 289#elif defined(CONFIG_KGDB_TTYS1) 290#define KGDB_PORT 1 291#else 292#error "Invalid kgdb_tty port" 293#endif 294 295 if (first_time) { 296 gt_early_mpsc_init(KGDB_PORT, 297 B9600 | CS8 | CREAD | HUPCL | CLOCAL); 298 first_time = 0; 299 } 300 301 return; 302#endif 303} 304#endif 305 306static void hdpu_init2(void) 307{ 308 return; 309} 310 311#if defined(CONFIG_MV643XX_ETH) 312static void __init hdpu_fixup_eth_pdata(struct platform_device *pd) 313{ 314 315 struct mv643xx_eth_platform_data *eth_pd; 316 eth_pd = pd->dev.platform_data; 317 318 eth_pd->force_phy_addr = 1; 319 eth_pd->phy_addr = pd->id; 320 eth_pd->speed = SPEED_100; 321 eth_pd->duplex = DUPLEX_FULL; 322 eth_pd->tx_queue_size = 400; 323 eth_pd->rx_queue_size = 800; 324} 325#endif 326 327static void __init hdpu_fixup_mpsc_pdata(struct platform_device *pd) 328{ 329 330 struct mpsc_pdata *pdata; 331 332 pdata = (struct mpsc_pdata *)pd->dev.platform_data; 333 334 pdata->max_idle = 40; 335 if (ppcboot_bd_valid) 336 pdata->default_baud = ppcboot_bd.bi_baudrate; 337 else 338 pdata->default_baud = HDPU_DEFAULT_BAUD; 339 pdata->brg_clk_src = HDPU_MPSC_CLK_SRC; 340 pdata->brg_clk_freq = HDPU_MPSC_CLK_FREQ; 341} 342 343#if defined(CONFIG_HDPU_FEATURES) 344static void __init hdpu_fixup_cpustate_pdata(struct platform_device *pd) 345{ 346 struct platform_device *pds[1]; 347 pds[0] = pd; 348 mv64x60_pd_fixup(&bh, pds, 1); 349} 350#endif 351 352static int hdpu_platform_notify(struct device *dev) 353{ 354 static struct { 355 char *bus_id; 356 void ((*rtn) (struct platform_device * pdev)); 357 } dev_map[] = { 358 { 359 MPSC_CTLR_NAME ".0", hdpu_fixup_mpsc_pdata}, 360#if defined(CONFIG_MV643XX_ETH) 361 { 362 MV643XX_ETH_NAME ".0", 
hdpu_fixup_eth_pdata}, 363#endif 364#if defined(CONFIG_HDPU_FEATURES) 365 { 366 HDPU_CPUSTATE_NAME ".0", hdpu_fixup_cpustate_pdata}, 367#endif 368 }; 369 struct platform_device *pdev; 370 int i; 371 372 if (dev && dev->bus_id) 373 for (i = 0; i < ARRAY_SIZE(dev_map); i++) 374 if (!strncmp(dev->bus_id, dev_map[i].bus_id, 375 BUS_ID_SIZE)) { 376 377 pdev = container_of(dev, 378 struct platform_device, 379 dev); 380 dev_map[i].rtn(pdev); 381 } 382 383 return 0; 384} 385 386static void __init hdpu_setup_arch(void) 387{ 388 if (ppc_md.progress) 389 ppc_md.progress("hdpu_setup_arch: enter", 0); 390#ifdef CONFIG_BLK_DEV_INITRD 391 if (initrd_start) 392 ROOT_DEV = Root_RAM0; 393 else 394#endif 395#ifdef CONFIG_ROOT_NFS 396 ROOT_DEV = Root_NFS; 397#else 398 ROOT_DEV = Root_SDA2; 399#endif 400 401 ppc_md.heartbeat = hdpu_heartbeat; 402 403 ppc_md.heartbeat_reset = HZ; 404 ppc_md.heartbeat_count = 1; 405 406 if (ppc_md.progress) 407 ppc_md.progress("hdpu_setup_arch: Enabling L2 cache", 0); 408 409 /* Enable L1 Parity Bits */ 410 hdpu_set_l1pe(); 411 412 /* Enable L2 and L3 caches (if 745x) */ 413 _set_L2CR(0x80080000); 414 415 if (ppc_md.progress) 416 ppc_md.progress("hdpu_setup_arch: enter", 0); 417 418 hdpu_setup_bridge(); 419 420 hdpu_setup_peripherals(); 421 422#ifdef CONFIG_SERIAL_MPSC_CONSOLE 423 hdpu_early_serial_map(); 424#endif 425 426 printk("SKY HDPU Compute Blade \n"); 427 428 if (ppc_md.progress) 429 ppc_md.progress("hdpu_setup_arch: exit", 0); 430 431 hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_OK); 432 return; 433} 434static void __init hdpu_init_irq(void) 435{ 436 mv64360_init_irq(); 437} 438 439static void __init hdpu_set_l1pe() 440{ 441 unsigned long ictrl; 442 asm volatile ("mfspr %0, 1011":"=r" (ictrl):); 443 ictrl |= ICTRL_EICE | ICTRL_EDC | ICTRL_EICP; 444 asm volatile ("mtspr 1011, %0"::"r" (ictrl)); 445} 446 447/* 448 * Set BAT 1 to map 0xf1000000 to end of physical memory space. 
449 */ 450static __inline__ void hdpu_set_bat(void) 451{ 452 mb(); 453 mtspr(SPRN_DBAT1U, 0xf10001fe); 454 mtspr(SPRN_DBAT1L, 0xf100002a); 455 mb(); 456 457 return; 458} 459 460unsigned long __init hdpu_find_end_of_memory(void) 461{ 462 return mv64x60_get_mem_size(CONFIG_MV64X60_NEW_BASE, 463 MV64x60_TYPE_MV64360); 464} 465 466static void hdpu_reset_board(void) 467{ 468 volatile int infinite = 1; 469 470 hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_RESET); 471 472 local_irq_disable(); 473 474 /* Clear all the LEDs */ 475 mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, ((1 << 4) | 476 (1 << 5) | (1 << 6))); 477 478 /* disable and invalidate the L2 cache */ 479 _set_L2CR(0); 480 _set_L2CR(0x200000); 481 482 /* flush and disable L1 I/D cache */ 483 __asm__ __volatile__ 484 ("\n" 485 "mfspr 3,1008\n" 486 "ori 5,5,0xcc00\n" 487 "ori 4,3,0xc00\n" 488 "andc 5,3,5\n" 489 "sync\n" 490 "mtspr 1008,4\n" 491 "isync\n" "sync\n" "mtspr 1008,5\n" "isync\n" "sync\n"); 492 493 /* Hit the reset bit */ 494 mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (1 << 3)); 495 496 while (infinite) 497 infinite = infinite; 498 499 return; 500} 501 502static void hdpu_restart(char *cmd) 503{ 504 volatile ulong i = 10000000; 505 506 hdpu_reset_board(); 507 508 while (i-- > 0) ; 509 panic("restart failed\n"); 510} 511 512static void hdpu_halt(void) 513{ 514 local_irq_disable(); 515 516 hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_HALT); 517 518 /* Clear all the LEDs */ 519 mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, ((1 << 4) | (1 << 5) | 520 (1 << 6))); 521 while (1) ; 522 /* NOTREACHED */ 523} 524 525static void hdpu_power_off(void) 526{ 527 hdpu_halt(); 528 /* NOTREACHED */ 529} 530 531static int hdpu_show_cpuinfo(struct seq_file *m) 532{ 533 uint pvid; 534 535 pvid = mfspr(SPRN_PVR); 536 seq_printf(m, "vendor\t\t: Sky Computers\n"); 537 seq_printf(m, "machine\t\t: HDPU Compute Blade\n"); 538 seq_printf(m, "PVID\t\t: 0x%x, vendor: %s\n", 539 pvid, (pvid & (1 << 15) ? 
"IBM" : "Motorola")); 540 541 return 0; 542} 543 544static void __init hdpu_calibrate_decr(void) 545{ 546 ulong freq; 547 548 if (ppcboot_bd_valid) 549 freq = ppcboot_bd.bi_busfreq / 4; 550 else 551 freq = 133000000; 552 553 printk("time_init: decrementer frequency = %lu.%.6lu MHz\n", 554 freq / 1000000, freq % 1000000); 555 556 tb_ticks_per_jiffy = freq / HZ; 557 tb_to_us = mulhwu_scale_factor(freq, 1000000); 558 559 return; 560} 561 562static void parse_bootinfo(unsigned long r3, 563 unsigned long r4, unsigned long r5, 564 unsigned long r6, unsigned long r7) 565{ 566 bd_t *bd = NULL; 567 char *cmdline_start = NULL; 568 int cmdline_len = 0; 569 570 if (r3) { 571 if ((r3 & 0xf0000000) == 0) 572 r3 += KERNELBASE; 573 if ((r3 & 0xf0000000) == KERNELBASE) { 574 bd = (void *)r3; 575 576 memcpy(&ppcboot_bd, bd, sizeof(ppcboot_bd)); 577 ppcboot_bd_valid = 1; 578 } 579 } 580#ifdef CONFIG_BLK_DEV_INITRD 581 if (r4 && r5 && r5 > r4) { 582 if ((r4 & 0xf0000000) == 0) 583 r4 += KERNELBASE; 584 if ((r5 & 0xf0000000) == 0) 585 r5 += KERNELBASE; 586 if ((r4 & 0xf0000000) == KERNELBASE) { 587 initrd_start = r4; 588 initrd_end = r5; 589 initrd_below_start_ok = 1; 590 } 591 } 592#endif /* CONFIG_BLK_DEV_INITRD */ 593 594 if (r6 && r7 && r7 > r6) { 595 if ((r6 & 0xf0000000) == 0) 596 r6 += KERNELBASE; 597 if ((r7 & 0xf0000000) == 0) 598 r7 += KERNELBASE; 599 if ((r6 & 0xf0000000) == KERNELBASE) { 600 cmdline_start = (void *)r6; 601 cmdline_len = (r7 - r6); 602 strncpy(cmd_line, cmdline_start, cmdline_len); 603 } 604 } 605} 606 607#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) 608static void 609hdpu_ide_request_region(ide_ioreg_t from, unsigned int extent, const char *name) 610{ 611 request_region(from, extent, name); 612 return; 613} 614 615static void hdpu_ide_release_region(ide_ioreg_t from, unsigned int extent) 616{ 617 release_region(from, extent); 618 return; 619} 620 621static void __init 622hdpu_ide_pci_init_hwif_ports(hw_regs_t * hw, ide_ioreg_t 
data_port, 623 ide_ioreg_t ctrl_port, int *irq) 624{ 625 struct pci_dev *dev; 626 627 pci_for_each_dev(dev) { 628 if (((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) || 629 ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)) { 630 hw->irq = dev->irq; 631 632 if (irq != NULL) { 633 *irq = dev->irq; 634 } 635 } 636 } 637 638 return; 639} 640#endif 641 642void hdpu_heartbeat(void) 643{ 644 if (mv64x60_read(&bh, MV64x60_GPP_VALUE) & (1 << 5)) 645 mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (1 << 5)); 646 else 647 mv64x60_write(&bh, MV64x60_GPP_VALUE_SET, (1 << 5)); 648 649 ppc_md.heartbeat_count = ppc_md.heartbeat_reset; 650 651} 652 653static void __init hdpu_map_io(void) 654{ 655 io_block_mapping(0xf1000000, 0xf1000000, 0x20000, _PAGE_IO); 656} 657 658#ifdef CONFIG_SMP 659char hdpu_smp0[] = "SMP Cpu #0"; 660char hdpu_smp1[] = "SMP Cpu #1"; 661 662static irqreturn_t hdpu_smp_cpu0_int_handler(int irq, void *dev_id) 663{ 664 volatile unsigned int doorbell; 665 666 doorbell = mv64x60_read(&bh, MV64360_CPU0_DOORBELL); 667 668 /* Ack the doorbell interrupts */ 669 mv64x60_write(&bh, MV64360_CPU0_DOORBELL_CLR, doorbell); 670 671 if (doorbell & 1) { 672 smp_message_recv(0); 673 } 674 if (doorbell & 2) { 675 smp_message_recv(1); 676 } 677 if (doorbell & 4) { 678 smp_message_recv(2); 679 } 680 if (doorbell & 8) { 681 smp_message_recv(3); 682 } 683 return IRQ_HANDLED; 684} 685 686static irqreturn_t hdpu_smp_cpu1_int_handler(int irq, void *dev_id) 687{ 688 volatile unsigned int doorbell; 689 690 doorbell = mv64x60_read(&bh, MV64360_CPU1_DOORBELL); 691 692 /* Ack the doorbell interrupts */ 693 mv64x60_write(&bh, MV64360_CPU1_DOORBELL_CLR, doorbell); 694 695 if (doorbell & 1) { 696 smp_message_recv(0); 697 } 698 if (doorbell & 2) { 699 smp_message_recv(1); 700 } 701 if (doorbell & 4) { 702 smp_message_recv(2); 703 } 704 if (doorbell & 8) { 705 smp_message_recv(3); 706 } 707 return IRQ_HANDLED; 708} 709 710static void smp_hdpu_CPU_two(void) 711{ 712 __asm__ __volatile__ 713 ("\n" 714 "lis 
3,0x0000\n" 715 "ori 3,3,0x00c0\n" 716 "mtspr 26, 3\n" "li 4,0\n" "mtspr 27,4\n" "rfi"); 717 718} 719 720static int smp_hdpu_probe(void) 721{ 722 int *cpu_count_reg; 723 int num_cpus = 0; 724 725 cpu_count_reg = ioremap(HDPU_NEXUS_ID_BASE, HDPU_NEXUS_ID_SIZE); 726 if (cpu_count_reg) { 727 num_cpus = (*cpu_count_reg >> 20) & 0x3; 728 iounmap(cpu_count_reg); 729 } 730 731 /* Validate the bits in the CPLD. If we could not map the reg, return 2. 732 * If the register reported 0 or 3, return 2. 733 * Older CPLD revisions set these bits to all ones (val = 3). 734 */ 735 if ((num_cpus < 1) || (num_cpus > 2)) { 736 printk 737 ("Unable to determine the number of processors %d . deafulting to 2.\n", 738 num_cpus); 739 num_cpus = 2; 740 } 741 return num_cpus; 742} 743 744static void 745smp_hdpu_message_pass(int target, int msg) 746{ 747 if (msg > 0x3) { 748 printk("SMP %d: smp_message_pass: unknown msg %d\n", 749 smp_processor_id(), msg); 750 return; 751 } 752 switch (target) { 753 case MSG_ALL: 754 mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg); 755 mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg); 756 break; 757 case MSG_ALL_BUT_SELF: 758 if (smp_processor_id()) 759 mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg); 760 else 761 mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg); 762 break; 763 default: 764 if (target == 0) 765 mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg); 766 else 767 mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg); 768 break; 769 } 770} 771 772static void smp_hdpu_kick_cpu(int nr) 773{ 774 volatile unsigned int *bootaddr; 775 776 if (ppc_md.progress) 777 ppc_md.progress("smp_hdpu_kick_cpu", 0); 778 779 hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_CPU1_KICK); 780 781 /* Disable BootCS. Must also reduce the windows size to zero. 
*/ 782 bh.ci->disable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN); 783 mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN, 0, 0, 0); 784 785 bootaddr = ioremap(HDPU_INTERNAL_SRAM_BASE, HDPU_INTERNAL_SRAM_SIZE); 786 if (!bootaddr) { 787 if (ppc_md.progress) 788 ppc_md.progress("smp_hdpu_kick_cpu: ioremap failed", 0); 789 return; 790 } 791 792 memcpy((void *)(bootaddr + 0x40), (void *)&smp_hdpu_CPU_two, 0x20); 793 794 /* map SRAM to 0xfff00000 */ 795 bh.ci->disable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN); 796 797 mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN, 798 0xfff00000, HDPU_INTERNAL_SRAM_SIZE, 0); 799 bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN); 800 801 /* Enable CPU1 arbitration */ 802 mv64x60_clr_bits(&bh, MV64x60_CPU_MASTER_CNTL, (1 << 9)); 803 804 /* 805 * Wait 100mSecond until other CPU has reached __secondary_start. 806 * When it reaches, it is permittable to rever the SRAM mapping etc... 807 */ 808 mdelay(100); 809 *(unsigned long *)KERNELBASE = nr; 810 asm volatile ("dcbf 0,%0"::"r" (KERNELBASE):"memory"); 811 812 iounmap(bootaddr); 813 814 /* Set up window for internal sram (256KByte insize) */ 815 bh.ci->disable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN); 816 mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN, 817 HDPU_INTERNAL_SRAM_BASE, 818 HDPU_INTERNAL_SRAM_SIZE, 0); 819 bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN); 820 /* 821 * Set up windows for embedded FLASH (using boot CS window). 
822 */ 823 824 bh.ci->disable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN); 825 mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN, 826 HDPU_EMB_FLASH_BASE, HDPU_EMB_FLASH_SIZE, 0); 827 bh.ci->enable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN); 828} 829 830static void smp_hdpu_setup_cpu(int cpu_nr) 831{ 832 if (cpu_nr == 0) { 833 if (ppc_md.progress) 834 ppc_md.progress("smp_hdpu_setup_cpu 0", 0); 835 mv64x60_write(&bh, MV64360_CPU0_DOORBELL_CLR, 0xff); 836 mv64x60_write(&bh, MV64360_CPU0_DOORBELL_MASK, 0xff); 837 request_irq(60, hdpu_smp_cpu0_int_handler, 838 IRQF_DISABLED, hdpu_smp0, 0); 839 } 840 841 if (cpu_nr == 1) { 842 if (ppc_md.progress) 843 ppc_md.progress("smp_hdpu_setup_cpu 1", 0); 844 845 hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | 846 CPUSTATE_KERNEL_CPU1_OK); 847 848 /* Enable L1 Parity Bits */ 849 hdpu_set_l1pe(); 850 851 /* Enable L2 cache */ 852 _set_L2CR(0); 853 _set_L2CR(0x80080000); 854 855 mv64x60_write(&bh, MV64360_CPU1_DOORBELL_CLR, 0x0); 856 mv64x60_write(&bh, MV64360_CPU1_DOORBELL_MASK, 0xff); 857 request_irq(28, hdpu_smp_cpu1_int_handler, 858 IRQF_DISABLED, hdpu_smp1, 0); 859 } 860 861} 862 863void __devinit hdpu_tben_give() 864{ 865 volatile unsigned long *val = 0; 866 867 /* By writing 0 to the TBEN_BASE, the timebases is frozen */ 868 val = ioremap(HDPU_TBEN_BASE, 4); 869 *val = 0; 870 mb(); 871 872 spin_lock(&timebase_lock); 873 timebase_upper = get_tbu(); 874 timebase_lower = get_tbl(); 875 spin_unlock(&timebase_lock); 876 877 while (timebase_upper || timebase_lower) 878 barrier(); 879 880 /* By writing 1 to the TBEN_BASE, the timebases is thawed */ 881 *val = 1; 882 mb(); 883 884 iounmap(val); 885 886} 887 888void __devinit hdpu_tben_take() 889{ 890 while (!(timebase_upper || timebase_lower)) 891 barrier(); 892 893 spin_lock(&timebase_lock); 894 set_tb(timebase_upper, timebase_lower); 895 timebase_upper = 0; 896 timebase_lower = 0; 897 spin_unlock(&timebase_lock); 898} 899 900static struct smp_ops_t hdpu_smp_ops = { 901 .message_pass = 
smp_hdpu_message_pass,
	.probe = smp_hdpu_probe,
	.kick_cpu = smp_hdpu_kick_cpu,
	.setup_cpu = smp_hdpu_setup_cpu,
	.give_timebase = hdpu_tben_give,
	.take_timebase = hdpu_tben_take,
};
#endif				/* CONFIG_SMP */

/*
 * Early machine-description setup, called from the ppc boot code with the
 * raw bootloader registers r3-r7 (ppcboot bd_t pointer, initrd start/end,
 * command line start/end -- see parse_bootinfo).  Fills in the ppc_md
 * callback table for this board and maps the bridge registers.
 */
void __init
platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
	      unsigned long r6, unsigned long r7)
{
	/* Capture bd_t, initrd range and command line before anything else. */
	parse_bootinfo(r3, r4, r5, r6, r7);

	isa_mem_base = 0;

	/* Machine-dependent callbacks for this board. */
	ppc_md.setup_arch = hdpu_setup_arch;
	ppc_md.init = hdpu_init2;
	ppc_md.show_cpuinfo = hdpu_show_cpuinfo;
	ppc_md.init_IRQ = hdpu_init_irq;
	ppc_md.get_irq = mv64360_get_irq;
	ppc_md.restart = hdpu_restart;
	ppc_md.power_off = hdpu_power_off;
	ppc_md.halt = hdpu_halt;
	ppc_md.find_end_of_memory = hdpu_find_end_of_memory;
	ppc_md.calibrate_decr = hdpu_calibrate_decr;
	ppc_md.setup_io_mappings = hdpu_map_io;

	/* Bridge registers are accessed virt == phys; hdpu_set_bat() below
	 * pins a DBAT over that region so this works before paging is up. */
	bh.p_base = CONFIG_MV64X60_NEW_BASE;
	bh.v_base = (unsigned long *)bh.p_base;

	hdpu_set_bat();

#if defined(CONFIG_SERIAL_TEXT_DEBUG)
	ppc_md.progress = hdpu_mpsc_progress;	/* embedded UART */
	mv64x60_progress_init(bh.p_base);
#endif				/* CONFIG_SERIAL_TEXT_DEBUG */

#ifdef CONFIG_SMP
	smp_ops = &hdpu_smp_ops;
#endif				/* CONFIG_SMP */

#if defined(CONFIG_SERIAL_MPSC) || defined(CONFIG_MV643XX_ETH)
	platform_notify = hdpu_platform_notify;
#endif
	return;
}

#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(CONFIG_SERIAL_MPSC_CONSOLE)
/* SMP safe version of the serial text debug routine.
Uses Semaphore 0 */ 952void hdpu_mpsc_progress(char *s, unsigned short hex) 953{ 954 while (mv64x60_read(&bh, MV64360_WHO_AM_I) != 955 mv64x60_read(&bh, MV64360_SEMAPHORE_0)) { 956 } 957 mv64x60_mpsc_progress(s, hex); 958 mv64x60_write(&bh, MV64360_SEMAPHORE_0, 0xff); 959} 960#endif 961 962static void hdpu_cpustate_set(unsigned char new_state) 963{ 964 unsigned int state = (new_state << 21); 965 mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (0xff << 21)); 966 mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, state); 967} 968 969#ifdef CONFIG_MTD_PHYSMAP 970static struct mtd_partition hdpu_partitions[] = { 971 { 972 .name = "Root FS", 973 .size = 0x03400000, 974 .offset = 0, 975 .mask_flags = 0, 976 },{ 977 .name = "User FS", 978 .size = 0x00800000, 979 .offset = 0x03400000, 980 .mask_flags = 0, 981 },{ 982 .name = "Kernel Image", 983 .size = 0x002C0000, 984 .offset = 0x03C00000, 985 .mask_flags = 0, 986 },{ 987 .name = "bootEnv", 988 .size = 0x00040000, 989 .offset = 0x03EC0000, 990 .mask_flags = 0, 991 },{ 992 .name = "bootROM", 993 .size = 0x00100000, 994 .offset = 0x03F00000, 995 .mask_flags = 0, 996 } 997}; 998 999static int __init hdpu_setup_mtd(void) 1000{ 1001 1002 physmap_set_partitions(hdpu_partitions, 5); 1003 return 0; 1004} 1005 1006arch_initcall(hdpu_setup_mtd); 1007#endif 1008 1009#ifdef CONFIG_HDPU_FEATURES 1010 1011static struct resource hdpu_cpustate_resources[] = { 1012 [0] = { 1013 .name = "addr base", 1014 .start = MV64x60_GPP_VALUE_SET, 1015 .end = MV64x60_GPP_VALUE_CLR + 1, 1016 .flags = IORESOURCE_MEM, 1017 }, 1018}; 1019 1020static struct resource hdpu_nexus_resources[] = { 1021 [0] = { 1022 .name = "nexus register", 1023 .start = HDPU_NEXUS_ID_BASE, 1024 .end = HDPU_NEXUS_ID_BASE + HDPU_NEXUS_ID_SIZE, 1025 .flags = IORESOURCE_MEM, 1026 }, 1027}; 1028 1029static struct platform_device hdpu_cpustate_device = { 1030 .name = HDPU_CPUSTATE_NAME, 1031 .id = 0, 1032 .num_resources = ARRAY_SIZE(hdpu_cpustate_resources), 1033 .resource = 
hdpu_cpustate_resources,
};

/* Nexus-ID platform device: exposes the CPLD register window declared in
 * hdpu_nexus_resources to the HDPU features driver. */
static struct platform_device hdpu_nexus_device = {
	.name = HDPU_NEXUS_NAME,
	.id = 0,
	.num_resources = ARRAY_SIZE(hdpu_nexus_resources),
	.resource = hdpu_nexus_resources,
};

/* Register the cpustate and nexus platform devices at arch_initcall time
 * so the corresponding drivers can bind to them. */
static int __init hdpu_add_pds(void)
{
	platform_device_register(&hdpu_cpustate_device);
	platform_device_register(&hdpu_nexus_device);
	return 0;
}

arch_initcall(hdpu_add_pds);
#endif