Merge HEAD from master.kernel.org:/pub/scm/linux/kernel/git/paulus/ppc64-2.6

+2245 -2061
-203
arch/ppc/boot/utils/addRamDisk.c
···
-#include <stdio.h>
-#include <stdlib.h>
-#include <netinet/in.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <string.h>
-
-#define ElfHeaderSize (64 * 1024)
-#define ElfPages (ElfHeaderSize / 4096)
-#define KERNELBASE (0xc0000000)
-
-void get4k(FILE *file, char *buf )
-{
-	unsigned j;
-	unsigned num = fread(buf, 1, 4096, file);
-	for ( j=num; j<4096; ++j )
-		buf[j] = 0;
-}
-
-void put4k(FILE *file, char *buf )
-{
-	fwrite(buf, 1, 4096, file);
-}
-
-void death(const char *msg, FILE *fdesc, const char *fname)
-{
-	printf(msg);
-	fclose(fdesc);
-	unlink(fname);
-	exit(1);
-}
-
-int main(int argc, char **argv)
-{
-	char inbuf[4096];
-	FILE *ramDisk = NULL;
-	FILE *inputVmlinux = NULL;
-	FILE *outputVmlinux = NULL;
-	unsigned i = 0;
-	u_int32_t ramFileLen = 0;
-	u_int32_t ramLen = 0;
-	u_int32_t roundR = 0;
-	u_int32_t kernelLen = 0;
-	u_int32_t actualKernelLen = 0;
-	u_int32_t round = 0;
-	u_int32_t roundedKernelLen = 0;
-	u_int32_t ramStartOffs = 0;
-	u_int32_t ramPages = 0;
-	u_int32_t roundedKernelPages = 0;
-	u_int32_t hvReleaseData = 0;
-	u_int32_t eyeCatcher = 0xc8a5d9c4;
-	u_int32_t naca = 0;
-	u_int32_t xRamDisk = 0;
-	u_int32_t xRamDiskSize = 0;
-	if ( argc < 2 ) {
-		printf("Name of RAM disk file missing.\n");
-		exit(1);
-	}
-
-	if ( argc < 3 ) {
-		printf("Name of vmlinux file missing.\n");
-		exit(1);
-	}
-
-	if ( argc < 4 ) {
-		printf("Name of vmlinux output file missing.\n");
-		exit(1);
-	}
-
-	ramDisk = fopen(argv[1], "r");
-	if ( ! ramDisk ) {
-		printf("RAM disk file \"%s\" failed to open.\n", argv[1]);
-		exit(1);
-	}
-	inputVmlinux = fopen(argv[2], "r");
-	if ( ! inputVmlinux ) {
-		printf("vmlinux file \"%s\" failed to open.\n", argv[2]);
-		exit(1);
-	}
-	outputVmlinux = fopen(argv[3], "w+");
-	if ( ! outputVmlinux ) {
-		printf("output vmlinux file \"%s\" failed to open.\n", argv[3]);
-		exit(1);
-	}
-	fseek(ramDisk, 0, SEEK_END);
-	ramFileLen = ftell(ramDisk);
-	fseek(ramDisk, 0, SEEK_SET);
-	printf("%s file size = %d\n", argv[1], ramFileLen);
-
-	ramLen = ramFileLen;
-
-	roundR = 4096 - (ramLen % 4096);
-	if ( roundR ) {
-		printf("Rounding RAM disk file up to a multiple of 4096, adding %d\n", roundR);
-		ramLen += roundR;
-	}
-
-	printf("Rounded RAM disk size is %d\n", ramLen);
-	fseek(inputVmlinux, 0, SEEK_END);
-	kernelLen = ftell(inputVmlinux);
-	fseek(inputVmlinux, 0, SEEK_SET);
-	printf("kernel file size = %d\n", kernelLen);
-	if ( kernelLen == 0 ) {
-		printf("You must have a linux kernel specified as argv[2]\n");
-		exit(1);
-	}
-
-	actualKernelLen = kernelLen - ElfHeaderSize;
-
-	printf("actual kernel length (minus ELF header) = %d\n", actualKernelLen);
-
-	round = actualKernelLen % 4096;
-	roundedKernelLen = actualKernelLen;
-	if ( round )
-		roundedKernelLen += (4096 - round);
-
-	printf("actual kernel length rounded up to a 4k multiple = %d\n", roundedKernelLen);
-
-	ramStartOffs = roundedKernelLen;
-	ramPages = ramLen / 4096;
-
-	printf("RAM disk pages to copy = %d\n", ramPages);
-
-	// Copy 64K ELF header
-	for (i=0; i<(ElfPages); ++i) {
-		get4k( inputVmlinux, inbuf );
-		put4k( outputVmlinux, inbuf );
-	}
-
-	roundedKernelPages = roundedKernelLen / 4096;
-
-	fseek(inputVmlinux, ElfHeaderSize, SEEK_SET);
-
-	for ( i=0; i<roundedKernelPages; ++i ) {
-		get4k( inputVmlinux, inbuf );
-		put4k( outputVmlinux, inbuf );
-	}
-
-	for ( i=0; i<ramPages; ++i ) {
-		get4k( ramDisk, inbuf );
-		put4k( outputVmlinux, inbuf );
-	}
-
-	/* Close the input files */
-	fclose(ramDisk);
-	fclose(inputVmlinux);
-	/* And flush the written output file */
-	fflush(outputVmlinux);
-
-	/* fseek to the hvReleaseData pointer */
-	fseek(outputVmlinux, ElfHeaderSize + 0x24, SEEK_SET);
-	if (fread(&hvReleaseData, 4, 1, outputVmlinux) != 1) {
-		death("Could not read hvReleaseData pointer\n", outputVmlinux, argv[3]);
-	}
-	hvReleaseData = ntohl(hvReleaseData); /* Convert to native int */
-	printf("hvReleaseData is at %08x\n", hvReleaseData);
-
-	/* fseek to the hvReleaseData */
-	fseek(outputVmlinux, ElfHeaderSize + hvReleaseData, SEEK_SET);
-	if (fread(inbuf, 0x40, 1, outputVmlinux) != 1) {
-		death("Could not read hvReleaseData\n", outputVmlinux, argv[3]);
-	}
-	/* Check hvReleaseData sanity */
-	if (memcmp(inbuf, &eyeCatcher, 4) != 0) {
-		death("hvReleaseData is invalid\n", outputVmlinux, argv[3]);
-	}
-	/* Get the naca pointer */
-	naca = ntohl(*((u_int32_t *) &inbuf[0x0c])) - KERNELBASE;
-	printf("naca is at %08x\n", naca);
-
-	/* fseek to the naca */
-	fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
-	if (fread(inbuf, 0x18, 1, outputVmlinux) != 1) {
-		death("Could not read naca\n", outputVmlinux, argv[3]);
-	}
-	xRamDisk = ntohl(*((u_int32_t *) &inbuf[0x0c]));
-	xRamDiskSize = ntohl(*((u_int32_t *) &inbuf[0x14]));
-	/* Make sure a RAM disk isn't already present */
-	if ((xRamDisk != 0) || (xRamDiskSize != 0)) {
-		death("RAM disk is already attached to this kernel\n", outputVmlinux, argv[3]);
-	}
-	/* Fill in the values */
-	*((u_int32_t *) &inbuf[0x0c]) = htonl(ramStartOffs);
-	*((u_int32_t *) &inbuf[0x14]) = htonl(ramPages);
-
-	/* Write out the new naca */
-	fflush(outputVmlinux);
-	fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
-	if (fwrite(inbuf, 0x18, 1, outputVmlinux) != 1) {
-		death("Could not write naca\n", outputVmlinux, argv[3]);
-	}
-	printf("RAM Disk of 0x%x pages size is attached to the kernel at offset 0x%08x\n",
-	       ramPages, ramStartOffs);
-
-	/* Done */
-	fclose(outputVmlinux);
-	/* Set permission to executable */
-	chmod(argv[3], S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH);
-
-	return 0;
-}
···
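The deleted tool's layout arithmetic is worth keeping in mind when reading its replacements: both the kernel image (minus the 64K ELF header) and the RAM disk are rounded up to 4096-byte pages before being concatenated, and the page offset/count are patched into the naca. Note a quirk in the RAM disk path above: roundR = 4096 - (ramLen % 4096) is 4096, not 0, for an already aligned input, so a full extra page is appended in that case. A minimal sketch of the round-up without that quirk (round_up_4k is my helper, not code from the tree):

    #include <stdint.h>

    /* Round a length up to the next 4096-byte page boundary. */
    static uint32_t round_up_4k(uint32_t len)
    {
    	uint32_t rem = len % 4096;
    	return rem ? len + (4096 - rem) : len;
    }

    /* The RAM disk then starts at the rounded kernel length:
     * ramStartOffs = round_up_4k(kernelLen - ElfHeaderSize); */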
+34 -40
arch/ppc64/Kconfig
···
 bool
 default y

-config MSCHUNKS
-	bool
-	depends on PPC_ISERIES
-	default y
-
-
 config PPC_RTAS
 	bool
 	depends on PPC_PSERIES || PPC_BPA
···

 	  If unsure, say Y. Only embedded should say N here.

+source "fs/Kconfig.binfmt"
+
+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs"
+	depends on SMP && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
+	select HOTPLUG
+	---help---
+	  Say Y here to be able to turn CPUs off and on.
+
+	  Say N if you are unsure.
+
+config PROC_DEVICETREE
+	bool "Support for Open Firmware device tree in /proc"
+	depends on !PPC_ISERIES
+	help
+	  This option adds a device-tree directory under /proc which contains
+	  an image of the device tree that the kernel copies from Open
+	  Firmware. If unsure, say Y here.
+
+config CMDLINE_BOOL
+	bool "Default bootloader kernel arguments"
+	depends on !PPC_ISERIES
+
+config CMDLINE
+	string "Initial kernel command string"
+	depends on CMDLINE_BOOL
+	default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
+	help
+	  On some platforms, there is currently no way for the boot loader to
+	  pass arguments to the kernel. For these platforms, you can supply
+	  some command-line options at build time by entering them here. In
+	  most cases you will need to specify the root device here.
+
 endmenu

 config ISA_DMA_API
 	bool
 	default y

-menu "General setup"
+menu "Bus Options"

 config ISA
 	bool
···
 	bool
 	default PCI

-source "fs/Kconfig.binfmt"
-
 source "drivers/pci/Kconfig"
-
-config HOTPLUG_CPU
-	bool "Support for hot-pluggable CPUs"
-	depends on SMP && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
-	select HOTPLUG
-	---help---
-	  Say Y here to be able to turn CPUs off and on.
-
-	  Say N if you are unsure.

 source "drivers/pcmcia/Kconfig"

 source "drivers/pci/hotplug/Kconfig"
-
-config PROC_DEVICETREE
-	bool "Support for Open Firmware device tree in /proc"
-	depends on !PPC_ISERIES
-	help
-	  This option adds a device-tree directory under /proc which contains
-	  an image of the device tree that the kernel copies from Open
-	  Firmware. If unsure, say Y here.
-
-config CMDLINE_BOOL
-	bool "Default bootloader kernel arguments"
-	depends on !PPC_ISERIES
-
-config CMDLINE
-	string "Initial kernel command string"
-	depends on CMDLINE_BOOL
-	default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
-	help
-	  On some platforms, there is currently no way for the boot loader to
-	  pass arguments to the kernel. For these platforms, you can supply
-	  some command-line options at build time by entering them here. In
-	  most cases you will need to specify the root device here.

 endmenu
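CONFIG_CMDLINE only matters if platform code actually consults it; the usual pattern is a string compiled into the image and used when firmware passes no arguments. A sketch of that pattern (the function and variable names here are illustrative, not quoted from this tree):

    #include <string.h>

    #ifdef CONFIG_CMDLINE_BOOL
    static char builtin_cmdline[] = CONFIG_CMDLINE;
    #endif

    static void setup_cmdline(const char *from_firmware, char *cmd_line)
    {
    	strcpy(cmd_line, from_firmware);
    #ifdef CONFIG_CMDLINE_BOOL
    	/* fall back to the Kconfig default when firmware gave us nothing */
    	if (cmd_line[0] == '\0')
    		strcpy(cmd_line, builtin_cmdline);
    #endif
    }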
+2 -2
arch/ppc64/boot/Makefile
···


 HOSTCC		:= gcc
-BOOTCFLAGS	:= $(HOSTCFLAGS) $(LINUXINCLUDE) -fno-builtin
-BOOTAFLAGS	:= -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional
+BOOTCFLAGS	:= $(HOSTCFLAGS) -fno-builtin -nostdinc -isystem $(shell $(CROSS32CC) -print-file-name=include)
+BOOTAFLAGS	:= -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
 BOOTLFLAGS	:= -Ttext 0x00400000 -e _start -T $(srctree)/$(src)/zImage.lds
 OBJCOPYFLAGS	:= contents,alloc,load,readonly,data
+2 -2
arch/ppc64/boot/addnote.c
···
 	PUT_32BE(ns, strlen(arch) + 1);
 	PUT_32BE(ns + 4, N_DESCR * 4);
 	PUT_32BE(ns + 8, 0x1275);
-	strcpy(&buf[ns + 12], arch);
+	strcpy((char *) &buf[ns + 12], arch);
 	ns += 12 + strlen(arch) + 1;
 	for (i = 0; i < N_DESCR; ++i, ns += 4)
 		PUT_32BE(ns, descr[i]);
···
 	PUT_32BE(ns, strlen(rpaname) + 1);
 	PUT_32BE(ns + 4, sizeof(rpanote));
 	PUT_32BE(ns + 8, 0x12759999);
-	strcpy(&buf[ns + 12], rpaname);
+	strcpy((char *) &buf[ns + 12], rpaname);
 	ns += 12 + ROUNDUP(strlen(rpaname) + 1);
 	for (i = 0; i < N_RPA_DESCR; ++i, ns += 4)
 		PUT_32BE(ns, rpanote[i]);
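The added casts silence the pointer-signedness mismatch that arises when the image buffer is not plain char. A minimal illustration of the warning being addressed (buf's element type here is my assumption for the demo, not a quote of addnote.c):

    #include <string.h>

    static unsigned char buf[512 * 1024];	/* image held in an unsigned buffer */

    void write_name(const char *arch, int ns)
    {
    	/* strcpy(&buf[ns + 12], arch);       -- -Wpointer-sign warning       */
    	strcpy((char *) &buf[ns + 12], arch); /* explicit cast, as in the diff */
    }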
+1 -1
arch/ppc64/boot/crt0.S
···
 *	NOTE: this code runs in 32 bit mode and is packaged as ELF32.
 */

-#include <asm/ppc_asm.h>
+#include "ppc_asm.h"

 	.text
 	.globl _start
+1 -1
arch/ppc64/boot/div64.S
···
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
-#include <asm/ppc_asm.h>
+#include "ppc_asm.h"

 	.globl __div64_32
 __div64_32:
+149
arch/ppc64/boot/elf.h
···
···
+#ifndef _PPC_BOOT_ELF_H_
+#define _PPC_BOOT_ELF_H_
+
+/* 32-bit ELF base types. */
+typedef unsigned int Elf32_Addr;
+typedef unsigned short Elf32_Half;
+typedef unsigned int Elf32_Off;
+typedef signed int Elf32_Sword;
+typedef unsigned int Elf32_Word;
+
+/* 64-bit ELF base types. */
+typedef unsigned long long Elf64_Addr;
+typedef unsigned short Elf64_Half;
+typedef signed short Elf64_SHalf;
+typedef unsigned long long Elf64_Off;
+typedef signed int Elf64_Sword;
+typedef unsigned int Elf64_Word;
+typedef unsigned long long Elf64_Xword;
+typedef signed long long Elf64_Sxword;
+
+/* These constants are for the segment types stored in the image headers */
+#define PT_NULL    0
+#define PT_LOAD    1
+#define PT_DYNAMIC 2
+#define PT_INTERP  3
+#define PT_NOTE    4
+#define PT_SHLIB   5
+#define PT_PHDR    6
+#define PT_TLS     7		/* Thread local storage segment */
+#define PT_LOOS    0x60000000	/* OS-specific */
+#define PT_HIOS    0x6fffffff	/* OS-specific */
+#define PT_LOPROC  0x70000000
+#define PT_HIPROC  0x7fffffff
+#define PT_GNU_EH_FRAME 0x6474e550
+
+#define PT_GNU_STACK (PT_LOOS + 0x474e551)
+
+/* These constants define the different elf file types */
+#define ET_NONE   0
+#define ET_REL    1
+#define ET_EXEC   2
+#define ET_DYN    3
+#define ET_CORE   4
+#define ET_LOPROC 0xff00
+#define ET_HIPROC 0xffff
+
+/* These constants define the various ELF target machines */
+#define EM_NONE   0
+#define EM_PPC    20	/* PowerPC */
+#define EM_PPC64  21	/* PowerPC64 */
+
+#define EI_NIDENT 16
+
+typedef struct elf32_hdr {
+	unsigned char e_ident[EI_NIDENT];
+	Elf32_Half e_type;
+	Elf32_Half e_machine;
+	Elf32_Word e_version;
+	Elf32_Addr e_entry;	/* Entry point */
+	Elf32_Off e_phoff;
+	Elf32_Off e_shoff;
+	Elf32_Word e_flags;
+	Elf32_Half e_ehsize;
+	Elf32_Half e_phentsize;
+	Elf32_Half e_phnum;
+	Elf32_Half e_shentsize;
+	Elf32_Half e_shnum;
+	Elf32_Half e_shstrndx;
+} Elf32_Ehdr;
+
+typedef struct elf64_hdr {
+	unsigned char e_ident[16];	/* ELF "magic number" */
+	Elf64_Half e_type;
+	Elf64_Half e_machine;
+	Elf64_Word e_version;
+	Elf64_Addr e_entry;	/* Entry point virtual address */
+	Elf64_Off e_phoff;	/* Program header table file offset */
+	Elf64_Off e_shoff;	/* Section header table file offset */
+	Elf64_Word e_flags;
+	Elf64_Half e_ehsize;
+	Elf64_Half e_phentsize;
+	Elf64_Half e_phnum;
+	Elf64_Half e_shentsize;
+	Elf64_Half e_shnum;
+	Elf64_Half e_shstrndx;
+} Elf64_Ehdr;
+
+/* These constants define the permissions on sections in the program
+   header, p_flags. */
+#define PF_R 0x4
+#define PF_W 0x2
+#define PF_X 0x1
+
+typedef struct elf32_phdr {
+	Elf32_Word p_type;
+	Elf32_Off p_offset;
+	Elf32_Addr p_vaddr;
+	Elf32_Addr p_paddr;
+	Elf32_Word p_filesz;
+	Elf32_Word p_memsz;
+	Elf32_Word p_flags;
+	Elf32_Word p_align;
+} Elf32_Phdr;
+
+typedef struct elf64_phdr {
+	Elf64_Word p_type;
+	Elf64_Word p_flags;
+	Elf64_Off p_offset;	/* Segment file offset */
+	Elf64_Addr p_vaddr;	/* Segment virtual address */
+	Elf64_Addr p_paddr;	/* Segment physical address */
+	Elf64_Xword p_filesz;	/* Segment size in file */
+	Elf64_Xword p_memsz;	/* Segment size in memory */
+	Elf64_Xword p_align;	/* Segment alignment, file & memory */
+} Elf64_Phdr;
+
+#define EI_MAG0		0	/* e_ident[] indexes */
+#define EI_MAG1		1
+#define EI_MAG2		2
+#define EI_MAG3		3
+#define EI_CLASS	4
+#define EI_DATA		5
+#define EI_VERSION	6
+#define EI_OSABI	7
+#define EI_PAD		8
+
+#define ELFMAG0		0x7f	/* EI_MAG */
+#define ELFMAG1		'E'
+#define ELFMAG2		'L'
+#define ELFMAG3		'F'
+#define ELFMAG		"\177ELF"
+#define SELFMAG		4
+
+#define ELFCLASSNONE	0	/* EI_CLASS */
+#define ELFCLASS32	1
+#define ELFCLASS64	2
+#define ELFCLASSNUM	3
+
+#define ELFDATANONE	0	/* e_ident[EI_DATA] */
+#define ELFDATA2LSB	1
+#define ELFDATA2MSB	2
+
+#define EV_NONE		0	/* e_version, EI_VERSION */
+#define EV_CURRENT	1
+#define EV_NUM		2
+
+#define ELFOSABI_NONE	0
+#define ELFOSABI_LINUX	3
+
+#endif /* _PPC_BOOT_ELF_H_ */
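A quick consumer of this header, for orientation: the wrapper validates e_ident before walking program headers. A minimal sketch of such a check (my code, not the wrapper's; the direct e_machine compare assumes a big-endian host, which holds for the ELF32 boot wrapper itself):

    #include "elf.h"
    #include "string.h"

    /* Return nonzero if buf starts a big-endian 64-bit PowerPC ELF image. */
    static int is_ppc64_elf(const unsigned char *buf)
    {
    	const Elf64_Ehdr *e = (const Elf64_Ehdr *) buf;

    	return memcmp(e->e_ident, ELFMAG, SELFMAG) == 0 &&
    	       e->e_ident[EI_CLASS] == ELFCLASS64 &&
    	       e->e_ident[EI_DATA] == ELFDATA2MSB &&
    	       e->e_machine == EM_PPC64;
    }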
+18 -33
arch/ppc64/boot/main.c
···
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
-#include "ppc32-types.h"
+#include <stdarg.h>
+#include <stddef.h>
+#include "elf.h"
+#include "page.h"
+#include "string.h"
+#include "stdio.h"
+#include "prom.h"
 #include "zlib.h"
-#include <linux/elf.h>
-#include <linux/string.h>
-#include <asm/processor.h>
-#include <asm/page.h>

-extern void *finddevice(const char *);
-extern int getprop(void *, const char *, void *, int);
-extern void printf(const char *fmt, ...);
-extern int sprintf(char *buf, const char *fmt, ...);
-void gunzip(void *, int, unsigned char *, int *);
-void *claim(unsigned int, unsigned int, unsigned int);
-void flush_cache(void *, unsigned long);
-void pause(void);
-extern void exit(void);
+static void gunzip(void *, int, unsigned char *, int *);
+extern void flush_cache(void *, unsigned long);

-unsigned long strlen(const char *s);
-void *memmove(void *dest, const void *src, unsigned long n);
-void *memcpy(void *dest, const void *src, unsigned long n);

 /* Value picked to match that used by yaboot */
 #define PROG_START	0x01400000
 #define RAM_END		(256<<20)	// Fixme: use OF */

-char *avail_ram;
-char *begin_avail, *end_avail;
-char *avail_high;
-unsigned int heap_use;
-unsigned int heap_max;
+static char *avail_ram;
+static char *begin_avail, *end_avail;
+static char *avail_high;
+static unsigned int heap_use;
+static unsigned int heap_max;

 extern char _start[];
 extern char _vmlinux_start[];
···
 	unsigned long size;
 	unsigned long memsize;
 };
-struct addr_range vmlinux = {0, 0, 0};
-struct addr_range vmlinuz = {0, 0, 0};
-struct addr_range initrd = {0, 0, 0};
+static struct addr_range vmlinux = {0, 0, 0};
+static struct addr_range vmlinuz = {0, 0, 0};
+static struct addr_range initrd = {0, 0, 0};

 static char scratch[128<<10];	/* 128kB of scratch space for gunzip */

···
 		void *,
 		void *);

-
-int (*prom)(void *);
-
-void *chosen_handle;
-void *stdin;
-void *stdout;
-void *stderr;

 #undef DEBUG

···

 #define DEFLATED	8

-void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
+static void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
 {
 	z_stream s;
 	int r, i, flags;
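gunzip() here inflates a gzip-wrapped vmlinux into its destination; the DEFLATED define is the gzip method byte it expects. The shape of the framing check looks roughly like this sketch, which assumes the standard gzip header layout rather than quoting the wrapper's exact code:

    #define DEFLATED 8

    /* Return 0 if src starts a gzip stream we can inflate. */
    static int check_gzip_header(const unsigned char *src)
    {
    	if (src[0] != 0x1f || src[1] != 0x8b)	/* gzip magic */
    		return -1;
    	if (src[2] != DEFLATED)			/* compression method */
    		return -1;
    	return 0;
    }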
+34
arch/ppc64/boot/page.h
···
···
+#ifndef _PPC_BOOT_PAGE_H
+#define _PPC_BOOT_PAGE_H
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef __ASSEMBLY__
+#define ASM_CONST(x) x
+#else
+#define __ASM_CONST(x) x##UL
+#define ASM_CONST(x) __ASM_CONST(x)
+#endif
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT	12
+#define PAGE_SIZE	(ASM_CONST(1) << PAGE_SHIFT)
+#define PAGE_MASK	(~(PAGE_SIZE-1))
+
+/* align addr on a size boundary - adjust address up/down if needed */
+#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
+#define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))
+
+/* align addr on a size boundary - adjust address up if needed */
+#define _ALIGN(addr,size)	_ALIGN_UP(addr,size)
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)	_ALIGN(addr, PAGE_SIZE)
+
+#endif /* _PPC_BOOT_PAGE_H */
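The alignment macros are the usual power-of-two mask tricks; the values below are worked by hand as a host-side sanity check of the same definitions (my test, not part of the tree):

    #include <assert.h>

    #define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
    #define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))

    int main(void)
    {
    	assert(_ALIGN_UP(0x1234, 0x1000) == 0x2000);
    	assert(_ALIGN_UP(0x2000, 0x1000) == 0x2000);	/* aligned: unchanged */
    	assert(_ALIGN_DOWN(0x1234, 0x1000) == 0x1000);
    	return 0;
    }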
-36
arch/ppc64/boot/ppc32-types.h
···
-#ifndef _PPC64_TYPES_H
-#define _PPC64_TYPES_H
-
-typedef __signed__ char __s8;
-typedef unsigned char __u8;
-
-typedef __signed__ short __s16;
-typedef unsigned short __u16;
-
-typedef __signed__ int __s32;
-typedef unsigned int __u32;
-
-typedef __signed__ long long __s64;
-typedef unsigned long long __u64;
-
-typedef signed char s8;
-typedef unsigned char u8;
-
-typedef signed short s16;
-typedef unsigned short u16;
-
-typedef signed int s32;
-typedef unsigned int u32;
-
-typedef signed long long s64;
-typedef unsigned long long u64;
-
-typedef struct {
-	__u32 u[4];
-} __attribute((aligned(16))) __vector128;
-
-#define BITS_PER_LONG 32
-
-typedef __vector128 vector128;
-
-#endif /* _PPC64_TYPES_H */
···
+62
arch/ppc64/boot/ppc_asm.h
···
···
+#ifndef _PPC64_PPC_ASM_H
+#define _PPC64_PPC_ASM_H
+/*
+ *
+ * Definitions used by various bits of low-level assembly code on PowerPC.
+ *
+ * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Condition Register Bit Fields */
+
+#define cr0	0
+#define cr1	1
+#define cr2	2
+#define cr3	3
+#define cr4	4
+#define cr5	5
+#define cr6	6
+#define cr7	7
+
+
+/* General Purpose Registers (GPRs) */
+
+#define r0	0
+#define r1	1
+#define r2	2
+#define r3	3
+#define r4	4
+#define r5	5
+#define r6	6
+#define r7	7
+#define r8	8
+#define r9	9
+#define r10	10
+#define r11	11
+#define r12	12
+#define r13	13
+#define r14	14
+#define r15	15
+#define r16	16
+#define r17	17
+#define r18	18
+#define r19	19
+#define r20	20
+#define r21	21
+#define r22	22
+#define r23	23
+#define r24	24
+#define r25	25
+#define r26	26
+#define r27	27
+#define r28	28
+#define r29	29
+#define r30	30
+#define r31	31
+
+#endif /* _PPC64_PPC_ASM_H */
+27 -169
arch/ppc64/boot/prom.c
···
 * 2 of the License, or (at your option) any later version.
 */
 #include <stdarg.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
-
-extern __u32 __div64_32(unsigned long long *dividend, __u32 divisor);
-
-/* The unnecessary pointer compare is there
- * to check for type safety (n must be 64bit)
- */
-# define do_div(n,base) ({				\
-	__u32 __base = (base);				\
-	__u32 __rem;					\
-	(void)(((typeof((n)) *)0) == ((unsigned long long *)0)); \
-	if (((n) >> 32) == 0) {				\
-		__rem = (__u32)(n) % __base;		\
-		(n) = (__u32)(n) / __base;		\
-	} else						\
-		__rem = __div64_32(&(n), __base);	\
-	__rem;						\
- })
+#include <stddef.h>
+#include "string.h"
+#include "stdio.h"
+#include "prom.h"

 int (*prom)(void *);

 void *chosen_handle;
+
 void *stdin;
 void *stdout;
 void *stderr;

-void exit(void);
-void *finddevice(const char *name);
-int getprop(void *phandle, const char *name, void *buf, int buflen);
-void chrpboot(int a1, int a2, void *prom);	/* in main.c */
-
-int printf(char *fmt, ...);
-
-/* there is no convenient header to get this from... -- paulus */
-extern unsigned long strlen(const char *);

 int
 write(void *handle, void *ptr, int nb)
···
 	return write(f, str, n) == n? 0: -1;
 }

-int
-readchar(void)
-{
-	char ch;
-
-	for (;;) {
-		switch (read(stdin, &ch, 1)) {
-		case 1:
-			return ch;
-		case -1:
-			printf("read(stdin) returned -1\r\n");
-			return -1;
-		}
-	}
-}
-
-static char line[256];
-static char *lineptr;
-static int lineleft;
-
-int
-getchar(void)
-{
-	int c;
-
-	if (lineleft == 0) {
-		lineptr = line;
-		for (;;) {
-			c = readchar();
-			if (c == -1 || c == 4)
-				break;
-			if (c == '\r' || c == '\n') {
-				*lineptr++ = '\n';
-				putchar('\n');
-				break;
-			}
-			switch (c) {
-			case 0177:
-			case '\b':
-				if (lineptr > line) {
-					putchar('\b');
-					putchar(' ');
-					putchar('\b');
-					--lineptr;
-				}
-				break;
-			case 'U' & 0x1F:
-				while (lineptr > line) {
-					putchar('\b');
-					putchar(' ');
-					putchar('\b');
-					--lineptr;
-				}
-				break;
-			default:
-				if (lineptr >= &line[sizeof(line) - 1])
-					putchar('\a');
-				else {
-					putchar(c);
-					*lineptr++ = c;
-				}
-			}
-		}
-		lineleft = lineptr - line;
-		lineptr = line;
-	}
-	if (lineleft == 0)
-		return -1;
-	--lineleft;
-	return *lineptr++;
-}
-
-
-
-/* String functions lifted from lib/vsprintf.c and lib/ctype.c */
-unsigned char _ctype[] = {
-_C,_C,_C,_C,_C,_C,_C,_C,			/* 0-7 */
-_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C,		/* 8-15 */
-_C,_C,_C,_C,_C,_C,_C,_C,			/* 16-23 */
-_C,_C,_C,_C,_C,_C,_C,_C,			/* 24-31 */
-_S|_SP,_P,_P,_P,_P,_P,_P,_P,			/* 32-39 */
-_P,_P,_P,_P,_P,_P,_P,_P,			/* 40-47 */
-_D,_D,_D,_D,_D,_D,_D,_D,			/* 48-55 */
-_D,_D,_P,_P,_P,_P,_P,_P,			/* 56-63 */
-_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U,	/* 64-71 */
-_U,_U,_U,_U,_U,_U,_U,_U,			/* 72-79 */
-_U,_U,_U,_U,_U,_U,_U,_U,			/* 80-87 */
-_U,_U,_U,_P,_P,_P,_P,_P,			/* 88-95 */
-_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L,	/* 96-103 */
-_L,_L,_L,_L,_L,_L,_L,_L,			/* 104-111 */
-_L,_L,_L,_L,_L,_L,_L,_L,			/* 112-119 */
-_L,_L,_L,_P,_P,_P,_P,_C,			/* 120-127 */
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,		/* 128-143 */
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,		/* 144-159 */
-_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,	/* 160-175 */
-_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,	/* 176-191 */
-_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,	/* 192-207 */
-_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L,	/* 208-223 */
-_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,	/* 224-239 */
-_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L};	/* 240-255 */
-
 size_t strnlen(const char * s, size_t count)
 {
 	const char *sc;
···
 	return sc - s;
 }

-unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base)
-{
-	unsigned long result = 0,value;
-
-	if (!base) {
-		base = 10;
-		if (*cp == '0') {
-			base = 8;
-			cp++;
-			if ((*cp == 'x') && isxdigit(cp[1])) {
-				cp++;
-				base = 16;
-			}
-		}
-	}
-	while (isxdigit(*cp) &&
-	       (value = isdigit(*cp) ? *cp-'0' : toupper(*cp)-'A'+10) < base) {
-		result = result*base + value;
-		cp++;
-	}
-	if (endp)
-		*endp = (char *)cp;
-	return result;
-}
-
-long simple_strtol(const char *cp,char **endp,unsigned int base)
-{
-	if(*cp=='-')
-		return -simple_strtoul(cp+1,endp,base);
-	return simple_strtoul(cp,endp,base);
-}
+extern unsigned int __div64_32(unsigned long long *dividend,
+			       unsigned int divisor);
+
+/* The unnecessary pointer compare is there
+ * to check for type safety (n must be 64bit)
+ */
+# define do_div(n,base) ({				\
+	unsigned int __base = (base);			\
+	unsigned int __rem;				\
+	(void)(((typeof((n)) *)0) == ((unsigned long long *)0)); \
+	if (((n) >> 32) == 0) {				\
+		__rem = (unsigned int)(n) % __base;	\
+		(n) = (unsigned int)(n) / __base;	\
+	} else						\
+		__rem = __div64_32(&(n), __base);	\
+	__rem;						\
+ })

 static int skip_atoi(const char **s)
 {
-	int i=0;
+	int i, c;

-	while (isdigit(**s))
-		i = i*10 + *((*s)++) - '0';
+	for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s)
+		i = i*10 + c - '0';
 	return i;
 }

···
 	return str;
 }

-/* Forward decl. needed for IP address printing stuff... */
-int sprintf(char * buf, const char *fmt, ...);
-
 int vsprintf(char *buf, const char *fmt, va_list args)
 {
 	int len;
···

 		/* get field width */
 		field_width = -1;
-		if (isdigit(*fmt))
+		if ('0' <= *fmt && *fmt <= '9')
 			field_width = skip_atoi(&fmt);
 		else if (*fmt == '*') {
 			++fmt;
···
 		precision = -1;
 		if (*fmt == '.') {
 			++fmt;
-			if (isdigit(*fmt))
+			if ('0' <= *fmt && *fmt <= '9')
 				precision = skip_atoi(&fmt);
 			else if (*fmt == '*') {
 				++fmt;
···
 static char sprint_buf[1024];

 int
-printf(char *fmt, ...)
+printf(const char *fmt, ...)
 {
 	va_list args;
 	int n;
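do_div() is the kernel idiom this wrapper now carries locally for its 32-bit environment: it divides a 64-bit value in place and evaluates to the remainder, which is exactly what a printf-style number formatter needs to peel off decimal digits. A small usage sketch (my example, not wrapper code; it relies on the GNU statement-expression form of the macro above):

    /* Store the decimal digits of a 64-bit value, least significant first. */
    static int u64_digits(unsigned long long n, char *buf)
    {
    	int i = 0;

    	do {
    		buf[i++] = '0' + do_div(n, 10);	/* n /= 10; yields n % 10 */
    	} while (n);
    	return i;	/* digit count; digits are in reverse order */
    }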
+18
arch/ppc64/boot/prom.h
···
···
+#ifndef _PPC_BOOT_PROM_H_
+#define _PPC_BOOT_PROM_H_
+
+extern int (*prom) (void *);
+extern void *chosen_handle;
+
+extern void *stdin;
+extern void *stdout;
+extern void *stderr;
+
+extern int write(void *handle, void *ptr, int nb);
+extern int read(void *handle, void *ptr, int nb);
+extern void exit(void);
+extern void pause(void);
+extern void *finddevice(const char *);
+extern void *claim(unsigned long virt, unsigned long size, unsigned long align);
+extern int getprop(void *phandle, const char *name, void *buf, int buflen);
+#endif /* _PPC_BOOT_PROM_H_ */
+16
arch/ppc64/boot/stdio.h
···
···
+#ifndef _PPC_BOOT_STDIO_H_
+#define _PPC_BOOT_STDIO_H_
+
+extern int printf(const char *fmt, ...);
+
+extern int sprintf(char *buf, const char *fmt, ...);
+
+extern int vsprintf(char *buf, const char *fmt, va_list args);
+
+extern int putc(int c, void *f);
+extern int putchar(int c);
+extern int getchar(void);
+
+extern int fputs(char *str, void *f);
+
+#endif /* _PPC_BOOT_STDIO_H_ */
+1 -1
arch/ppc64/boot/string.S
···
 *	NOTE: this code runs in 32 bit mode and is packaged as ELF32.
 */

-#include <asm/ppc_asm.h>
+#include "ppc_asm.h"

 	.text
 	.globl strcpy
+16
arch/ppc64/boot/string.h
···
···
+#ifndef _PPC_BOOT_STRING_H_
+#define _PPC_BOOT_STRING_H_
+
+extern char *strcpy(char *dest, const char *src);
+extern char *strncpy(char *dest, const char *src, size_t n);
+extern char *strcat(char *dest, const char *src);
+extern int strcmp(const char *s1, const char *s2);
+extern size_t strlen(const char *s);
+extern size_t strnlen(const char *s, size_t count);
+
+extern void *memset(void *s, int c, size_t n);
+extern void *memmove(void *dest, const void *src, unsigned long n);
+extern void *memcpy(void *dest, const void *src, unsigned long n);
+extern int memcmp(const void *s1, const void *s2, size_t n);
+
+#endif /* _PPC_BOOT_STRING_H_ */
+1 -1
arch/ppc64/boot/zlib.c
···

 /* Diagnostic functions */
 #ifdef DEBUG_ZLIB
-#  include <stdio.h>
+#  include "stdio.h"
 #  ifndef verbose
 #    define verbose 0
 #  endif
-1
arch/ppc64/configs/iSeries_defconfig
···
 # CONFIG_HZ_1000 is not set
 CONFIG_HZ=100
 CONFIG_GENERIC_HARDIRQS=y
-CONFIG_MSCHUNKS=y
 CONFIG_LPARCFG=y
 CONFIG_SECCOMP=y
 CONFIG_ISA_DMA_API=y
+11 -26
arch/ppc64/kernel/LparData.c
···
 		0xf4, 0x4b, 0xf6, 0xf4 },
 };

+/*
+ * The NACA. The first dword of the naca is required by the iSeries
+ * hypervisor to point to itVpdAreas. The hypervisor finds the NACA
+ * through the pointer in hvReleaseData.
+ */
+struct naca_struct naca = {
+	.xItVpdAreas = &itVpdAreas,
+	.xRamDisk = 0,
+	.xRamDiskSize = 0,
+};
+
 extern void system_reset_iSeries(void);
 extern void machine_check_iSeries(void);
 extern void data_access_iSeries(void);
···
 		0,0
 	}
 };
-
-struct msChunks msChunks;
-EXPORT_SYMBOL(msChunks);
-
-/* Depending on whether this is called from iSeries or pSeries setup
- * code, the location of the msChunks struct may or may not have
- * to be reloc'd, so we force the caller to do that for us by passing
- * in a pointer to the structure.
- */
-unsigned long
-msChunks_alloc(unsigned long mem, unsigned long num_chunks, unsigned long chunk_size)
-{
-	unsigned long offset = reloc_offset();
-	struct msChunks *_msChunks = PTRRELOC(&msChunks);
-
-	_msChunks->num_chunks = num_chunks;
-	_msChunks->chunk_size = chunk_size;
-	_msChunks->chunk_shift = __ilog2(chunk_size);
-	_msChunks->chunk_mask = (1UL<<_msChunks->chunk_shift)-1;
-
-	mem = _ALIGN(mem, sizeof(msChunks_entry));
-	_msChunks->abs = (msChunks_entry *)(mem + offset);
-	mem += num_chunks * sizeof(msChunks_entry);
-
-	return mem;
-}
+5 -2
arch/ppc64/kernel/Makefile
···
 			udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
 			ptrace32.o signal32.o rtc.o init_task.o \
 			lmb.o cputable.o cpu_setup_power4.o idle_power4.o \
-			iommu.o sysfs.o vdso.o pmc.o
+			iommu.o sysfs.o vdso.o pmc.o firmware.o
 obj-y			+= vdso32/ vdso64/

 obj-$(CONFIG_PPC_OF)	+= of_device.o
···
 obj-$(CONFIG_HVC_CONSOLE)	+= hvconsole.o
 obj-$(CONFIG_BOOTX_TEXT)	+= btext.o
 obj-$(CONFIG_HVCS)		+= hvcserver.o
-obj-$(CONFIG_IBMVIO)	+= vio.o
+
+vio-obj-$(CONFIG_PPC_PSERIES)	+= pSeries_vio.o
+vio-obj-$(CONFIG_PPC_ISERIES)	+= iSeries_vio.o
+obj-$(CONFIG_IBMVIO)	+= vio.o $(vio-obj-y)
 obj-$(CONFIG_XICS)	+= xics.o
 obj-$(CONFIG_MPIC)	+= mpic.o
+2 -1
arch/ppc64/kernel/asm-offsets.c
···
 	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
 #ifdef CONFIG_HUGETLB_PAGE
-	DEFINE(PACAHTLBSEGS, offsetof(struct paca_struct, context.htlb_segs));
+	DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
+	DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
 #endif /* CONFIG_HUGETLB_PAGE */
 	DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
 	DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
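asm-offsets.c exists to export C structure offsets (like the paca fields touched here) to assembly such as head.S. The kernel's trick is a DEFINE macro that plants each constant in the compiler's assembly output, which the build then rewrites into a generated header; a sketch of the idiom (the macro body is cited from memory of the kernel's usual definition):

    #include <stddef.h>

    #define DEFINE(sym, val) \
    	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    struct example { long a; long b; };

    int main(void)
    {
    	/* Compiled with -S, this leaves "->EXAMPLE_B 8 ..." in the .s
    	 * output, which a sed script turns into "#define EXAMPLE_B 8". */
    	DEFINE(EXAMPLE_B, offsetof(struct example, b));
    	return 0;
    }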
+1 -39
arch/ppc64/kernel/cputable.c
···
 *
 * Modifications for ppc64:
 *  Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
- * 
+ *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
···
 		.icache_bsize = 128,
 		.dcache_bsize = 128,
 		.cpu_setup = __setup_cpu_power3,
-		.firmware_features = COMMON_PPC64_FW,
 	},
 	{ /* Power3+ */
 		.pvr_mask = 0xffff0000,
···
 		.icache_bsize = 128,
 		.dcache_bsize = 128,
 		.cpu_setup = __setup_cpu_power3,
-		.firmware_features = COMMON_PPC64_FW,
 	},
 	{ /* Northstar */
 		.pvr_mask = 0xffff0000,
···
 		.icache_bsize = 128,
 		.dcache_bsize = 128,
 		.cpu_setup = __setup_cpu_power3,
-		.firmware_features = COMMON_PPC64_FW,
 	},
 	{ /* Pulsar */
 		.pvr_mask = 0xffff0000,
···
 		.icache_bsize = 128,
 		.dcache_bsize = 128,
 		.cpu_setup = __setup_cpu_power3,
-		.firmware_features = COMMON_PPC64_FW,
 	},
 	{ /* I-star */
 		.pvr_mask = 0xffff0000,
···
 		.icache_bsize = 128,
 		.dcache_bsize = 128,
 		.cpu_setup = __setup_cpu_power3,
-		.firmware_features = COMMON_PPC64_FW,
 	},
 	{ /* S-star */
 		.pvr_mask = 0xffff0000,
···
 		.icache_bsize = 128,
 		.dcache_bsize = 128,
 		.cpu_setup = __setup_cpu_power3,
-		.firmware_features = COMMON_PPC64_FW,
 	},
 	{ /* Power4 */
 		.pvr_mask = 0xffff0000,
···
 		.icache_bsize = 128,
 		.dcache_bsize = 128,
 		.cpu_setup = __setup_cpu_power4,
-		.firmware_features = COMMON_PPC64_FW,
 	},
 	{ /* Power4+ */
 		.pvr_mask = 0xffff0000,
···
 		.icache_bsize = 128,
 		.dcache_bsize = 128,
 		.cpu_setup = __setup_cpu_power4,
-		.firmware_features = COMMON_PPC64_FW,
 	},
 	{ /* PPC970 */
 		.pvr_mask = 0xffff0000,
···
 		.icache_bsize = 128,
 		.dcache_bsize = 128,
 		.cpu_setup = __setup_cpu_ppc970,
-		.firmware_features = COMMON_PPC64_FW,
 	},
 	{ /* PPC970FX */
 		.pvr_mask = 0xffff0000,
···
 		.icache_bsize = 128,
 		.dcache_bsize = 128,
 		.cpu_setup = __setup_cpu_ppc970,
-		.firmware_features = COMMON_PPC64_FW,
 	},
 	{ /* PPC970MP */
 		.pvr_mask = 0xffff0000,
···
 		.icache_bsize = 128,
 		.dcache_bsize = 128,
 		.cpu_setup = __setup_cpu_ppc970,
-		.firmware_features = COMMON_PPC64_FW,
 	},
 	{ /* Power5 */
 		.pvr_mask = 0xffff0000,
···
 		.icache_bsize = 128,
 		.dcache_bsize = 128,
 		.cpu_setup = __setup_cpu_power4,
-		.firmware_features = COMMON_PPC64_FW,
 	},
 	{ /* Power5 */
 		.pvr_mask = 0xffff0000,
···
 		.icache_bsize = 128,
 		.dcache_bsize = 128,
 		.cpu_setup = __setup_cpu_power4,
-		.firmware_features = COMMON_PPC64_FW,
 	},
 	{ /* BE DD1.x */
 		.pvr_mask = 0xffff0000,
···
 		.icache_bsize = 128,
 		.dcache_bsize = 128,
 		.cpu_setup = __setup_cpu_be,
-		.firmware_features = COMMON_PPC64_FW,
 	},
 	{ /* default match */
 		.pvr_mask = 0x00000000,
···
 		.icache_bsize = 128,
 		.dcache_bsize = 128,
 		.cpu_setup = __setup_cpu_power4,
-		.firmware_features = COMMON_PPC64_FW,
 	}
-};
-
-firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = {
-	{FW_FEATURE_PFT, "hcall-pft"},
-	{FW_FEATURE_TCE, "hcall-tce"},
-	{FW_FEATURE_SPRG0, "hcall-sprg0"},
-	{FW_FEATURE_DABR, "hcall-dabr"},
-	{FW_FEATURE_COPY, "hcall-copy"},
-	{FW_FEATURE_ASR, "hcall-asr"},
-	{FW_FEATURE_DEBUG, "hcall-debug"},
-	{FW_FEATURE_PERF, "hcall-perf"},
-	{FW_FEATURE_DUMP, "hcall-dump"},
-	{FW_FEATURE_INTERRUPT, "hcall-interrupt"},
-	{FW_FEATURE_MIGRATE, "hcall-migrate"},
-	{FW_FEATURE_PERFMON, "hcall-perfmon"},
-	{FW_FEATURE_CRQ, "hcall-crq"},
-	{FW_FEATURE_VIO, "hcall-vio"},
-	{FW_FEATURE_RDMA, "hcall-rdma"},
-	{FW_FEATURE_LLAN, "hcall-lLAN"},
-	{FW_FEATURE_BULK, "hcall-bulk"},
-	{FW_FEATURE_XDABR, "hcall-xdabr"},
-	{FW_FEATURE_MULTITCE, "hcall-multi-tce"},
-	{FW_FEATURE_SPLPAR, "hcall-splpar"},
 };
+47
arch/ppc64/kernel/firmware.c
···
···
+/*
+ * arch/ppc64/kernel/firmware.c
+ *
+ * Extracted from cputable.c
+ *
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * Modifications for ppc64:
+ *  Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
+ *  Copyright (C) 2005 Stephen Rothwell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+
+#include <asm/firmware.h>
+
+unsigned long ppc64_firmware_features;
+
+#ifdef CONFIG_PPC_PSERIES
+firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = {
+	{FW_FEATURE_PFT, "hcall-pft"},
+	{FW_FEATURE_TCE, "hcall-tce"},
+	{FW_FEATURE_SPRG0, "hcall-sprg0"},
+	{FW_FEATURE_DABR, "hcall-dabr"},
+	{FW_FEATURE_COPY, "hcall-copy"},
+	{FW_FEATURE_ASR, "hcall-asr"},
+	{FW_FEATURE_DEBUG, "hcall-debug"},
+	{FW_FEATURE_PERF, "hcall-perf"},
+	{FW_FEATURE_DUMP, "hcall-dump"},
+	{FW_FEATURE_INTERRUPT, "hcall-interrupt"},
+	{FW_FEATURE_MIGRATE, "hcall-migrate"},
+	{FW_FEATURE_PERFMON, "hcall-perfmon"},
+	{FW_FEATURE_CRQ, "hcall-crq"},
+	{FW_FEATURE_VIO, "hcall-vio"},
+	{FW_FEATURE_RDMA, "hcall-rdma"},
+	{FW_FEATURE_LLAN, "hcall-lLAN"},
+	{FW_FEATURE_BULK, "hcall-bulk"},
+	{FW_FEATURE_XDABR, "hcall-xdabr"},
+	{FW_FEATURE_MULTITCE, "hcall-multi-tce"},
+	{FW_FEATURE_SPLPAR, "hcall-splpar"},
+};
+#endif
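The table pairs each FW_FEATURE_* bit with the hcall name firmware advertises in the device tree, so platform setup can fold the advertised names into ppc64_firmware_features. A sketch of that matching loop, under the assumption that the caller hands in the NUL-separated property value (e.g. "ibm,hypertas-functions"); the helper name and the val/name field names are my assumptions:

    #include <string.h>

    static void scan_hypertas(const char *prop, int len)
    {
    	const char *s;
    	int i;

    	/* walk the NUL-separated strings in the property value */
    	for (s = prop; s < prop + len; s += strlen(s) + 1)
    		for (i = 0; i < FIRMWARE_MAX_FEATURES; i++)
    			if (strcmp(s, firmware_features_table[i].name) == 0)
    				ppc64_firmware_features |=
    					firmware_features_table[i].val;
    }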
+194 -319
arch/ppc64/kernel/head.S
···
 * 2 of the License, or (at your option) any later version.
 */

-#define SECONDARY_PROCESSORS
-
 #include <linux/config.h>
 #include <linux/threads.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
-#include <asm/naca.h>
 #include <asm/systemcfg.h>
 #include <asm/ppc_asm.h>
 #include <asm/offsets.h>
···
 #endif

 /*
- * hcall interface to pSeries LPAR
- */
-#define H_SET_ASR	0x30
-
-/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
- * 0x3000 - 0x3fff : Interrupt support
- * 0x4000 - 0x4fff : NACA
- * 0x6000          : iSeries and common interrupt prologs
- * 0x9000 - 0x9fff : Initial segment table
 */

 /*
···

 	/* Catch branch to 0 in real mode */
 	trap
 #ifdef CONFIG_PPC_ISERIES
 	/*
 	 * At offset 0x20, there is a pointer to iSeries LPAR data.
···
 	.llong hvReleaseData-KERNELBASE

 	/*
-	 * At offset 0x28 and 0x30 are offsets to the msChunks
 	 * array (used by the iSeries LPAR debugger to do translation
 	 * between physical addresses and absolute addresses) and
 	 * to the pidhash table (also used by the debugger)
 	 */
-	.llong msChunks-KERNELBASE
 	.llong 0		/* pidhash-KERNELBASE SFRXXX */

 	/* Offset 0x38 - Pointer to start of embedded System.map */
···
 embedded_sysmap_end:
 	.llong 0

-#else /* CONFIG_PPC_ISERIES */

 	/* Secondary processors spin on this value until it goes to 1. */
 	.globl __secondary_hold_spinloop
···
 	std r24,__secondary_hold_acknowledge@l(0)
 	sync

-	/* All secondary cpu's wait here until told to start. */
 100:	ld r4,__secondary_hold_spinloop@l(0)
 	cmpdi 0,r4,1
 	bne 100b
···
 	b .pSeries_secondary_smp_init
 #else
 	BUG_OPCODE
-#endif
 #endif
 #endif
···
 	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
 	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)

-	/* moved from 0xf00 */
-	STD_EXCEPTION_PSERIES(0x3000, performance_monitor)

-	. = 0x3100
 _GLOBAL(do_stab_bolted_pSeries)
 	mtcrf 0x80,r12
 	mfspr r12,SPRG2
 	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)

-
-	/* Space for the naca. Architected to be located at real address
-	 * NACA_PHYS_ADDR. Various tools rely on this location being fixed.
-	 * The first dword of the naca is required by iSeries LPAR to
-	 * point to itVpdAreas. On pSeries native, this value is not used.
-	 */
-	. = NACA_PHYS_ADDR
-	.globl __end_interrupts
-__end_interrupts:
 #ifdef CONFIG_PPC_ISERIES
-	.globl naca
-naca:
-	.llong itVpdAreas
-	.llong 0		/* xRamDisk */
-	.llong 0		/* xRamDiskSize */
-
-	. = 0x6100
-
 /***  ISeries-LPAR interrupt handlers ***/

 	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
···

 	cmpwi 0,r23,0
 	beq iSeries_secondary_smp_loop	/* Loop until told to go */
-#ifdef SECONDARY_PROCESSORS
 	bne .__secondary_start		/* Loop until told to go */
-#endif
 iSeries_secondary_smp_loop:
 	/* Let the Hypervisor know we are alive */
 	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
···
 	ld r13,PACA_EXGEN+EX_R13(r13)
 	rfid
 	b .	/* prevent speculative execution */
-#endif
-
-/*
- * Data area reserved for FWNMI option.
- */
-	.= 0x7000
-	.globl fwnmi_data_area
-fwnmi_data_area:
-
-#ifdef CONFIG_PPC_ISERIES
-	. = LPARMAP_PHYS
-#include "lparmap.s"
-#endif /* CONFIG_PPC_ISERIES */
-
-/*
- * Vectors for the FWNMI option. Share common code.
- */
-	. = 0x8000
-	.globl system_reset_fwnmi
-system_reset_fwnmi:
-	HMT_MEDIUM
-	mtspr SPRG1,r13		/* save r13 */
-	RUNLATCH_ON(r13)
-	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
-	.globl machine_check_fwnmi
-machine_check_fwnmi:
-	HMT_MEDIUM
-	mtspr SPRG1,r13		/* save r13 */
-	RUNLATCH_ON(r13)
-	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
-
-/*
- * Space for the initial segment table
- * For LPAR, the hypervisor must fill in at least one entry
- * before we get control (with relocate on)
- */
-	. = STAB0_PHYS_ADDR
-	.globl __start_stab
-__start_stab:
-
-	. = (STAB0_PHYS_ADDR + PAGE_SIZE)
-	.globl __end_stab
-__end_stab:
-

 /*** Common interrupt handlers ***/
···
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
- * We switch to using the paca guard page as an emergency stack,
- * save the registers there, and call kernel_bad_stack(), which panics.
 */
 bad_stack:
 	ld r1,PACAEMERGSP(r13)
···
 	bl .kernel_fp_unavailable_exception
 	BUG_OPCODE

 	.align 7
 	.globl altivec_unavailable_common
 altivec_unavailable_common:
···
 	ENABLE_INTS
 	bl .altivec_unavailable_exception
 	b .ret_from_except

 /*
 * Hash table stuff
···
 	bl .unrecoverable_exception
 	b 1b


 /*
 * On pSeries, secondary processors spin in the following code.
···
 	b .kexec_wait		/* next kernel might do better */

 2:	mtspr SPRG3,r13		/* Save vaddr of paca in SPRG3 */
-	/* From now on, r24 is expected to be logica cpuid */
 	mr r24,r5
 3:	HMT_LOW
 	lbz r23,PACAPROCSTART(r13)	/* Test if this processor should */
···

 	cmpwi 0,r23,0
 #ifdef CONFIG_SMP
-#ifdef SECONDARY_PROCESSORS
 	bne .__secondary_start
-#endif
 #endif
 	b 3b			/* Loop until told to go */
···

 	.align 8
 copy_to_here:
-
-/*
- * load_up_fpu(unused, unused, tsk)
- * Disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- * On SMP we know the fpu is free, since we give it up every
- * switch (ie, no lazy save of the FP registers).
- * On entry: r13 == 'current' && last_task_used_math != 'current'
- */
-_STATIC(load_up_fpu)
-	mfmsr r5		/* grab the current MSR */
-	ori r5,r5,MSR_FP
-	mtmsrd r5		/* enable use of fpu now */
-	isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_fpu in switch_to.
- *
- */
-#ifndef CONFIG_SMP
-	ld r3,last_task_used_math@got(r2)
-	ld r4,0(r3)
-	cmpdi 0,r4,0
-	beq 1f
-	/* Save FP state to last_task_used_math's THREAD struct */
-	addi r4,r4,THREAD
-	SAVE_32FPRS(0, r4)
-	mffs fr0
-	stfd fr0,THREAD_FPSCR(r4)
-	/* Disable FP for last_task_used_math */
-	ld r5,PT_REGS(r4)
-	ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li r6,MSR_FP|MSR_FE0|MSR_FE1
-	andc r4,r4,r6
-	std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* enable use of FP after return */
-	ld r4,PACACURRENT(r13)
-	addi r5,r4,THREAD	/* Get THREAD */
-	ld r4,THREAD_FPEXC_MODE(r5)
-	ori r12,r12,MSR_FP
-	or r12,r12,r4
-	std r12,_MSR(r1)
-	lfd fr0,THREAD_FPSCR(r5)
-	mtfsf 0xff,fr0
-	REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	subi r4,r5,THREAD	/* Back to 'current' */
-	std r4,0(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	b fast_exception_return
-
-/*
- * disable_kernel_fp()
- * Disable the FPU.
- */
-_GLOBAL(disable_kernel_fp)
-	mfmsr r3
-	rldicl r0,r3,(63-MSR_FP_LG),1
-	rldicl r3,r0,(MSR_FP_LG+1),0
-	mtmsrd r3		/* disable use of fpu now */
-	isync
-	blr
-
-/*
- * giveup_fpu(tsk)
- * Disable FP for the task given as the argument,
- * and save the floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- */
-_GLOBAL(giveup_fpu)
-	mfmsr r5
-	ori r5,r5,MSR_FP
-	mtmsrd r5		/* enable use of fpu now */
-	isync
-	cmpdi 0,r3,0
-	beqlr-			/* if no previous owner, done */
-	addi r3,r3,THREAD	/* want THREAD of task */
-	ld r5,PT_REGS(r3)
-	cmpdi 0,r5,0
-	SAVE_32FPRS(0, r3)
-	mffs fr0
-	stfd fr0,THREAD_FPSCR(r3)
-	beq 1f
-	ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li r3,MSR_FP|MSR_FE0|MSR_FE1
-	andc r4,r4,r3		/* disable FP for previous task */
-	std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-	li r5,0
-	ld r4,last_task_used_math@got(r2)
-	std r5,0(r4)
-#endif /* CONFIG_SMP */
-	blr
-
-
-#ifdef CONFIG_ALTIVEC
-
-/*
- * load_up_altivec(unused, unused, tsk)
- * Disable VMX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- * On SMP we know the VMX is free, since we give it up every
- * switch (ie, no lazy save of the vector registers).
- * On entry: r13 == 'current' && last_task_used_altivec != 'current'
- */
-_STATIC(load_up_altivec)
-	mfmsr r5		/* grab the current MSR */
-	oris r5,r5,MSR_VEC@h
-	mtmsrd r5		/* enable use of VMX now */
-	isync
-
-/*
- * For SMP, we don't do lazy VMX switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_altvec in switch_to.
- * VRSAVE isn't dealt with here, that is done in the normal context
- * switch code. Note that we could rely on vrsave value to eventually
- * avoid saving all of the VREGs here...
- */
-#ifndef CONFIG_SMP
-	ld r3,last_task_used_altivec@got(r2)
-	ld r4,0(r3)
-	cmpdi 0,r4,0
-	beq 1f
-	/* Save VMX state to last_task_used_altivec's THREAD struct */
-	addi r4,r4,THREAD
-	SAVE_32VRS(0,r5,r4)
-	mfvscr vr0
-	li r10,THREAD_VSCR
-	stvx vr0,r10,r4
-	/* Disable VMX for last_task_used_altivec */
-	ld r5,PT_REGS(r4)
-	ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis r6,MSR_VEC@h
-	andc r4,r4,r6
-	std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* Hack: if we get an altivec unavailable trap with VRSAVE
-	 * set to all zeros, we assume this is a broken application
-	 * that fails to set it properly, and thus we switch it to
-	 * all 1's
-	 */
-	mfspr r4,SPRN_VRSAVE
-	cmpdi 0,r4,0
-	bne+ 1f
-	li r4,-1
-	mtspr SPRN_VRSAVE,r4
-1:
-	/* enable use of VMX after return */
-	ld r4,PACACURRENT(r13)
-	addi r5,r4,THREAD	/* Get THREAD */
-	oris r12,r12,MSR_VEC@h
-	std r12,_MSR(r1)
-	li r4,1
-	li r10,THREAD_VSCR
-	stw r4,THREAD_USED_VR(r5)
-	lvx vr0,r10,r5
-	mtvscr vr0
-	REST_32VRS(0,r4,r5)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	subi r4,r5,THREAD	/* Back to 'current' */
-	std r4,0(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	b fast_exception_return
-
-/*
- * disable_kernel_altivec()
- * Disable the VMX.
- */
-_GLOBAL(disable_kernel_altivec)
-	mfmsr r3
-	rldicl r0,r3,(63-MSR_VEC_LG),1
-	rldicl r3,r0,(MSR_VEC_LG+1),0
-	mtmsrd r3		/* disable use of VMX now */
-	isync
-	blr
-
-/*
- * giveup_altivec(tsk)
- * Disable VMX for the task given as the argument,
- * and save the vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- */
-_GLOBAL(giveup_altivec)
-	mfmsr r5
-	oris r5,r5,MSR_VEC@h
-	mtmsrd r5		/* enable use of VMX now */
-	isync
-	cmpdi 0,r3,0
-	beqlr-			/* if no previous owner, done */
-	addi r3,r3,THREAD	/* want THREAD of task */
-	ld r5,PT_REGS(r3)
-	cmpdi 0,r5,0
-	SAVE_32VRS(0,r4,r3)
-	mfvscr vr0
-	li r4,THREAD_VSCR
-	stvx vr0,r4,r3
-	beq 1f
-	ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis r3,MSR_VEC@h
-	andc r4,r4,r3		/* disable FP for previous task */
-	std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-	li r5,0
-	ld r4,last_task_used_altivec@got(r2)
-	std r5,0(r4)
-#endif /* CONFIG_SMP */
-	blr
-
-#endif /* CONFIG_ALTIVEC */

 #ifdef CONFIG_SMP
 #ifdef CONFIG_PPC_PMAC
···

 	bl .start_kernel

-_GLOBAL(__setup_cpu_power3)
-	blr
-
 _GLOBAL(hmt_init)
 #ifdef CONFIG_HMT
 	LOADADDR(r5, hmt_thread_data)
···

 /*
 * We put a few things here that have to be page-aligned.
- * This stuff goes at the beginning of the data segment,
- * which is page-aligned.
 */
-	.data
 	.align 12
-	.globl sdata
-sdata:
 	.globl empty_zero_page
 empty_zero_page:
-	.space 4096

 	.globl swapper_pg_dir
 swapper_pg_dir:
-	.space 4096

 /*
 * This space gets a copy of optional info passed to us by the bootstrap
··· 23 * 2 of the License, or (at your option) any later version. 24 */ 25 26 #include <linux/config.h> 27 #include <linux/threads.h> 28 #include <asm/processor.h> 29 #include <asm/page.h> 30 #include <asm/mmu.h> 31 #include <asm/systemcfg.h> 32 #include <asm/ppc_asm.h> 33 #include <asm/offsets.h> ··· 45 #endif 46 47 /* 48 * We layout physical memory as follows: 49 * 0x0000 - 0x00ff : Secondary processor spin code 50 * 0x0100 - 0x2fff : pSeries Interrupt prologs 51 + * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs 52 + * 0x6000 - 0x6fff : Initial (CPU0) segment table 53 + * 0x7000 - 0x7fff : FWNMI data area 54 + * 0x8000 - : Early init and support code 55 */ 56 57 /* ··· 94 95 /* Catch branch to 0 in real mode */ 96 trap 97 + 98 #ifdef CONFIG_PPC_ISERIES 99 /* 100 * At offset 0x20, there is a pointer to iSeries LPAR data. ··· 103 .llong hvReleaseData-KERNELBASE 104 105 /* 106 + * At offset 0x28 and 0x30 are offsets to the mschunks_map 107 * array (used by the iSeries LPAR debugger to do translation 108 * between physical addresses and absolute addresses) and 109 * to the pidhash table (also used by the debugger) 110 */ 111 + .llong mschunks_map-KERNELBASE 112 .llong 0 /* pidhash-KERNELBASE SFRXXX */ 113 114 /* Offset 0x38 - Pointer to start of embedded System.map */ ··· 120 embedded_sysmap_end: 121 .llong 0 122 123 + #endif /* CONFIG_PPC_ISERIES */ 124 125 /* Secondary processors spin on this value until it goes to 1. */ 126 .globl __secondary_hold_spinloop ··· 155 std r24,__secondary_hold_acknowledge@l(0) 156 sync 157 158 + /* All secondary cpus wait here until told to start. */ 159 100: ld r4,__secondary_hold_spinloop@l(0) 160 cmpdi 0,r4,1 161 bne 100b ··· 168 b .pSeries_secondary_smp_init 169 #else 170 BUG_OPCODE 171 #endif 172 #endif 173 ··· 502 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) 503 STD_EXCEPTION_PSERIES(0x1700, altivec_assist) 504 505 + . = 0x3000 506 507 + /*** pSeries interrupt support ***/ 508 + 509 + /* moved from 0xf00 */ 510 + STD_EXCEPTION_PSERIES(., performance_monitor) 511 + 512 + .align 7 513 _GLOBAL(do_stab_bolted_pSeries) 514 mtcrf 0x80,r12 515 mfspr r12,SPRG2 516 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) 517 518 + /* 519 + * Vectors for the FWNMI option. Share common code. 520 + */ 521 + .globl system_reset_fwnmi 522 + system_reset_fwnmi: 523 + HMT_MEDIUM 524 + mtspr SPRG1,r13 /* save r13 */ 525 + RUNLATCH_ON(r13) 526 + EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) 527 + 528 + .globl machine_check_fwnmi 529 + machine_check_fwnmi: 530 + HMT_MEDIUM 531 + mtspr SPRG1,r13 /* save r13 */ 532 + RUNLATCH_ON(r13) 533 + EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) 534 + 535 #ifdef CONFIG_PPC_ISERIES 536 /*** ISeries-LPAR interrupt handlers ***/ 537 538 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC) ··· 626 627 cmpwi 0,r23,0 628 beq iSeries_secondary_smp_loop /* Loop until told to go */ 629 bne .__secondary_start /* Loop until told to go */ 630 iSeries_secondary_smp_loop: 631 /* Let the Hypervisor know we are alive */ 632 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ ··· 671 ld r13,PACA_EXGEN+EX_R13(r13) 672 rfid 673 b . /* prevent speculative execution */ 674 #endif /* CONFIG_PPC_ISERIES */ 675 676 /*** Common interrupt handlers ***/ 677 ··· 752 * R9 contains the saved CR, r13 points to the paca, 753 * r10 contains the (bad) kernel stack pointer, 754 * r11 and r12 contain the saved SRR0 and SRR1. 
755 + * We switch to using an emergency stack, save the registers there, 756 + * and call kernel_bad_stack(), which panics. 757 */ 758 bad_stack: 759 ld r1,PACAEMERGSP(r13) ··· 906 bl .kernel_fp_unavailable_exception 907 BUG_OPCODE 908 909 + /* 910 + * load_up_fpu(unused, unused, tsk) 911 + * Disable FP for the task which had the FPU previously, 912 + * and save its floating-point registers in its thread_struct. 913 + * Enables the FPU for use in the kernel on return. 914 + * On SMP we know the fpu is free, since we give it up every 915 + * switch (ie, no lazy save of the FP registers). 916 + * On entry: r13 == 'current' && last_task_used_math != 'current' 917 + */ 918 + _STATIC(load_up_fpu) 919 + mfmsr r5 /* grab the current MSR */ 920 + ori r5,r5,MSR_FP 921 + mtmsrd r5 /* enable use of fpu now */ 922 + isync 923 + /* 924 + * For SMP, we don't do lazy FPU switching because it just gets too 925 + * horrendously complex, especially when a task switches from one CPU 926 + * to another. Instead we call giveup_fpu in switch_to. 927 + * 928 + */ 929 + #ifndef CONFIG_SMP 930 + ld r3,last_task_used_math@got(r2) 931 + ld r4,0(r3) 932 + cmpdi 0,r4,0 933 + beq 1f 934 + /* Save FP state to last_task_used_math's THREAD struct */ 935 + addi r4,r4,THREAD 936 + SAVE_32FPRS(0, r4) 937 + mffs fr0 938 + stfd fr0,THREAD_FPSCR(r4) 939 + /* Disable FP for last_task_used_math */ 940 + ld r5,PT_REGS(r4) 941 + ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 942 + li r6,MSR_FP|MSR_FE0|MSR_FE1 943 + andc r4,r4,r6 944 + std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 945 + 1: 946 + #endif /* CONFIG_SMP */ 947 + /* enable use of FP after return */ 948 + ld r4,PACACURRENT(r13) 949 + addi r5,r4,THREAD /* Get THREAD */ 950 + ld r4,THREAD_FPEXC_MODE(r5) 951 + ori r12,r12,MSR_FP 952 + or r12,r12,r4 953 + std r12,_MSR(r1) 954 + lfd fr0,THREAD_FPSCR(r5) 955 + mtfsf 0xff,fr0 956 + REST_32FPRS(0, r5) 957 + #ifndef CONFIG_SMP 958 + /* Update last_task_used_math to 'current' */ 959 + subi r4,r5,THREAD /* Back to 'current' */ 960 + std r4,0(r3) 961 + #endif /* CONFIG_SMP */ 962 + /* restore registers and return */ 963 + b fast_exception_return 964 + 965 .align 7 966 .globl altivec_unavailable_common 967 altivec_unavailable_common: ··· 920 ENABLE_INTS 921 bl .altivec_unavailable_exception 922 b .ret_from_except 923 + 924 + #ifdef CONFIG_ALTIVEC 925 + /* 926 + * load_up_altivec(unused, unused, tsk) 927 + * Disable VMX for the task which had it previously, 928 + * and save its vector registers in its thread_struct. 929 + * Enables the VMX for use in the kernel on return. 930 + * On SMP we know the VMX is free, since we give it up every 931 + * switch (ie, no lazy save of the vector registers). 932 + * On entry: r13 == 'current' && last_task_used_altivec != 'current' 933 + */ 934 + _STATIC(load_up_altivec) 935 + mfmsr r5 /* grab the current MSR */ 936 + oris r5,r5,MSR_VEC@h 937 + mtmsrd r5 /* enable use of VMX now */ 938 + isync 939 + 940 + /* 941 + * For SMP, we don't do lazy VMX switching because it just gets too 942 + * horrendously complex, especially when a task switches from one CPU 943 + * to another. Instead we call giveup_altvec in switch_to. 944 + * VRSAVE isn't dealt with here, that is done in the normal context 945 + * switch code. Note that we could rely on vrsave value to eventually 946 + * avoid saving all of the VREGs here... 
947 + */ 948 + #ifndef CONFIG_SMP 949 + ld r3,last_task_used_altivec@got(r2) 950 + ld r4,0(r3) 951 + cmpdi 0,r4,0 952 + beq 1f 953 + /* Save VMX state to last_task_used_altivec's THREAD struct */ 954 + addi r4,r4,THREAD 955 + SAVE_32VRS(0,r5,r4) 956 + mfvscr vr0 957 + li r10,THREAD_VSCR 958 + stvx vr0,r10,r4 959 + /* Disable VMX for last_task_used_altivec */ 960 + ld r5,PT_REGS(r4) 961 + ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 962 + lis r6,MSR_VEC@h 963 + andc r4,r4,r6 964 + std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 965 + 1: 966 + #endif /* CONFIG_SMP */ 967 + /* Hack: if we get an altivec unavailable trap with VRSAVE 968 + * set to all zeros, we assume this is a broken application 969 + * that fails to set it properly, and thus we switch it to 970 + * all 1's 971 + */ 972 + mfspr r4,SPRN_VRSAVE 973 + cmpdi 0,r4,0 974 + bne+ 1f 975 + li r4,-1 976 + mtspr SPRN_VRSAVE,r4 977 + 1: 978 + /* enable use of VMX after return */ 979 + ld r4,PACACURRENT(r13) 980 + addi r5,r4,THREAD /* Get THREAD */ 981 + oris r12,r12,MSR_VEC@h 982 + std r12,_MSR(r1) 983 + li r4,1 984 + li r10,THREAD_VSCR 985 + stw r4,THREAD_USED_VR(r5) 986 + lvx vr0,r10,r5 987 + mtvscr vr0 988 + REST_32VRS(0,r4,r5) 989 + #ifndef CONFIG_SMP 990 + /* Update last_task_used_math to 'current' */ 991 + subi r4,r5,THREAD /* Back to 'current' */ 992 + std r4,0(r3) 993 + #endif /* CONFIG_SMP */ 994 + /* restore registers and return */ 995 + b fast_exception_return 996 + #endif /* CONFIG_ALTIVEC */ 997 998 /* 999 * Hash table stuff ··· 1167 bl .unrecoverable_exception 1168 b 1b 1169 1170 + /* 1171 + * Space for CPU0's segment table. 1172 + * 1173 + * On iSeries, the hypervisor must fill in at least one entry before 1174 + * we get control (with relocate on). The address is give to the hv 1175 + * as a page number (see xLparMap in LparData.c), so this must be at a 1176 + * fixed address (the linker can't compute (u64)&initial_stab >> 1177 + * PAGE_SHIFT). 1178 + */ 1179 + . = STAB0_PHYS_ADDR /* 0x6000 */ 1180 + .globl initial_stab 1181 + initial_stab: 1182 + .space 4096 1183 + 1184 + /* 1185 + * Data area reserved for FWNMI option. 1186 + * This address (0x7000) is fixed by the RPA. 1187 + */ 1188 + .= 0x7000 1189 + .globl fwnmi_data_area 1190 + fwnmi_data_area: 1191 + .space PAGE_SIZE 1192 1193 /* 1194 * On pSeries, secondary processors spin in the following code. ··· 1200 b .kexec_wait /* next kernel might do better */ 1201 1202 2: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 1203 + /* From now on, r24 is expected to be logical cpuid */ 1204 mr r24,r5 1205 3: HMT_LOW 1206 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ ··· 1213 1214 cmpwi 0,r23,0 1215 #ifdef CONFIG_SMP 1216 bne .__secondary_start 1217 #endif 1218 b 3b /* Loop until told to go */ 1219 ··· 1429 1430 .align 8 1431 copy_to_here: 1432 1433 #ifdef CONFIG_SMP 1434 #ifdef CONFIG_PPC_PMAC ··· 2002 2003 bl .start_kernel 2004 2005 _GLOBAL(hmt_init) 2006 #ifdef CONFIG_HMT 2007 LOADADDR(r5, hmt_thread_data) ··· 2095 2096 /* 2097 * We put a few things here that have to be page-aligned. 2098 + * This stuff goes at the beginning of the bss, which is page-aligned. 2099 */ 2100 + .section ".bss" 2101 + 2102 .align 12 2103 + 2104 .globl empty_zero_page 2105 empty_zero_page: 2106 + .space PAGE_SIZE 2107 2108 .globl swapper_pg_dir 2109 swapper_pg_dir: 2110 + .space PAGE_SIZE 2111 2112 /* 2113 * This space gets a copy of optional info passed to us by the bootstrap
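A note on the load_up_fpu/load_up_altivec hunks above: the #ifndef CONFIG_SMP paths implement classic lazy FP/VMX switching, where the registers stay live across context switches until some other task traps on an FP or VMX instruction. A rough C rendering of the FP case may help when reading the assembly; this is a sketch only — save_fp_state(), restore_fp_state() and enable_msr_fp() are invented stand-ins for the SAVE_32FPRS/REST_32FPRS/mtmsrd sequences, and the real code runs in the exception path with its own register discipline:

    /* Sketch of the lazy-FPU logic in load_up_fpu; helpers are hypothetical. */
    void load_up_fpu_sketch(void)
    {
            enable_msr_fp();                /* mtmsrd with MSR_FP set */
    #ifndef CONFIG_SMP
            if (last_task_used_math && last_task_used_math != current) {
                    /* Park the old owner's FP state in its thread_struct... */
                    save_fp_state(&last_task_used_math->thread);
                    /* ...and clear MSR_FP in its saved MSR so it traps again. */
                    last_task_used_math->thread.regs->msr &=
                            ~(MSR_FP | MSR_FE0 | MSR_FE1);
            }
    #endif
            /* Hand the FPU to 'current' on exception return. */
            current->thread.regs->msr |= MSR_FP | current->thread.fpexc_mode;
            restore_fp_state(&current->thread);
    #ifndef CONFIG_SMP
            last_task_used_math = current;
    #endif
    }

On SMP the lazy path is compiled out entirely: giveup_fpu()/giveup_altivec() are called at every context switch, so the unit is always free by the time the trap arrives.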
+4 -1
arch/ppc64/kernel/iSeries_htab.c
··· 41 unsigned long prpn, unsigned long vflags, 42 unsigned long rflags) 43 { 44 long slot; 45 hpte_t lhpte; 46 int secondary = 0; ··· 71 slot &= 0x7fffffffffffffff; 72 } 73 74 lhpte.v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID; 75 - lhpte.r = (physRpn_to_absRpn(prpn) << HPTE_R_RPN_SHIFT) | rflags; 76 77 /* Now fill in the actual HPTE */ 78 HvCallHpt_addValidate(slot, secondary, &lhpte);
··· 41 unsigned long prpn, unsigned long vflags, 42 unsigned long rflags) 43 { 44 + unsigned long arpn; 45 long slot; 46 hpte_t lhpte; 47 int secondary = 0; ··· 70 slot &= 0x7fffffffffffffff; 71 } 72 73 + arpn = phys_to_abs(prpn << PAGE_SHIFT) >> PAGE_SHIFT; 74 + 75 lhpte.v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID; 76 + lhpte.r = (arpn << HPTE_R_RPN_SHIFT) | rflags; 77 78 /* Now fill in the actual HPTE */ 79 HvCallHpt_addValidate(slot, secondary, &lhpte);
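The hunk above swaps the removed physRpn_to_absRpn() helper for a byte-address round trip through phys_to_abs(). On iSeries, real storage is handed out in 256K chunks that can sit anywhere in absolute space, so the translation has to go through the chunk map. A minimal sketch of what phys_to_abs() amounts to under that scheme — the MSCHUNKS_* names are referenced by the iSeries_setup.c hunk below, the 256K value follows its "Chunk size on iSeries is 256K bytes" comment, and the struct here is trimmed to the fields used:

    #define MSCHUNKS_CHUNK_SHIFT    18      /* 256K chunks */
    #define MSCHUNKS_CHUNK_SIZE     (1UL << MSCHUNKS_CHUNK_SHIFT)
    #define MSCHUNKS_OFFSET_MASK    (MSCHUNKS_CHUNK_SIZE - 1)

    struct mschunks_map_sketch {
            unsigned long num_chunks;
            unsigned int *mapping;          /* physical chunk -> absolute chunk */
    };
    extern struct mschunks_map_sketch mschunks_map;

    static inline unsigned long phys_to_abs_sketch(unsigned long pa)
    {
            unsigned long chunk  = pa >> MSCHUNKS_CHUNK_SHIFT;
            unsigned long offset = pa & MSCHUNKS_OFFSET_MASK;

            return ((unsigned long)mschunks_map.mapping[chunk]
                            << MSCHUNKS_CHUNK_SHIFT) + offset;
    }

The prpn << PAGE_SHIFT / >> PAGE_SHIFT dance in the hunk exists because the new helper works on byte addresses while the HPTE wants a real page number.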
+26 -4
arch/ppc64/kernel/iSeries_setup.c
··· 39 #include <asm/cputable.h> 40 #include <asm/sections.h> 41 #include <asm/iommu.h> 42 43 #include <asm/time.h> 44 #include "iSeries_setup.h" ··· 315 316 DBG(" -> iSeries_init_early()\n"); 317 318 ppcdbg_initialize(); 319 320 #if defined(CONFIG_BLK_DEV_INITRD) ··· 415 DBG(" <- iSeries_init_early()\n"); 416 } 417 418 /* 419 * The iSeries may have very large memories ( > 128 GB ) and a partition 420 * may get memory in "chunks" that may be anywhere in the 2**52 real ··· 468 469 /* Chunk size on iSeries is 256K bytes */ 470 totalChunks = (u32)HvLpConfig_getMsChunks(); 471 - klimit = msChunks_alloc(klimit, totalChunks, 1UL << 18); 472 473 /* 474 * Get absolute address of our load area ··· 505 printk("Load area size %dK\n", loadAreaSize * 256); 506 507 for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk) 508 - msChunks.abs[nextPhysChunk] = 509 loadAreaFirstChunk + nextPhysChunk; 510 511 /* ··· 514 */ 515 hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress()); 516 hptSizePages = (u32)HvCallHpt_getHptPages(); 517 - hptSizeChunks = hptSizePages >> (msChunks.chunk_shift - PAGE_SHIFT); 518 hptLastChunk = hptFirstChunk + hptSizeChunks - 1; 519 520 printk("HPT absolute addr = %016lx, size = %dK\n", ··· 571 (absChunk > hptLastChunk)) && 572 ((absChunk < loadAreaFirstChunk) || 573 (absChunk > loadAreaLastChunk))) { 574 - msChunks.abs[nextPhysChunk] = absChunk; 575 ++nextPhysChunk; 576 } 577 } ··· 963 ppc_md.get_rtc_time = iSeries_get_rtc_time; 964 ppc_md.calibrate_decr = iSeries_calibrate_decr; 965 ppc_md.progress = iSeries_progress; 966 967 if (get_paca()->lppaca.shared_proc) { 968 ppc_md.idle_loop = iseries_shared_idle;
··· 39 #include <asm/cputable.h> 40 #include <asm/sections.h> 41 #include <asm/iommu.h> 42 + #include <asm/firmware.h> 43 44 #include <asm/time.h> 45 #include "iSeries_setup.h" ··· 314 315 DBG(" -> iSeries_init_early()\n"); 316 317 + ppc64_firmware_features = FW_FEATURE_ISERIES; 318 + 319 ppcdbg_initialize(); 320 321 #if defined(CONFIG_BLK_DEV_INITRD) ··· 412 DBG(" <- iSeries_init_early()\n"); 413 } 414 415 + struct mschunks_map mschunks_map = { 416 + /* XXX We don't use these, but Piranha might need them. */ 417 + .chunk_size = MSCHUNKS_CHUNK_SIZE, 418 + .chunk_shift = MSCHUNKS_CHUNK_SHIFT, 419 + .chunk_mask = MSCHUNKS_OFFSET_MASK, 420 + }; 421 + EXPORT_SYMBOL(mschunks_map); 422 + 423 + void mschunks_alloc(unsigned long num_chunks) 424 + { 425 + klimit = _ALIGN(klimit, sizeof(u32)); 426 + mschunks_map.mapping = (u32 *)klimit; 427 + klimit += num_chunks * sizeof(u32); 428 + mschunks_map.num_chunks = num_chunks; 429 + } 430 + 431 /* 432 * The iSeries may have very large memories ( > 128 GB ) and a partition 433 * may get memory in "chunks" that may be anywhere in the 2**52 real ··· 449 450 /* Chunk size on iSeries is 256K bytes */ 451 totalChunks = (u32)HvLpConfig_getMsChunks(); 452 + mschunks_alloc(totalChunks); 453 454 /* 455 * Get absolute address of our load area ··· 486 printk("Load area size %dK\n", loadAreaSize * 256); 487 488 for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk) 489 + mschunks_map.mapping[nextPhysChunk] = 490 loadAreaFirstChunk + nextPhysChunk; 491 492 /* ··· 495 */ 496 hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress()); 497 hptSizePages = (u32)HvCallHpt_getHptPages(); 498 + hptSizeChunks = hptSizePages >> (MSCHUNKS_CHUNK_SHIFT - PAGE_SHIFT); 499 hptLastChunk = hptFirstChunk + hptSizeChunks - 1; 500 501 printk("HPT absolute addr = %016lx, size = %dK\n", ··· 552 (absChunk > hptLastChunk)) && 553 ((absChunk < loadAreaFirstChunk) || 554 (absChunk > loadAreaLastChunk))) { 555 + mschunks_map.mapping[nextPhysChunk] = 556 + absChunk; 557 ++nextPhysChunk; 558 } 559 } ··· 943 ppc_md.get_rtc_time = iSeries_get_rtc_time; 944 ppc_md.calibrate_decr = iSeries_calibrate_decr; 945 ppc_md.progress = iSeries_progress; 946 + 947 + /* XXX Implement enable_pmcs for iSeries */ 948 949 if (get_paca()->lppaca.shared_proc) { 950 ppc_md.idle_loop = iseries_shared_idle;
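mschunks_alloc() above is a boot-time bump allocation off klimit (the running end of the kernel image): align the pointer, hand out the array, advance klimit so later early allocations land above it. The same pattern in isolation, with a hypothetical caller name:

    /* Bump allocation off klimit, as mschunks_alloc() does above (sketch). */
    extern unsigned long klimit;    /* end of the kernel image, grows at boot */

    static unsigned int *early_alloc_u32_array(unsigned long n)
    {
            unsigned int *p;

            klimit = _ALIGN(klimit, sizeof(unsigned int));
            p = (unsigned int *)klimit;
            klimit += n * sizeof(unsigned int);
            return p;
    }

Nothing ever frees this memory: everything below klimit is implicitly reserved when the real allocators come up.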
+144
arch/ppc64/kernel/iSeries_vio.c
···
··· 1 + /* 2 + * IBM PowerPC iSeries Virtual I/O Infrastructure Support. 3 + * 4 + * Copyright (c) 2005 Stephen Rothwell, IBM Corp. 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License 8 + * as published by the Free Software Foundation; either version 9 + * 2 of the License, or (at your option) any later version. 10 + */ 11 + #include <linux/types.h> 12 + #include <linux/device.h> 13 + #include <linux/init.h> 14 + 15 + #include <asm/vio.h> 16 + #include <asm/iommu.h> 17 + #include <asm/abs_addr.h> 18 + #include <asm/page.h> 19 + #include <asm/iSeries/vio.h> 20 + #include <asm/iSeries/HvTypes.h> 21 + #include <asm/iSeries/HvLpConfig.h> 22 + #include <asm/iSeries/HvCallXm.h> 23 + 24 + struct device *iSeries_vio_dev = &vio_bus_device.dev; 25 + EXPORT_SYMBOL(iSeries_vio_dev); 26 + 27 + static struct iommu_table veth_iommu_table; 28 + static struct iommu_table vio_iommu_table; 29 + 30 + static void __init iommu_vio_init(void) 31 + { 32 + struct iommu_table *t; 33 + struct iommu_table_cb cb; 34 + unsigned long cbp; 35 + unsigned long itc_entries; 36 + 37 + cb.itc_busno = 255; /* Bus 255 is the virtual bus */ 38 + cb.itc_virtbus = 0xff; /* Ask for virtual bus */ 39 + 40 + cbp = virt_to_abs(&cb); 41 + HvCallXm_getTceTableParms(cbp); 42 + 43 + itc_entries = cb.itc_size * PAGE_SIZE / sizeof(union tce_entry); 44 + veth_iommu_table.it_size = itc_entries / 2; 45 + veth_iommu_table.it_busno = cb.itc_busno; 46 + veth_iommu_table.it_offset = cb.itc_offset; 47 + veth_iommu_table.it_index = cb.itc_index; 48 + veth_iommu_table.it_type = TCE_VB; 49 + veth_iommu_table.it_blocksize = 1; 50 + 51 + t = iommu_init_table(&veth_iommu_table); 52 + 53 + if (!t) 54 + printk("Virtual Bus VETH TCE table failed.\n"); 55 + 56 + vio_iommu_table.it_size = itc_entries - veth_iommu_table.it_size; 57 + vio_iommu_table.it_busno = cb.itc_busno; 58 + vio_iommu_table.it_offset = cb.itc_offset + 59 + veth_iommu_table.it_size; 60 + vio_iommu_table.it_index = cb.itc_index; 61 + vio_iommu_table.it_type = TCE_VB; 62 + vio_iommu_table.it_blocksize = 1; 63 + 64 + t = iommu_init_table(&vio_iommu_table); 65 + 66 + if (!t) 67 + printk("Virtual Bus VIO TCE table failed.\n"); 68 + } 69 + 70 + /** 71 + * vio_register_device: - Register a new vio device. 72 + * @voidev: The device to register. 
73 + */ 74 + static struct vio_dev *__init vio_register_device_iseries(char *type, 75 + uint32_t unit_num) 76 + { 77 + struct vio_dev *viodev; 78 + 79 + /* allocate a vio_dev for this node */ 80 + viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL); 81 + if (!viodev) 82 + return NULL; 83 + memset(viodev, 0, sizeof(struct vio_dev)); 84 + 85 + snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%s%d", type, unit_num); 86 + 87 + return vio_register_device_common(viodev, viodev->dev.bus_id, type, 88 + unit_num, &vio_iommu_table); 89 + } 90 + 91 + void __init probe_bus_iseries(void) 92 + { 93 + HvLpIndexMap vlan_map; 94 + struct vio_dev *viodev; 95 + int i; 96 + 97 + /* there is only one of each of these */ 98 + vio_register_device_iseries("viocons", 0); 99 + vio_register_device_iseries("vscsi", 0); 100 + 101 + vlan_map = HvLpConfig_getVirtualLanIndexMap(); 102 + for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) { 103 + if ((vlan_map & (0x8000 >> i)) == 0) 104 + continue; 105 + viodev = vio_register_device_iseries("vlan", i); 106 + /* veth is special and has it own iommu_table */ 107 + viodev->iommu_table = &veth_iommu_table; 108 + } 109 + for (i = 0; i < HVMAXARCHITECTEDVIRTUALDISKS; i++) 110 + vio_register_device_iseries("viodasd", i); 111 + for (i = 0; i < HVMAXARCHITECTEDVIRTUALCDROMS; i++) 112 + vio_register_device_iseries("viocd", i); 113 + for (i = 0; i < HVMAXARCHITECTEDVIRTUALTAPES; i++) 114 + vio_register_device_iseries("viotape", i); 115 + } 116 + 117 + /** 118 + * vio_match_device_iseries: - Tell if a iSeries VIO device matches a 119 + * vio_device_id 120 + */ 121 + static int vio_match_device_iseries(const struct vio_device_id *id, 122 + const struct vio_dev *dev) 123 + { 124 + return strncmp(dev->type, id->type, strlen(id->type)) == 0; 125 + } 126 + 127 + /** 128 + * vio_bus_init_iseries: - Initialize the iSeries virtual IO bus 129 + */ 130 + static int __init vio_bus_init_iseries(void) 131 + { 132 + int err; 133 + 134 + err = vio_bus_init(vio_match_device_iseries, NULL, NULL); 135 + if (err == 0) { 136 + iommu_vio_init(); 137 + vio_bus_device.iommu_table = &vio_iommu_table; 138 + iSeries_vio_dev = &vio_bus_device.dev; 139 + probe_bus_iseries(); 140 + } 141 + return err; 142 + } 143 + 144 + __initcall(vio_bus_init_iseries);
+38 -111
arch/ppc64/kernel/lmb.c
··· 28 { 29 #ifdef DEBUG 30 unsigned long i; 31 - struct lmb *_lmb = &lmb; 32 33 udbg_printf("lmb_dump_all:\n"); 34 udbg_printf(" memory.cnt = 0x%lx\n", 35 - _lmb->memory.cnt); 36 udbg_printf(" memory.size = 0x%lx\n", 37 - _lmb->memory.size); 38 - for (i=0; i < _lmb->memory.cnt ;i++) { 39 udbg_printf(" memory.region[0x%x].base = 0x%lx\n", 40 - i, _lmb->memory.region[i].base); 41 - udbg_printf(" .physbase = 0x%lx\n", 42 - _lmb->memory.region[i].physbase); 43 udbg_printf(" .size = 0x%lx\n", 44 - _lmb->memory.region[i].size); 45 } 46 47 udbg_printf("\n reserved.cnt = 0x%lx\n", 48 - _lmb->reserved.cnt); 49 udbg_printf(" reserved.size = 0x%lx\n", 50 - _lmb->reserved.size); 51 - for (i=0; i < _lmb->reserved.cnt ;i++) { 52 udbg_printf(" reserved.region[0x%x].base = 0x%lx\n", 53 - i, _lmb->reserved.region[i].base); 54 - udbg_printf(" .physbase = 0x%lx\n", 55 - _lmb->reserved.region[i].physbase); 56 udbg_printf(" .size = 0x%lx\n", 57 - _lmb->reserved.region[i].size); 58 } 59 #endif /* DEBUG */ 60 } ··· 93 rgn->region[r1].size += rgn->region[r2].size; 94 for (i=r2; i < rgn->cnt-1; i++) { 95 rgn->region[i].base = rgn->region[i+1].base; 96 - rgn->region[i].physbase = rgn->region[i+1].physbase; 97 rgn->region[i].size = rgn->region[i+1].size; 98 } 99 rgn->cnt--; ··· 102 void __init 103 lmb_init(void) 104 { 105 - struct lmb *_lmb = &lmb; 106 - 107 /* Create a dummy zero size LMB which will get coalesced away later. 108 * This simplifies the lmb_add() code below... 109 */ 110 - _lmb->memory.region[0].base = 0; 111 - _lmb->memory.region[0].size = 0; 112 - _lmb->memory.cnt = 1; 113 114 /* Ditto. */ 115 - _lmb->reserved.region[0].base = 0; 116 - _lmb->reserved.region[0].size = 0; 117 - _lmb->reserved.cnt = 1; 118 } 119 120 /* This routine called with relocation disabled. */ 121 void __init 122 lmb_analyze(void) 123 { 124 - unsigned long i; 125 - unsigned long mem_size = 0; 126 - unsigned long size_mask = 0; 127 - struct lmb *_lmb = &lmb; 128 - #ifdef CONFIG_MSCHUNKS 129 - unsigned long physbase = 0; 130 - #endif 131 132 - for (i=0; i < _lmb->memory.cnt; i++) { 133 - unsigned long lmb_size; 134 135 - lmb_size = _lmb->memory.region[i].size; 136 - 137 - #ifdef CONFIG_MSCHUNKS 138 - _lmb->memory.region[i].physbase = physbase; 139 - physbase += lmb_size; 140 - #else 141 - _lmb->memory.region[i].physbase = _lmb->memory.region[i].base; 142 - #endif 143 - mem_size += lmb_size; 144 - size_mask |= lmb_size; 145 - } 146 - 147 - _lmb->memory.size = mem_size; 148 } 149 150 /* This routine called with relocation disabled. */ ··· 142 adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize); 143 if ( adjacent > 0 ) { 144 rgn->region[i].base -= size; 145 - rgn->region[i].physbase -= size; 146 rgn->region[i].size += size; 147 coalesced++; 148 break; ··· 168 for (i=rgn->cnt-1; i >= 0; i--) { 169 if (base < rgn->region[i].base) { 170 rgn->region[i+1].base = rgn->region[i].base; 171 - rgn->region[i+1].physbase = rgn->region[i].physbase; 172 rgn->region[i+1].size = rgn->region[i].size; 173 } else { 174 rgn->region[i+1].base = base; 175 - rgn->region[i+1].physbase = lmb_abs_to_phys(base); 176 rgn->region[i+1].size = size; 177 break; 178 } ··· 184 long __init 185 lmb_add(unsigned long base, unsigned long size) 186 { 187 - struct lmb *_lmb = &lmb; 188 - struct lmb_region *_rgn = &(_lmb->memory); 189 190 /* On pSeries LPAR systems, the first LMB is our RMO region. 
*/ 191 if ( base == 0 ) 192 - _lmb->rmo_size = size; 193 194 return lmb_add_region(_rgn, base, size); 195 ··· 197 long __init 198 lmb_reserve(unsigned long base, unsigned long size) 199 { 200 - struct lmb *_lmb = &lmb; 201 - struct lmb_region *_rgn = &(_lmb->reserved); 202 203 return lmb_add_region(_rgn, base, size); 204 } ··· 229 { 230 long i, j; 231 unsigned long base = 0; 232 - struct lmb *_lmb = &lmb; 233 - struct lmb_region *_mem = &(_lmb->memory); 234 - struct lmb_region *_rsv = &(_lmb->reserved); 235 236 - for (i=_mem->cnt-1; i >= 0; i--) { 237 - unsigned long lmbbase = _mem->region[i].base; 238 - unsigned long lmbsize = _mem->region[i].size; 239 240 if ( max_addr == LMB_ALLOC_ANYWHERE ) 241 base = _ALIGN_DOWN(lmbbase+lmbsize-size, align); ··· 242 continue; 243 244 while ( (lmbbase <= base) && 245 - ((j = lmb_overlaps_region(_rsv,base,size)) >= 0) ) { 246 - base = _ALIGN_DOWN(_rsv->region[j].base-size, align); 247 } 248 249 if ( (base != 0) && (lmbbase <= base) ) ··· 253 if ( i < 0 ) 254 return 0; 255 256 - lmb_add_region(_rsv, base, size); 257 258 return base; 259 } 260 261 unsigned long __init 262 lmb_phys_mem_size(void) 263 { 264 - struct lmb *_lmb = &lmb; 265 - #ifdef CONFIG_MSCHUNKS 266 - return _lmb->memory.size; 267 - #else 268 - struct lmb_region *_mem = &(_lmb->memory); 269 - unsigned long total = 0; 270 - int i; 271 - 272 - /* add all physical memory to the bootmem map */ 273 - for (i=0; i < _mem->cnt; i++) 274 - total += _mem->region[i].size; 275 - return total; 276 - #endif /* CONFIG_MSCHUNKS */ 277 } 278 279 unsigned long __init 280 lmb_end_of_DRAM(void) 281 { 282 - struct lmb *_lmb = &lmb; 283 - struct lmb_region *_mem = &(_lmb->memory); 284 - int idx = _mem->cnt - 1; 285 286 - #ifdef CONFIG_MSCHUNKS 287 - return (_mem->region[idx].physbase + _mem->region[idx].size); 288 - #else 289 - return (_mem->region[idx].base + _mem->region[idx].size); 290 - #endif /* CONFIG_MSCHUNKS */ 291 - 292 - return 0; 293 - } 294 - 295 - unsigned long __init 296 - lmb_abs_to_phys(unsigned long aa) 297 - { 298 - unsigned long i, pa = aa; 299 - struct lmb *_lmb = &lmb; 300 - struct lmb_region *_mem = &(_lmb->memory); 301 - 302 - for (i=0; i < _mem->cnt; i++) { 303 - unsigned long lmbbase = _mem->region[i].base; 304 - unsigned long lmbsize = _mem->region[i].size; 305 - if ( lmb_addrs_overlap(aa,1,lmbbase,lmbsize) ) { 306 - pa = _mem->region[i].physbase + (aa - lmbbase); 307 - break; 308 - } 309 - } 310 - 311 - return pa; 312 } 313 314 /* ··· 281 { 282 extern unsigned long memory_limit; 283 unsigned long i, limit; 284 - struct lmb_region *mem = &(lmb.memory); 285 286 if (! memory_limit) 287 return; 288 289 limit = memory_limit; 290 - for (i = 0; i < mem->cnt; i++) { 291 - if (limit > mem->region[i].size) { 292 - limit -= mem->region[i].size; 293 continue; 294 } 295 296 - mem->region[i].size = limit; 297 - mem->cnt = i + 1; 298 break; 299 } 300 }
··· 28 { 29 #ifdef DEBUG 30 unsigned long i; 31 32 udbg_printf("lmb_dump_all:\n"); 33 udbg_printf(" memory.cnt = 0x%lx\n", 34 + lmb.memory.cnt); 35 udbg_printf(" memory.size = 0x%lx\n", 36 + lmb.memory.size); 37 + for (i=0; i < lmb.memory.cnt ;i++) { 38 udbg_printf(" memory.region[0x%x].base = 0x%lx\n", 39 + i, lmb.memory.region[i].base); 40 udbg_printf(" .size = 0x%lx\n", 41 + lmb.memory.region[i].size); 42 } 43 44 udbg_printf("\n reserved.cnt = 0x%lx\n", 45 + lmb.reserved.cnt); 46 udbg_printf(" reserved.size = 0x%lx\n", 47 + lmb.reserved.size); 48 + for (i=0; i < lmb.reserved.cnt ;i++) { 49 udbg_printf(" reserved.region[0x%x].base = 0x%lx\n", 50 + i, lmb.reserved.region[i].base); 51 udbg_printf(" .size = 0x%lx\n", 52 + lmb.reserved.region[i].size); 53 } 54 #endif /* DEBUG */ 55 } ··· 98 rgn->region[r1].size += rgn->region[r2].size; 99 for (i=r2; i < rgn->cnt-1; i++) { 100 rgn->region[i].base = rgn->region[i+1].base; 101 rgn->region[i].size = rgn->region[i+1].size; 102 } 103 rgn->cnt--; ··· 108 void __init 109 lmb_init(void) 110 { 111 /* Create a dummy zero size LMB which will get coalesced away later. 112 * This simplifies the lmb_add() code below... 113 */ 114 + lmb.memory.region[0].base = 0; 115 + lmb.memory.region[0].size = 0; 116 + lmb.memory.cnt = 1; 117 118 /* Ditto. */ 119 + lmb.reserved.region[0].base = 0; 120 + lmb.reserved.region[0].size = 0; 121 + lmb.reserved.cnt = 1; 122 } 123 124 /* This routine called with relocation disabled. */ 125 void __init 126 lmb_analyze(void) 127 { 128 + int i; 129 130 + lmb.memory.size = 0; 131 132 + for (i = 0; i < lmb.memory.cnt; i++) 133 + lmb.memory.size += lmb.memory.region[i].size; 134 } 135 136 /* This routine called with relocation disabled. */ ··· 168 adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize); 169 if ( adjacent > 0 ) { 170 rgn->region[i].base -= size; 171 rgn->region[i].size += size; 172 coalesced++; 173 break; ··· 195 for (i=rgn->cnt-1; i >= 0; i--) { 196 if (base < rgn->region[i].base) { 197 rgn->region[i+1].base = rgn->region[i].base; 198 rgn->region[i+1].size = rgn->region[i].size; 199 } else { 200 rgn->region[i+1].base = base; 201 rgn->region[i+1].size = size; 202 break; 203 } ··· 213 long __init 214 lmb_add(unsigned long base, unsigned long size) 215 { 216 + struct lmb_region *_rgn = &(lmb.memory); 217 218 /* On pSeries LPAR systems, the first LMB is our RMO region. */ 219 if ( base == 0 ) 220 + lmb.rmo_size = size; 221 222 return lmb_add_region(_rgn, base, size); 223 ··· 227 long __init 228 lmb_reserve(unsigned long base, unsigned long size) 229 { 230 + struct lmb_region *_rgn = &(lmb.reserved); 231 232 return lmb_add_region(_rgn, base, size); 233 } ··· 260 { 261 long i, j; 262 unsigned long base = 0; 263 264 + for (i=lmb.memory.cnt-1; i >= 0; i--) { 265 + unsigned long lmbbase = lmb.memory.region[i].base; 266 + unsigned long lmbsize = lmb.memory.region[i].size; 267 268 if ( max_addr == LMB_ALLOC_ANYWHERE ) 269 base = _ALIGN_DOWN(lmbbase+lmbsize-size, align); ··· 276 continue; 277 278 while ( (lmbbase <= base) && 279 + ((j = lmb_overlaps_region(&lmb.reserved,base,size)) >= 0) ) { 280 + base = _ALIGN_DOWN(lmb.reserved.region[j].base-size, align); 281 } 282 283 if ( (base != 0) && (lmbbase <= base) ) ··· 287 if ( i < 0 ) 288 return 0; 289 290 + lmb_add_region(&lmb.reserved, base, size); 291 292 return base; 293 } 294 295 + /* You must call lmb_analyze() before this. 
*/ 296 unsigned long __init 297 lmb_phys_mem_size(void) 298 { 299 + return lmb.memory.size; 300 } 301 302 unsigned long __init 303 lmb_end_of_DRAM(void) 304 { 305 + int idx = lmb.memory.cnt - 1; 306 307 + return (lmb.memory.region[idx].base + lmb.memory.region[idx].size); 308 } 309 310 /* ··· 353 { 354 extern unsigned long memory_limit; 355 unsigned long i, limit; 356 357 if (! memory_limit) 358 return; 359 360 limit = memory_limit; 361 + for (i = 0; i < lmb.memory.cnt; i++) { 362 + if (limit > lmb.memory.region[i].size) { 363 + limit -= lmb.memory.region[i].size; 364 continue; 365 } 366 367 + lmb.memory.region[i].size = limit; 368 + lmb.memory.cnt = i + 1; 369 break; 370 } 371 }
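With the physbase field and the CONFIG_MSCHUNKS special cases gone, lmb_analyze() collapses to a plain sum of region sizes, and the new comment makes the ordering contract explicit: lmb.memory.size is only valid once lmb_analyze() has run. Roughly the expected boot sequence (paraphrased — the add/reserve calls actually happen during device-tree scanning elsewhere):

    lmb_init();                     /* seed with a dummy zero-size region */
    /* ... lmb_add()/lmb_reserve() while scanning the device tree ... */
    lmb_analyze();                  /* recompute lmb.memory.size */
    size = lmb_phys_mem_size();     /* now meaningful */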
+3 -3
arch/ppc64/kernel/lparcfg.c
··· 29 #include <asm/iSeries/HvLpConfig.h> 30 #include <asm/lppaca.h> 31 #include <asm/hvcall.h> 32 - #include <asm/cputable.h> 33 #include <asm/rtas.h> 34 #include <asm/system.h> 35 #include <asm/time.h> ··· 377 378 partition_active_processors = lparcfg_count_active_processors(); 379 380 - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { 381 unsigned long h_entitled, h_unallocated; 382 unsigned long h_aggregation, h_resource; 383 unsigned long pool_idle_time, pool_procs; ··· 571 mode_t mode = S_IRUSR; 572 573 /* Allow writing if we have FW_FEATURE_SPLPAR */ 574 - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { 575 lparcfg_fops.write = lparcfg_write; 576 mode |= S_IWUSR; 577 }
··· 29 #include <asm/iSeries/HvLpConfig.h> 30 #include <asm/lppaca.h> 31 #include <asm/hvcall.h> 32 + #include <asm/firmware.h> 33 #include <asm/rtas.h> 34 #include <asm/system.h> 35 #include <asm/time.h> ··· 377 378 partition_active_processors = lparcfg_count_active_processors(); 379 380 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 381 unsigned long h_entitled, h_unallocated; 382 unsigned long h_aggregation, h_resource; 383 unsigned long pool_idle_time, pool_procs; ··· 571 mode_t mode = S_IRUSR; 572 573 /* Allow writing if we have FW_FEATURE_SPLPAR */ 574 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 575 lparcfg_fops.write = lparcfg_write; 576 mode |= S_IWUSR; 577 }
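This is the first of several files in this merge that replace cur_cpu_spec->firmware_features tests with firmware_has_feature(). The helper itself is not part of this fragment; given the ppc64_firmware_features global that iSeries_setup.c and pSeries_setup.c now populate, it is presumably just a mask test along these lines (a guess at <asm/firmware.h>, not the verbatim header):

    /* Presumed shape of the new firmware feature test (sketch). */
    extern unsigned long ppc64_firmware_features;

    static inline int firmware_has_feature(unsigned long feature)
    {
            return (ppc64_firmware_features & feature) != 0;
    }

Moving the bits out of cur_cpu_spec is sensible: which hypervisor services exist is a property of the firmware and the partition, not of the CPU model.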
+98
arch/ppc64/kernel/misc.S
··· 680 ld r30,-16(r1) 681 blr 682 683 /* kexec_wait(phys_cpu) 684 * 685 * wait for the flag to change, indicating this kernel is going away but
··· 680 ld r30,-16(r1) 681 blr 682 683 + /* 684 + * disable_kernel_fp() 685 + * Disable the FPU. 686 + */ 687 + _GLOBAL(disable_kernel_fp) 688 + mfmsr r3 689 + rldicl r0,r3,(63-MSR_FP_LG),1 690 + rldicl r3,r0,(MSR_FP_LG+1),0 691 + mtmsrd r3 /* disable use of fpu now */ 692 + isync 693 + blr 694 + 695 + /* 696 + * giveup_fpu(tsk) 697 + * Disable FP for the task given as the argument, 698 + * and save the floating-point registers in its thread_struct. 699 + * Enables the FPU for use in the kernel on return. 700 + */ 701 + _GLOBAL(giveup_fpu) 702 + mfmsr r5 703 + ori r5,r5,MSR_FP 704 + mtmsrd r5 /* enable use of fpu now */ 705 + isync 706 + cmpdi 0,r3,0 707 + beqlr- /* if no previous owner, done */ 708 + addi r3,r3,THREAD /* want THREAD of task */ 709 + ld r5,PT_REGS(r3) 710 + cmpdi 0,r5,0 711 + SAVE_32FPRS(0, r3) 712 + mffs fr0 713 + stfd fr0,THREAD_FPSCR(r3) 714 + beq 1f 715 + ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 716 + li r3,MSR_FP|MSR_FE0|MSR_FE1 717 + andc r4,r4,r3 /* disable FP for previous task */ 718 + std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 719 + 1: 720 + #ifndef CONFIG_SMP 721 + li r5,0 722 + ld r4,last_task_used_math@got(r2) 723 + std r5,0(r4) 724 + #endif /* CONFIG_SMP */ 725 + blr 726 + 727 + #ifdef CONFIG_ALTIVEC 728 + 729 + #if 0 /* this has no callers for now */ 730 + /* 731 + * disable_kernel_altivec() 732 + * Disable the VMX. 733 + */ 734 + _GLOBAL(disable_kernel_altivec) 735 + mfmsr r3 736 + rldicl r0,r3,(63-MSR_VEC_LG),1 737 + rldicl r3,r0,(MSR_VEC_LG+1),0 738 + mtmsrd r3 /* disable use of VMX now */ 739 + isync 740 + blr 741 + #endif /* 0 */ 742 + 743 + /* 744 + * giveup_altivec(tsk) 745 + * Disable VMX for the task given as the argument, 746 + * and save the vector registers in its thread_struct. 747 + * Enables the VMX for use in the kernel on return. 748 + */ 749 + _GLOBAL(giveup_altivec) 750 + mfmsr r5 751 + oris r5,r5,MSR_VEC@h 752 + mtmsrd r5 /* enable use of VMX now */ 753 + isync 754 + cmpdi 0,r3,0 755 + beqlr- /* if no previous owner, done */ 756 + addi r3,r3,THREAD /* want THREAD of task */ 757 + ld r5,PT_REGS(r3) 758 + cmpdi 0,r5,0 759 + SAVE_32VRS(0,r4,r3) 760 + mfvscr vr0 761 + li r4,THREAD_VSCR 762 + stvx vr0,r4,r3 763 + beq 1f 764 + ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 765 + lis r3,MSR_VEC@h 766 + andc r4,r4,r3 /* disable FP for previous task */ 767 + std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 768 + 1: 769 + #ifndef CONFIG_SMP 770 + li r5,0 771 + ld r4,last_task_used_altivec@got(r2) 772 + std r5,0(r4) 773 + #endif /* CONFIG_SMP */ 774 + blr 775 + 776 + #endif /* CONFIG_ALTIVEC */ 777 + 778 + _GLOBAL(__setup_cpu_power3) 779 + blr 780 + 781 /* kexec_wait(phys_cpu) 782 * 783 * wait for the flag to change, indicating this kernel is going away but
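The routines added to misc.S here are near-verbatim moves of the code deleted from head.S above (plus the trivial __setup_cpu_power3); disable_kernel_altivec comes across under #if 0, noted as having no callers. This takes them out of the size-constrained low-memory head. From C they are plain entry points; their presumed prototypes and a typical SMP call site look like this (a sketch — the actual declarations live in headers not shown in this diff):

    /* C-side view of the assembly entry points above (assumed prototypes). */
    extern void disable_kernel_fp(void);
    extern void giveup_fpu(struct task_struct *tsk);
    #ifdef CONFIG_ALTIVEC
    extern void giveup_altivec(struct task_struct *tsk);
    #endif

    /* Typical SMP use in the context-switch path: flush the outgoing
     * task's live register state into its thread_struct before it can
     * be rescheduled on another CPU. */
    if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
            giveup_fpu(prev);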
+2 -1
arch/ppc64/kernel/pSeries_iommu.c
··· 45 #include <asm/plpar_wrappers.h> 46 #include <asm/pSeries_reconfig.h> 47 #include <asm/systemcfg.h> 48 #include "pci.h" 49 50 #define DBG(fmt...) ··· 547 } 548 549 if (systemcfg->platform & PLATFORM_LPAR) { 550 - if (cur_cpu_spec->firmware_features & FW_FEATURE_MULTITCE) { 551 ppc_md.tce_build = tce_buildmulti_pSeriesLP; 552 ppc_md.tce_free = tce_freemulti_pSeriesLP; 553 } else {
··· 45 #include <asm/plpar_wrappers.h> 46 #include <asm/pSeries_reconfig.h> 47 #include <asm/systemcfg.h> 48 + #include <asm/firmware.h> 49 #include "pci.h" 50 51 #define DBG(fmt...) ··· 546 } 547 548 if (systemcfg->platform & PLATFORM_LPAR) { 549 + if (firmware_has_feature(FW_FEATURE_MULTITCE)) { 550 ppc_md.tce_build = tce_buildmulti_pSeriesLP; 551 ppc_md.tce_free = tce_freemulti_pSeriesLP; 552 } else {
+1 -3
arch/ppc64/kernel/pSeries_lpar.c
··· 52 EXPORT_SYMBOL(plpar_hcall_norets); 53 EXPORT_SYMBOL(plpar_hcall_8arg_2ret); 54 55 - extern void fw_feature_init(void); 56 extern void pSeries_find_serial_port(void); 57 58 ··· 278 unsigned long va, unsigned long prpn, 279 unsigned long vflags, unsigned long rflags) 280 { 281 - unsigned long arpn = physRpn_to_absRpn(prpn); 282 unsigned long lpar_rc; 283 unsigned long flags; 284 unsigned long slot; ··· 288 if (vflags & HPTE_V_LARGE) 289 hpte_v &= ~(1UL << HPTE_V_AVPN_SHIFT); 290 291 - hpte_r = (arpn << HPTE_R_RPN_SHIFT) | rflags; 292 293 /* Now fill in the actual HPTE */ 294 /* Set CEC cookie to 0 */
··· 52 EXPORT_SYMBOL(plpar_hcall_norets); 53 EXPORT_SYMBOL(plpar_hcall_8arg_2ret); 54 55 extern void pSeries_find_serial_port(void); 56 57 ··· 279 unsigned long va, unsigned long prpn, 280 unsigned long vflags, unsigned long rflags) 281 { 282 unsigned long lpar_rc; 283 unsigned long flags; 284 unsigned long slot; ··· 290 if (vflags & HPTE_V_LARGE) 291 hpte_v &= ~(1UL << HPTE_V_AVPN_SHIFT); 292 293 + hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags; 294 295 /* Now fill in the actual HPTE */ 296 /* Set CEC cookie to 0 */
+29 -10
arch/ppc64/kernel/pSeries_setup.c
··· 60 #include <asm/nvram.h> 61 #include <asm/plpar_wrappers.h> 62 #include <asm/xics.h> 63 - #include <asm/cputable.h> 64 65 #include "i8259.h" 66 #include "mpic.h" ··· 188 " MPIC "); 189 } 190 191 static void __init pSeries_setup_arch(void) 192 { 193 /* Fixup ppc_md depending on the type of interrupt controller */ ··· 247 248 pSeries_nvram_init(); 249 250 - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) 251 - vpa_init(boot_cpuid); 252 - 253 /* Choose an idle loop */ 254 - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { 255 if (get_paca()->lppaca.shared_proc) { 256 printk(KERN_INFO "Using shared processor idle loop\n"); 257 ppc_md.idle_loop = pseries_shared_idle; ··· 261 printk(KERN_INFO "Using default idle loop\n"); 262 ppc_md.idle_loop = default_idle; 263 } 264 } 265 266 static int __init pSeries_init_panel(void) ··· 279 arch_initcall(pSeries_init_panel); 280 281 282 - /* Build up the firmware_features bitmask field 283 * using contents of device-tree/ibm,hypertas-functions. 284 * Ultimately this functionality may be moved into prom.c prom_init(). 285 */ 286 - void __init fw_feature_init(void) 287 { 288 struct device_node * dn; 289 char * hypertas; ··· 291 292 DBG(" -> fw_feature_init()\n"); 293 294 - cur_cpu_spec->firmware_features = 0; 295 dn = of_find_node_by_path("/rtas"); 296 if (dn == NULL) { 297 printk(KERN_ERR "WARNING ! Cannot find RTAS in device-tree !\n"); ··· 307 if ((firmware_features_table[i].name) && 308 (strcmp(firmware_features_table[i].name,hypertas))==0) { 309 /* we have a match */ 310 - cur_cpu_spec->firmware_features |= 311 (firmware_features_table[i].val); 312 break; 313 } ··· 321 of_node_put(dn); 322 no_rtas: 323 printk(KERN_INFO "firmware_features = 0x%lx\n", 324 - cur_cpu_spec->firmware_features); 325 326 DBG(" <- fw_feature_init()\n"); 327 }
··· 60 #include <asm/nvram.h> 61 #include <asm/plpar_wrappers.h> 62 #include <asm/xics.h> 63 + #include <asm/firmware.h> 64 + #include <asm/pmc.h> 65 66 #include "i8259.h" 67 #include "mpic.h" ··· 187 " MPIC "); 188 } 189 190 + static void pseries_lpar_enable_pmcs(void) 191 + { 192 + unsigned long set, reset; 193 + 194 + power4_enable_pmcs(); 195 + 196 + set = 1UL << 63; 197 + reset = 0; 198 + plpar_hcall_norets(H_PERFMON, set, reset); 199 + 200 + /* instruct hypervisor to maintain PMCs */ 201 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) 202 + get_paca()->lppaca.pmcregs_in_use = 1; 203 + } 204 + 205 static void __init pSeries_setup_arch(void) 206 { 207 /* Fixup ppc_md depending on the type of interrupt controller */ ··· 231 232 pSeries_nvram_init(); 233 234 /* Choose an idle loop */ 235 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 236 + vpa_init(boot_cpuid); 237 if (get_paca()->lppaca.shared_proc) { 238 printk(KERN_INFO "Using shared processor idle loop\n"); 239 ppc_md.idle_loop = pseries_shared_idle; ··· 247 printk(KERN_INFO "Using default idle loop\n"); 248 ppc_md.idle_loop = default_idle; 249 } 250 + 251 + if (systemcfg->platform & PLATFORM_LPAR) 252 + ppc_md.enable_pmcs = pseries_lpar_enable_pmcs; 253 + else 254 + ppc_md.enable_pmcs = power4_enable_pmcs; 255 } 256 257 static int __init pSeries_init_panel(void) ··· 260 arch_initcall(pSeries_init_panel); 261 262 263 + /* Build up the ppc64_firmware_features bitmask field 264 * using contents of device-tree/ibm,hypertas-functions. 265 * Ultimately this functionality may be moved into prom.c prom_init(). 266 */ 267 + static void __init fw_feature_init(void) 268 { 269 struct device_node * dn; 270 char * hypertas; ··· 272 273 DBG(" -> fw_feature_init()\n"); 274 275 + ppc64_firmware_features = 0; 276 dn = of_find_node_by_path("/rtas"); 277 if (dn == NULL) { 278 printk(KERN_ERR "WARNING ! Cannot find RTAS in device-tree !\n"); ··· 288 if ((firmware_features_table[i].name) && 289 (strcmp(firmware_features_table[i].name,hypertas))==0) { 290 /* we have a match */ 291 + ppc64_firmware_features |= 292 (firmware_features_table[i].val); 293 break; 294 } ··· 302 of_node_put(dn); 303 no_rtas: 304 printk(KERN_INFO "firmware_features = 0x%lx\n", 305 + ppc64_firmware_features); 306 307 DBG(" <- fw_feature_init()\n"); 308 }
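pSeries_setup_arch() now also selects the new ppc_md.enable_pmcs hook per platform: the raw HID0 poke on bare metal, and on LPAR the H_PERFMON hcall (top bit of the 'set' argument) plus pmcregs_in_use so the hypervisor preserves the counters across partition switches. The consumer is not in this fragment, but the natural dispatch site is wherever a performance-monitor driver first claims the hardware — presumably something like:

    /* Presumed dispatch for the new hook (not shown in this diff). */
    if (ppc_md.enable_pmcs)
            ppc_md.enable_pmcs();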
+2 -1
arch/ppc64/kernel/pSeries_smp.c
··· 41 #include <asm/machdep.h> 42 #include <asm/xics.h> 43 #include <asm/cputable.h> 44 #include <asm/system.h> 45 #include <asm/rtas.h> 46 #include <asm/plpar_wrappers.h> ··· 327 if (cpu != boot_cpuid) 328 xics_setup_cpu(); 329 330 - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) 331 vpa_init(cpu); 332 333 cpu_clear(cpu, of_spin_map);
··· 41 #include <asm/machdep.h> 42 #include <asm/xics.h> 43 #include <asm/cputable.h> 44 + #include <asm/firmware.h> 45 #include <asm/system.h> 46 #include <asm/rtas.h> 47 #include <asm/plpar_wrappers.h> ··· 326 if (cpu != boot_cpuid) 327 xics_setup_cpu(); 328 329 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) 330 vpa_init(cpu); 331 332 cpu_clear(cpu, of_spin_map);
+266
arch/ppc64/kernel/pSeries_vio.c
···
··· 1 + /* 2 + * IBM PowerPC pSeries Virtual I/O Infrastructure Support. 3 + * 4 + * Copyright (c) 2003-2005 IBM Corp. 5 + * Dave Engebretsen engebret@us.ibm.com 6 + * Santiago Leon santil@us.ibm.com 7 + * Hollis Blanchard <hollisb@us.ibm.com> 8 + * Stephen Rothwell 9 + * 10 + * This program is free software; you can redistribute it and/or 11 + * modify it under the terms of the GNU General Public License 12 + * as published by the Free Software Foundation; either version 13 + * 2 of the License, or (at your option) any later version. 14 + */ 15 + 16 + #include <linux/init.h> 17 + #include <linux/module.h> 18 + #include <linux/mm.h> 19 + #include <linux/kobject.h> 20 + #include <asm/iommu.h> 21 + #include <asm/dma.h> 22 + #include <asm/vio.h> 23 + #include <asm/hvcall.h> 24 + 25 + extern struct subsystem devices_subsys; /* needed for vio_find_name() */ 26 + 27 + static void probe_bus_pseries(void) 28 + { 29 + struct device_node *node_vroot, *of_node; 30 + 31 + node_vroot = find_devices("vdevice"); 32 + if ((node_vroot == NULL) || (node_vroot->child == NULL)) 33 + /* this machine doesn't do virtual IO, and that's ok */ 34 + return; 35 + 36 + /* 37 + * Create struct vio_devices for each virtual device in the device tree. 38 + * Drivers will associate with them later. 39 + */ 40 + for (of_node = node_vroot->child; of_node != NULL; 41 + of_node = of_node->sibling) { 42 + printk(KERN_DEBUG "%s: processing %p\n", __FUNCTION__, of_node); 43 + vio_register_device_node(of_node); 44 + } 45 + } 46 + 47 + /** 48 + * vio_match_device_pseries: - Tell if a pSeries VIO device matches a 49 + * vio_device_id 50 + */ 51 + static int vio_match_device_pseries(const struct vio_device_id *id, 52 + const struct vio_dev *dev) 53 + { 54 + return (strncmp(dev->type, id->type, strlen(id->type)) == 0) && 55 + device_is_compatible(dev->dev.platform_data, id->compat); 56 + } 57 + 58 + static void vio_release_device_pseries(struct device *dev) 59 + { 60 + /* XXX free TCE table */ 61 + of_node_put(dev->platform_data); 62 + } 63 + 64 + static ssize_t viodev_show_devspec(struct device *dev, 65 + struct device_attribute *attr, char *buf) 66 + { 67 + struct device_node *of_node = dev->platform_data; 68 + 69 + return sprintf(buf, "%s\n", of_node->full_name); 70 + } 71 + DEVICE_ATTR(devspec, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_devspec, NULL); 72 + 73 + static void vio_unregister_device_pseries(struct vio_dev *viodev) 74 + { 75 + device_remove_file(&viodev->dev, &dev_attr_devspec); 76 + } 77 + 78 + /** 79 + * vio_bus_init_pseries: - Initialize the pSeries virtual IO bus 80 + */ 81 + static int __init vio_bus_init_pseries(void) 82 + { 83 + int err; 84 + 85 + err = vio_bus_init(vio_match_device_pseries, 86 + vio_unregister_device_pseries, 87 + vio_release_device_pseries); 88 + if (err == 0) 89 + probe_bus_pseries(); 90 + return err; 91 + } 92 + 93 + __initcall(vio_bus_init_pseries); 94 + 95 + /** 96 + * vio_build_iommu_table: - gets the dma information from OF and 97 + * builds the TCE tree. 98 + * @dev: the virtual device. 99 + * 100 + * Returns a pointer to the built tce tree, or NULL if it can't 101 + * find property. 
102 + */ 103 + static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) 104 + { 105 + unsigned int *dma_window; 106 + struct iommu_table *newTceTable; 107 + unsigned long offset; 108 + int dma_window_property_size; 109 + 110 + dma_window = (unsigned int *) get_property(dev->dev.platform_data, "ibm,my-dma-window", &dma_window_property_size); 111 + if(!dma_window) { 112 + return NULL; 113 + } 114 + 115 + newTceTable = (struct iommu_table *) kmalloc(sizeof(struct iommu_table), GFP_KERNEL); 116 + 117 + /* There should be some code to extract the phys-encoded offset 118 + using prom_n_addr_cells(). However, according to a comment 119 + on earlier versions, it's always zero, so we don't bother */ 120 + offset = dma_window[1] >> PAGE_SHIFT; 121 + 122 + /* TCE table size - measured in tce entries */ 123 + newTceTable->it_size = dma_window[4] >> PAGE_SHIFT; 124 + /* offset for VIO should always be 0 */ 125 + newTceTable->it_offset = offset; 126 + newTceTable->it_busno = 0; 127 + newTceTable->it_index = (unsigned long)dma_window[0]; 128 + newTceTable->it_type = TCE_VB; 129 + 130 + return iommu_init_table(newTceTable); 131 + } 132 + 133 + /** 134 + * vio_register_device_node: - Register a new vio device. 135 + * @of_node: The OF node for this device. 136 + * 137 + * Creates and initializes a vio_dev structure from the data in 138 + * of_node (dev.platform_data) and adds it to the list of virtual devices. 139 + * Returns a pointer to the created vio_dev or NULL if node has 140 + * NULL device_type or compatible fields. 141 + */ 142 + struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node) 143 + { 144 + struct vio_dev *viodev; 145 + unsigned int *unit_address; 146 + unsigned int *irq_p; 147 + 148 + /* we need the 'device_type' property, in order to match with drivers */ 149 + if ((NULL == of_node->type)) { 150 + printk(KERN_WARNING 151 + "%s: node %s missing 'device_type'\n", __FUNCTION__, 152 + of_node->name ? of_node->name : "<unknown>"); 153 + return NULL; 154 + } 155 + 156 + unit_address = (unsigned int *)get_property(of_node, "reg", NULL); 157 + if (!unit_address) { 158 + printk(KERN_WARNING "%s: node %s missing 'reg'\n", __FUNCTION__, 159 + of_node->name ? of_node->name : "<unknown>"); 160 + return NULL; 161 + } 162 + 163 + /* allocate a vio_dev for this node */ 164 + viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL); 165 + if (!viodev) { 166 + return NULL; 167 + } 168 + memset(viodev, 0, sizeof(struct vio_dev)); 169 + 170 + viodev->dev.platform_data = of_node_get(of_node); 171 + 172 + viodev->irq = NO_IRQ; 173 + irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL); 174 + if (irq_p) { 175 + int virq = virt_irq_create_mapping(*irq_p); 176 + if (virq == NO_IRQ) { 177 + printk(KERN_ERR "Unable to allocate interrupt " 178 + "number for %s\n", of_node->full_name); 179 + } else 180 + viodev->irq = irq_offset_up(virq); 181 + } 182 + 183 + snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address); 184 + 185 + /* register with generic device framework */ 186 + if (vio_register_device_common(viodev, of_node->name, of_node->type, 187 + *unit_address, vio_build_iommu_table(viodev)) 188 + == NULL) { 189 + /* XXX free TCE table */ 190 + kfree(viodev); 191 + return NULL; 192 + } 193 + device_create_file(&viodev->dev, &dev_attr_devspec); 194 + 195 + return viodev; 196 + } 197 + EXPORT_SYMBOL(vio_register_device_node); 198 + 199 + /** 200 + * vio_get_attribute: - get attribute for virtual device 201 + * @vdev: The vio device to get property. 
202 + * @which: The property/attribute to be extracted. 203 + * @length: Pointer to length of returned data size (unused if NULL). 204 + * 205 + * Calls prom.c's get_property() to return the value of the 206 + * attribute specified by the preprocessor constant @which 207 + */ 208 + const void * vio_get_attribute(struct vio_dev *vdev, void* which, int* length) 209 + { 210 + return get_property(vdev->dev.platform_data, (char*)which, length); 211 + } 212 + EXPORT_SYMBOL(vio_get_attribute); 213 + 214 + /* vio_find_name() - internal because only vio.c knows how we formatted the 215 + * kobject name 216 + * XXX once vio_bus_type.devices is actually used as a kset in 217 + * drivers/base/bus.c, this function should be removed in favor of 218 + * "device_find(kobj_name, &vio_bus_type)" 219 + */ 220 + static struct vio_dev *vio_find_name(const char *kobj_name) 221 + { 222 + struct kobject *found; 223 + 224 + found = kset_find_obj(&devices_subsys.kset, kobj_name); 225 + if (!found) 226 + return NULL; 227 + 228 + return to_vio_dev(container_of(found, struct device, kobj)); 229 + } 230 + 231 + /** 232 + * vio_find_node - find an already-registered vio_dev 233 + * @vnode: device_node of the virtual device we're looking for 234 + */ 235 + struct vio_dev *vio_find_node(struct device_node *vnode) 236 + { 237 + uint32_t *unit_address; 238 + char kobj_name[BUS_ID_SIZE]; 239 + 240 + /* construct the kobject name from the device node */ 241 + unit_address = (uint32_t *)get_property(vnode, "reg", NULL); 242 + if (!unit_address) 243 + return NULL; 244 + snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address); 245 + 246 + return vio_find_name(kobj_name); 247 + } 248 + EXPORT_SYMBOL(vio_find_node); 249 + 250 + int vio_enable_interrupts(struct vio_dev *dev) 251 + { 252 + int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE); 253 + if (rc != H_Success) 254 + printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc); 255 + return rc; 256 + } 257 + EXPORT_SYMBOL(vio_enable_interrupts); 258 + 259 + int vio_disable_interrupts(struct vio_dev *dev) 260 + { 261 + int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE); 262 + if (rc != H_Success) 263 + printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc); 264 + return rc; 265 + } 266 + EXPORT_SYMBOL(vio_disable_interrupts);
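vio_build_iommu_table() above decodes the ibm,my-dma-window property purely by cell index. Spelled out as a comment block, the layout this code assumes is the following — inferred from the indices it touches, not from a binding document; everything else is skipped on the code's own stated assumption that the extra offset cells are zero:

    /* Cell layout assumed by vio_build_iommu_table() above (inference):
     *   dma_window[0]  table index (LIOBN)
     *   dma_window[1]  start of the phys-encoded bus offset; per the
     *                  comment in the code, effectively always zero
     *   dma_window[4]  window size in bytes
     */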
+2 -2
arch/ppc64/kernel/pacaData.c
··· 78 79 #define BOOTCPU_PACA_INIT(number) \ 80 { \ 81 - PACA_INIT_COMMON(number, 1, 0, STAB0_VIRT_ADDR) \ 82 PACA_INIT_ISERIES(number) \ 83 } 84 ··· 90 91 #define BOOTCPU_PACA_INIT(number) \ 92 { \ 93 - PACA_INIT_COMMON(number, 1, STAB0_PHYS_ADDR, STAB0_VIRT_ADDR) \ 94 } 95 #endif 96
··· 78 79 #define BOOTCPU_PACA_INIT(number) \ 80 { \ 81 + PACA_INIT_COMMON(number, 1, 0, (u64)&initial_stab) \ 82 PACA_INIT_ISERIES(number) \ 83 } 84 ··· 90 91 #define BOOTCPU_PACA_INIT(number) \ 92 { \ 93 + PACA_INIT_COMMON(number, 1, STAB0_PHYS_ADDR, (u64)&initial_stab) \ 94 } 95 #endif 96
+2
arch/ppc64/kernel/pmac_setup.c
··· 71 #include <asm/of_device.h> 72 #include <asm/lmb.h> 73 #include <asm/smu.h> 74 75 #include "pmac.h" 76 #include "mpic.h" ··· 512 .progress = pmac_progress, 513 .check_legacy_ioport = pmac_check_legacy_ioport, 514 .idle_loop = native_idle, 515 };
··· 71 #include <asm/of_device.h> 72 #include <asm/lmb.h> 73 #include <asm/smu.h> 74 + #include <asm/pmc.h> 75 76 #include "pmac.h" 77 #include "mpic.h" ··· 511 .progress = pmac_progress, 512 .check_legacy_ioport = pmac_check_legacy_ioport, 513 .idle_loop = native_idle, 514 + .enable_pmcs = power4_enable_pmcs, 515 };
+21
arch/ppc64/kernel/pmc.c
··· 65 spin_unlock(&pmc_owner_lock); 66 } 67 EXPORT_SYMBOL_GPL(release_pmc_hardware);
··· 65 spin_unlock(&pmc_owner_lock); 66 } 67 EXPORT_SYMBOL_GPL(release_pmc_hardware); 68 + 69 + void power4_enable_pmcs(void) 70 + { 71 + unsigned long hid0; 72 + 73 + hid0 = mfspr(HID0); 74 + hid0 |= 1UL << (63 - 20); 75 + 76 + /* POWER4 requires the following sequence */ 77 + asm volatile( 78 + "sync\n" 79 + "mtspr %1, %0\n" 80 + "mfspr %0, %1\n" 81 + "mfspr %0, %1\n" 82 + "mfspr %0, %1\n" 83 + "mfspr %0, %1\n" 84 + "mfspr %0, %1\n" 85 + "mfspr %0, %1\n" 86 + "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0): 87 + "memory"); 88 + }
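Two things in power4_enable_pmcs() deserve a gloss. The 1UL << (63 - 20) sets HID0 bit 20 in IBM numbering, where bit 0 is the most significant bit of the 64-bit register; and the sync / mtspr / six mfspr reads / isync string is the documented POWER4 sequence for updating HID0. The bit arithmetic on its own (MSB0_BIT64 is an illustrative macro, not one the tree defines):

    /* IBM (MSB-0) bit numbering: bit 0 is the most significant bit, so
     * IBM bit N of a 64-bit register is (63 - N) in LSB-0 numbering. */
    #define MSB0_BIT64(n)   (1UL << (63 - (n)))

    hid0 |= MSB0_BIT64(20);         /* same value as 1UL << 43 */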
+5 -7
arch/ppc64/kernel/process.c
··· 50 #include <asm/machdep.h> 51 #include <asm/iSeries/HvCallHpt.h> 52 #include <asm/cputable.h> 53 #include <asm/sections.h> 54 #include <asm/tlbflush.h> 55 #include <asm/time.h> ··· 203 new_thread = &new->thread; 204 old_thread = &current->thread; 205 206 - /* Collect purr utilization data per process and per processor wise */ 207 - /* purr is nothing but processor time base */ 208 - 209 - #if defined(CONFIG_PPC_PSERIES) 210 - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { 211 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 212 long unsigned start_tb, current_tb; 213 start_tb = old_thread->start_tb; ··· 214 old_thread->accum_tb += (current_tb - start_tb); 215 new_thread->start_tb = current_tb; 216 } 217 - #endif 218 - 219 220 local_irq_save(flags); 221 last = _switch(old_thread, new_thread);
··· 50 #include <asm/machdep.h> 51 #include <asm/iSeries/HvCallHpt.h> 52 #include <asm/cputable.h> 53 + #include <asm/firmware.h> 54 #include <asm/sections.h> 55 #include <asm/tlbflush.h> 56 #include <asm/time.h> ··· 202 new_thread = &new->thread; 203 old_thread = &current->thread; 204 205 + /* Collect PURR utilization data per process and per processor; 206 + * the PURR is simply the processor time base. 207 + */ 208 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 209 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 210 long unsigned start_tb, current_tb; 211 start_tb = old_thread->start_tb; ··· 214 old_thread->accum_tb += (current_tb - start_tb); 215 new_thread->start_tb = current_tb; 216 } 217 218 local_irq_save(flags); 219 last = _switch(old_thread, new_thread);
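The accum_tb/start_tb bookkeeping above gives each thread a running total of time-base (PURR-proxy) ticks spent on-CPU, closed out at every context switch. A consumer such as lparcfg would reconstruct usage along these lines (a sketch; task_tb_used() is a hypothetical helper, not in the tree):

    /* Sketch: ticks a thread has consumed, including the open period. */
    static unsigned long task_tb_used(struct thread_struct *t,
                                      unsigned long now_tb)
    {
            /* accum_tb covers completed scheduling periods; add the running
             * one only if the task is currently on a CPU. */
            return t->accum_tb + (now_tb - t->start_tb);
    }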
+152 -32
arch/ppc64/kernel/prom.c
··· 625 626 static inline char *find_flat_dt_string(u32 offset) 627 { 628 - return ((char *)initial_boot_params) + initial_boot_params->off_dt_strings 629 - + offset; 630 } 631 632 /** ··· 635 * unflatten the tree 636 */ 637 static int __init scan_flat_dt(int (*it)(unsigned long node, 638 - const char *full_path, void *data), 639 void *data) 640 { 641 unsigned long p = ((unsigned long)initial_boot_params) + 642 initial_boot_params->off_dt_struct; 643 int rc = 0; 644 645 do { 646 u32 tag = *((u32 *)p); 647 char *pathp; 648 649 p += 4; 650 - if (tag == OF_DT_END_NODE) 651 continue; 652 if (tag == OF_DT_END) 653 break; 654 if (tag == OF_DT_PROP) { 655 u32 sz = *((u32 *)p); 656 p += 8; 657 - p = _ALIGN(p, sz >= 8 ? 8 : 4); 658 p += sz; 659 p = _ALIGN(p, 4); 660 continue; ··· 671 " device tree !\n", tag); 672 return -EINVAL; 673 } 674 pathp = (char *)p; 675 p = _ALIGN(p + strlen(pathp) + 1, 4); 676 - rc = it(p, pathp, data); 677 if (rc != 0) 678 break; 679 } while(1); ··· 705 const char *nstr; 706 707 p += 4; 708 if (tag != OF_DT_PROP) 709 return NULL; 710 711 sz = *((u32 *)p); 712 noff = *((u32 *)(p + 4)); 713 p += 8; 714 - p = _ALIGN(p, sz >= 8 ? 8 : 4); 715 716 nstr = find_flat_dt_string(noff); 717 if (nstr == NULL) { 718 - printk(KERN_WARNING "Can't find property index name !\n"); 719 return NULL; 720 } 721 if (strcmp(name, nstr) == 0) { ··· 733 } 734 735 static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, 736 - unsigned long align) 737 { 738 void *res; 739 ··· 747 static unsigned long __init unflatten_dt_node(unsigned long mem, 748 unsigned long *p, 749 struct device_node *dad, 750 - struct device_node ***allnextpp) 751 { 752 struct device_node *np; 753 struct property *pp, **prev_pp = NULL; 754 char *pathp; 755 u32 tag; 756 - unsigned int l; 757 758 tag = *((u32 *)(*p)); 759 if (tag != OF_DT_BEGIN_NODE) { ··· 765 } 766 *p += 4; 767 pathp = (char *)*p; 768 - l = strlen(pathp) + 1; 769 *p = _ALIGN(*p + l, 4); 770 771 - np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + l, 772 __alignof__(struct device_node)); 773 if (allnextpp) { 774 memset(np, 0, sizeof(*np)); 775 np->full_name = ((char*)np) + sizeof(struct device_node); 776 - memcpy(np->full_name, pathp, l); 777 prev_pp = &np->properties; 778 **allnextpp = np; 779 *allnextpp = &np->allnext; 780 if (dad != NULL) { 781 np->parent = dad; 782 - /* we temporarily use the `next' field as `last_child'. */ 783 if (dad->next == 0) 784 dad->child = np; 785 else ··· 834 char *pname; 835 836 tag = *((u32 *)(*p)); 837 if (tag != OF_DT_PROP) 838 break; 839 *p += 4; 840 sz = *((u32 *)(*p)); 841 noff = *((u32 *)((*p) + 4)); 842 - *p = _ALIGN((*p) + 8, sz >= 8 ? 
8 : 4); 843 844 pname = find_flat_dt_string(noff); 845 if (pname == NULL) { 846 printk("Can't find property name in list !\n"); 847 break; 848 } 849 l = strlen(pname) + 1; 850 pp = unflatten_dt_alloc(&mem, sizeof(struct property), 851 __alignof__(struct property)); ··· 873 } 874 *p = _ALIGN((*p) + sz, 4); 875 } 876 if (allnextpp) { 877 *prev_pp = NULL; 878 np->name = get_property(np, "name", NULL); ··· 914 np->type = "<NULL>"; 915 } 916 while (tag == OF_DT_BEGIN_NODE) { 917 - mem = unflatten_dt_node(mem, p, np, allnextpp); 918 tag = *((u32 *)(*p)); 919 } 920 if (tag != OF_DT_END_NODE) { 921 - printk("Weird tag at start of node: %x\n", tag); 922 return mem; 923 } 924 *p += 4; ··· 944 /* First pass, scan for size */ 945 start = ((unsigned long)initial_boot_params) + 946 initial_boot_params->off_dt_struct; 947 - size = unflatten_dt_node(0, &start, NULL, NULL); 948 949 DBG(" size is %lx, allocating...\n", size); 950 951 /* Allocate memory for the expanded device tree */ 952 - mem = (unsigned long)abs_to_virt(lmb_alloc(size, 953 - __alignof__(struct device_node))); 954 DBG(" unflattening...\n", mem); 955 956 /* Second pass, do actual unflattening */ 957 start = ((unsigned long)initial_boot_params) + 958 initial_boot_params->off_dt_struct; 959 - unflatten_dt_node(mem, &start, NULL, &allnextp); 960 if (*((u32 *)start) != OF_DT_END) 961 - printk(KERN_WARNING "Weird tag at end of tree: %x\n", *((u32 *)start)); 962 *allnextp = NULL; 963 964 /* Get pointer to OF "/chosen" node for use everywhere */ ··· 993 994 995 static int __init early_init_dt_scan_cpus(unsigned long node, 996 - const char *full_path, void *data) 997 { 998 char *type = get_flat_dt_prop(node, "device_type", NULL); 999 u32 *prop; ··· 1060 } 1061 1062 static int __init early_init_dt_scan_chosen(unsigned long node, 1063 - const char *full_path, void *data) 1064 { 1065 u32 *prop; 1066 u64 *prop64; 1067 extern unsigned long memory_limit, tce_alloc_start, tce_alloc_end; 1068 1069 - if (strcmp(full_path, "/chosen") != 0) 1070 return 0; 1071 1072 /* get platform type */ ··· 1118 } 1119 1120 static int __init early_init_dt_scan_root(unsigned long node, 1121 - const char *full_path, void *data) 1122 { 1123 u32 *prop; 1124 1125 - if (strcmp(full_path, "/") != 0) 1126 return 0; 1127 1128 prop = (u32 *)get_flat_dt_prop(node, "#size-cells", NULL); 1129 dt_root_size_cells = (prop == NULL) ? 1 : *prop; 1130 - 1131 prop = (u32 *)get_flat_dt_prop(node, "#address-cells", NULL); 1132 dt_root_addr_cells = (prop == NULL) ? 2 : *prop; 1133 1134 /* break now */ 1135 return 1; ··· 1159 1160 1161 static int __init early_init_dt_scan_memory(unsigned long node, 1162 - const char *full_path, void *data) 1163 { 1164 char *type = get_flat_dt_prop(node, "device_type", NULL); 1165 cell_t *reg, *endp; ··· 1175 1176 endp = reg + (l / sizeof(cell_t)); 1177 1178 - DBG("memory scan node %s ...\n", full_path); 1179 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { 1180 unsigned long base, size; 1181 ··· 1588 struct device_node *np = allnodes; 1589 1590 read_lock(&devtree_lock); 1591 - for (; np != 0; np = np->allnext) 1592 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0 1593 && of_node_get(np)) 1594 break; 1595 read_unlock(&devtree_lock); 1596 return np; 1597 }
··· 625 626 static inline char *find_flat_dt_string(u32 offset) 627 { 628 + return ((char *)initial_boot_params) + 629 + initial_boot_params->off_dt_strings + offset; 630 } 631 632 /** ··· 635 * unflatten the tree 636 */ 637 static int __init scan_flat_dt(int (*it)(unsigned long node, 638 + const char *uname, int depth, 639 + void *data), 640 void *data) 641 { 642 unsigned long p = ((unsigned long)initial_boot_params) + 643 initial_boot_params->off_dt_struct; 644 int rc = 0; 645 + int depth = -1; 646 647 do { 648 u32 tag = *((u32 *)p); 649 char *pathp; 650 651 p += 4; 652 + if (tag == OF_DT_END_NODE) { 653 + depth --; 654 + continue; 655 + } 656 + if (tag == OF_DT_NOP) 657 continue; 658 if (tag == OF_DT_END) 659 break; 660 if (tag == OF_DT_PROP) { 661 u32 sz = *((u32 *)p); 662 p += 8; 663 + if (initial_boot_params->version < 0x10) 664 + p = _ALIGN(p, sz >= 8 ? 8 : 4); 665 p += sz; 666 p = _ALIGN(p, 4); 667 continue; ··· 664 " device tree !\n", tag); 665 return -EINVAL; 666 } 667 + depth++; 668 pathp = (char *)p; 669 p = _ALIGN(p + strlen(pathp) + 1, 4); 670 + if ((*pathp) == '/') { 671 + char *lp, *np; 672 + for (lp = NULL, np = pathp; *np; np++) 673 + if ((*np) == '/') 674 + lp = np+1; 675 + if (lp != NULL) 676 + pathp = lp; 677 + } 678 + rc = it(p, pathp, depth, data); 679 if (rc != 0) 680 break; 681 } while(1); ··· 689 const char *nstr; 690 691 p += 4; 692 + if (tag == OF_DT_NOP) 693 + continue; 694 if (tag != OF_DT_PROP) 695 return NULL; 696 697 sz = *((u32 *)p); 698 noff = *((u32 *)(p + 4)); 699 p += 8; 700 + if (initial_boot_params->version < 0x10) 701 + p = _ALIGN(p, sz >= 8 ? 8 : 4); 702 703 nstr = find_flat_dt_string(noff); 704 if (nstr == NULL) { 705 + printk(KERN_WARNING "Can't find property index" 706 + " name !\n"); 707 return NULL; 708 } 709 if (strcmp(name, nstr) == 0) { ··· 713 } 714 715 static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, 716 + unsigned long align) 717 { 718 void *res; 719 ··· 727 static unsigned long __init unflatten_dt_node(unsigned long mem, 728 unsigned long *p, 729 struct device_node *dad, 730 + struct device_node ***allnextpp, 731 + unsigned long fpsize) 732 { 733 struct device_node *np; 734 struct property *pp, **prev_pp = NULL; 735 char *pathp; 736 u32 tag; 737 + unsigned int l, allocl; 738 + int has_name = 0; 739 + int new_format = 0; 740 741 tag = *((u32 *)(*p)); 742 if (tag != OF_DT_BEGIN_NODE) { ··· 742 } 743 *p += 4; 744 pathp = (char *)*p; 745 + l = allocl = strlen(pathp) + 1; 746 *p = _ALIGN(*p + l, 4); 747 748 + /* version 0x10 has a more compact unit name here instead of the full 749 + * path. we accumulate the full path size using "fpsize", we'll rebuild 750 + * it later. We detect this because the first character of the name is 751 + * not '/'. 752 + */ 753 + if ((*pathp) != '/') { 754 + new_format = 1; 755 + if (fpsize == 0) { 756 + /* root node: special case. fpsize accounts for path 757 + * plus terminating zero. 
root node only has '/', so 758 + * fpsize should be 2, but we want to avoid the first 759 + * level nodes to have two '/' so we use fpsize 1 here 760 + */ 761 + fpsize = 1; 762 + allocl = 2; 763 + } else { 764 + /* account for '/' and path size minus terminal 0 765 + * already in 'l' 766 + */ 767 + fpsize += l; 768 + allocl = fpsize; 769 + } 770 + } 771 + 772 + 773 + np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl, 774 __alignof__(struct device_node)); 775 if (allnextpp) { 776 memset(np, 0, sizeof(*np)); 777 np->full_name = ((char*)np) + sizeof(struct device_node); 778 + if (new_format) { 779 + char *p = np->full_name; 780 + /* rebuild full path for new format */ 781 + if (dad && dad->parent) { 782 + strcpy(p, dad->full_name); 783 + #ifdef DEBUG 784 + if ((strlen(p) + l + 1) != allocl) { 785 + DBG("%s: p: %d, l: %d, a: %d\n", 786 + pathp, strlen(p), l, allocl); 787 + } 788 + #endif 789 + p += strlen(p); 790 + } 791 + *(p++) = '/'; 792 + memcpy(p, pathp, l); 793 + } else 794 + memcpy(np->full_name, pathp, l); 795 prev_pp = &np->properties; 796 **allnextpp = np; 797 *allnextpp = &np->allnext; 798 if (dad != NULL) { 799 np->parent = dad; 800 + /* we temporarily use the next field as `last_child'*/ 801 if (dad->next == 0) 802 dad->child = np; 803 else ··· 770 char *pname; 771 772 tag = *((u32 *)(*p)); 773 + if (tag == OF_DT_NOP) { 774 + *p += 4; 775 + continue; 776 + } 777 if (tag != OF_DT_PROP) 778 break; 779 *p += 4; 780 sz = *((u32 *)(*p)); 781 noff = *((u32 *)((*p) + 4)); 782 + *p += 8; 783 + if (initial_boot_params->version < 0x10) 784 + *p = _ALIGN(*p, sz >= 8 ? 8 : 4); 785 786 pname = find_flat_dt_string(noff); 787 if (pname == NULL) { 788 printk("Can't find property name in list !\n"); 789 break; 790 } 791 + if (strcmp(pname, "name") == 0) 792 + has_name = 1; 793 l = strlen(pname) + 1; 794 pp = unflatten_dt_alloc(&mem, sizeof(struct property), 795 __alignof__(struct property)); ··· 801 } 802 *p = _ALIGN((*p) + sz, 4); 803 } 804 + /* with version 0x10 we may not have the name property, recreate 805 + * it here from the unit name if absent 806 + */ 807 + if (!has_name) { 808 + char *p = pathp, *ps = pathp, *pa = NULL; 809 + int sz; 810 + 811 + while (*p) { 812 + if ((*p) == '@') 813 + pa = p; 814 + if ((*p) == '/') 815 + ps = p + 1; 816 + p++; 817 + } 818 + if (pa < ps) 819 + pa = p; 820 + sz = (pa - ps) + 1; 821 + pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz, 822 + __alignof__(struct property)); 823 + if (allnextpp) { 824 + pp->name = "name"; 825 + pp->length = sz; 826 + pp->value = (unsigned char *)(pp + 1); 827 + *prev_pp = pp; 828 + prev_pp = &pp->next; 829 + memcpy(pp->value, ps, sz - 1); 830 + ((char *)pp->value)[sz - 1] = 0; 831 + DBG("fixed up name for %s -> %s\n", pathp, pp->value); 832 + } 833 + } 834 if (allnextpp) { 835 *prev_pp = NULL; 836 np->name = get_property(np, "name", NULL); ··· 812 np->type = "<NULL>"; 813 } 814 while (tag == OF_DT_BEGIN_NODE) { 815 + mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize); 816 tag = *((u32 *)(*p)); 817 } 818 if (tag != OF_DT_END_NODE) { 819 + printk("Weird tag at end of node: %x\n", tag); 820 return mem; 821 } 822 *p += 4; ··· 842 /* First pass, scan for size */ 843 start = ((unsigned long)initial_boot_params) + 844 initial_boot_params->off_dt_struct; 845 + size = unflatten_dt_node(0, &start, NULL, NULL, 0); 846 + size = (size | 3) + 1; 847 848 DBG(" size is %lx, allocating...\n", size); 849 850 /* Allocate memory for the expanded device tree */ 851 + mem = lmb_alloc(size + 4, __alignof__(struct 
device_node)); 852 + if (!mem) { 853 + DBG("Couldn't allocate memory with lmb_alloc()!\n"); 854 + panic("Couldn't allocate memory with lmb_alloc()!\n"); 855 + } 856 + mem = (unsigned long)abs_to_virt(mem); 857 + 858 + ((u32 *)mem)[size / 4] = 0xdeadbeef; 859 + 860 DBG(" unflattening...\n", mem); 861 862 /* Second pass, do actual unflattening */ 863 start = ((unsigned long)initial_boot_params) + 864 initial_boot_params->off_dt_struct; 865 + unflatten_dt_node(mem, &start, NULL, &allnextp, 0); 866 if (*((u32 *)start) != OF_DT_END) 867 + printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start)); 868 + if (((u32 *)mem)[size / 4] != 0xdeadbeef) 869 + printk(KERN_WARNING "End of tree marker overwritten: %08x\n", 870 + ((u32 *)mem)[size / 4] ); 871 *allnextp = NULL; 872 873 /* Get pointer to OF "/chosen" node for use everywhere */ ··· 880 881 882 static int __init early_init_dt_scan_cpus(unsigned long node, 883 + const char *uname, int depth, void *data) 884 { 885 char *type = get_flat_dt_prop(node, "device_type", NULL); 886 u32 *prop; ··· 947 } 948 949 static int __init early_init_dt_scan_chosen(unsigned long node, 950 + const char *uname, int depth, void *data) 951 { 952 u32 *prop; 953 u64 *prop64; 954 extern unsigned long memory_limit, tce_alloc_start, tce_alloc_end; 955 956 + DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname); 957 + 958 + if (depth != 1 || strcmp(uname, "chosen") != 0) 959 return 0; 960 961 /* get platform type */ ··· 1003 } 1004 1005 static int __init early_init_dt_scan_root(unsigned long node, 1006 + const char *uname, int depth, void *data) 1007 { 1008 u32 *prop; 1009 1010 + if (depth != 0) 1011 return 0; 1012 1013 prop = (u32 *)get_flat_dt_prop(node, "#size-cells", NULL); 1014 dt_root_size_cells = (prop == NULL) ? 1 : *prop; 1015 + DBG("dt_root_size_cells = %x\n", dt_root_size_cells); 1016 + 1017 prop = (u32 *)get_flat_dt_prop(node, "#address-cells", NULL); 1018 dt_root_addr_cells = (prop == NULL) ? 2 : *prop; 1019 + DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells); 1020 1021 /* break now */ 1022 return 1; ··· 1042 1043 1044 static int __init early_init_dt_scan_memory(unsigned long node, 1045 + const char *uname, int depth, void *data) 1046 { 1047 char *type = get_flat_dt_prop(node, "device_type", NULL); 1048 cell_t *reg, *endp; ··· 1058 1059 endp = reg + (l / sizeof(cell_t)); 1060 1061 + DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n", 1062 + uname, l, reg[0], reg[1], reg[2], reg[3]); 1063 + 1064 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { 1065 unsigned long base, size; 1066 ··· 1469 struct device_node *np = allnodes; 1470 1471 read_lock(&devtree_lock); 1472 + for (; np != 0; np = np->allnext) { 1473 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0 1474 && of_node_get(np)) 1475 break; 1476 + } 1477 read_unlock(&devtree_lock); 1478 return np; 1479 }
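The flattened-tree walkers above now branch on initial_boot_params->version: blobs older than 0x10 pad property values of 8 bytes or more to an 8-byte boundary, while 0x10 blobs keep everything 4-byte aligned. A minimal standalone sketch of that skipping arithmetic, assuming the entry layout shown in the hunks (skip_prop and ALIGN_UP are illustrative names; ALIGN_UP stands in for the kernel's _ALIGN):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* Skip one OF_DT_PROP entry, given the offset just past its tag word
 * and the value size already read: 4-byte size word, 4-byte name
 * offset, then the value itself. */
static unsigned long skip_prop(unsigned long off, uint32_t sz,
                               uint32_t version)
{
        off += 8;                               /* size + name offset */
        if (version < 0x10)                     /* old blobs only */
                off = ALIGN_UP(off, sz >= 8 ? 8 : 4);
        off += sz;                              /* the value */
        return ALIGN_UP(off, 4);                /* next tag */
}

int main(void)
{
        /* an 8-byte property whose tag sits at offset 0 */
        printf("pre-0x10: %lu, v0x10: %lu\n",
               skip_prop(4, 8, 0x03), skip_prop(4, 8, 0x10));
        return 0;
}

Running it prints "pre-0x10: 24, v0x10: 20": the same value lands four bytes earlier in a version 0x10 blob, which is why the alignment had to be made conditional rather than simply dropped.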
+55 -33
arch/ppc64/kernel/prom_init.c
··· 1534 */ 1535 #define MAX_PROPERTY_NAME 64 1536 1537 - static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start, 1538 unsigned long *mem_end) 1539 { 1540 unsigned long offset = reloc_offset(); ··· 1548 /* get and store all property names */ 1549 prev_name = RELOC(""); 1550 for (;;) { 1551 - int rc; 1552 - 1553 /* 64 is max len of name including nul. */ 1554 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1); 1555 - rc = call_prom("nextprop", 3, 1, node, prev_name, namep); 1556 - if (rc != 1) { 1557 /* No more nodes: unwind alloc */ 1558 *mem_start = (unsigned long)namep; 1559 break; 1560 } 1561 soff = dt_find_string(namep); 1562 if (soff != 0) { 1563 *mem_start = (unsigned long)namep; ··· 1577 1578 /* do all our children */ 1579 child = call_prom("child", 1, 1, node); 1580 - while (child != (phandle)0) { 1581 scan_dt_build_strings(child, mem_start, mem_end); 1582 child = call_prom("peer", 1, 1, child); 1583 } ··· 1586 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, 1587 unsigned long *mem_end) 1588 { 1589 - int l, align; 1590 phandle child; 1591 - char *namep, *prev_name, *sstart, *p, *ep; 1592 unsigned long soff; 1593 unsigned char *valp; 1594 unsigned long offset = reloc_offset(); 1595 - char pname[MAX_PROPERTY_NAME]; 1596 - char *path; 1597 - 1598 - path = RELOC(prom_scratch); 1599 1600 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end); 1601 ··· 1602 namep, *mem_end - *mem_start); 1603 if (l >= 0) { 1604 /* Didn't fit? Get more room. */ 1605 - if (l+1 > *mem_end - *mem_start) { 1606 namep = make_room(mem_start, mem_end, l+1, 1); 1607 call_prom("package-to-path", 3, 1, node, namep, l); 1608 } 1609 namep[l] = '\0'; 1610 /* Fixup an Apple bug where they have bogus \0 chars in the 1611 * middle of the path in some properties 1612 */ 1613 for (p = namep, ep = namep + l; p < ep; p++) 1614 if (*p == '\0') { 1615 memmove(p, p+1, ep - p); 1616 - ep--; l--; 1617 } 1618 - *mem_start = _ALIGN(((unsigned long) namep) + strlen(namep) + 1, 4); 1619 } 1620 1621 /* get it again for debugging */ 1622 memset(path, 0, PROM_SCRATCH_SIZE); 1623 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1); 1624 ··· 1636 prev_name = RELOC(""); 1637 sstart = (char *)RELOC(dt_string_start); 1638 for (;;) { 1639 - int rc; 1640 - 1641 - rc = call_prom("nextprop", 3, 1, node, prev_name, pname); 1642 - if (rc != 1) 1643 break; 1644 1645 /* find string offset */ 1646 - soff = dt_find_string(pname); 1647 if (soff == 0) { 1648 - prom_printf("WARNING: Can't find string index for <%s>, node %s\n", 1649 - pname, path); 1650 break; 1651 } 1652 prev_name = sstart + soff; 1653 1654 /* get length */ 1655 - l = call_prom("getproplen", 2, 1, node, pname); 1656 1657 /* sanity checks */ 1658 if (l == PROM_ERROR) ··· 1665 prom_printf("WARNING: ignoring large property "); 1666 /* It seems OF doesn't null-terminate the path :-( */ 1667 prom_printf("[%s] ", path); 1668 - prom_printf("%s length 0x%x\n", pname, l); 1669 continue; 1670 } 1671 ··· 1675 dt_push_token(soff, mem_start, mem_end); 1676 1677 /* push property content */ 1678 - align = (l >= 8) ? 8 : 4; 1679 - valp = make_room(mem_start, mem_end, l, align); 1680 - call_prom("getprop", 4, 1, node, pname, valp, l); 1681 *mem_start = _ALIGN(*mem_start, 4); 1682 } 1683 1684 /* Add a "linux,phandle" property. 
*/ 1685 soff = dt_find_string(RELOC("linux,phandle")); 1686 if (soff == 0) 1687 - prom_printf("WARNING: Can't find string index for <linux-phandle>" 1688 - " node %s\n", path); 1689 else { 1690 dt_push_token(OF_DT_PROP, mem_start, mem_end); 1691 dt_push_token(4, mem_start, mem_end); ··· 1695 1696 /* do all our children */ 1697 child = call_prom("child", 1, 1, node); 1698 - while (child != (phandle)0) { 1699 scan_dt_build_struct(child, mem_start, mem_end); 1700 child = call_prom("peer", 1, 1, child); 1701 } ··· 1734 1735 /* Build header and make room for mem rsv map */ 1736 mem_start = _ALIGN(mem_start, 4); 1737 - hdr = make_room(&mem_start, &mem_end, sizeof(struct boot_param_header), 4); 1738 RELOC(dt_header_start) = (unsigned long)hdr; 1739 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8); 1740 ··· 1748 namep = make_room(&mem_start, &mem_end, 16, 1); 1749 strcpy(namep, RELOC("linux,phandle")); 1750 mem_start = (unsigned long)namep + strlen(namep) + 1; 1751 - RELOC(dt_string_end) = mem_start; 1752 1753 /* Build string array */ 1754 prom_printf("Building dt strings...\n"); 1755 scan_dt_build_strings(root, &mem_start, &mem_end); 1756 1757 /* Build structure */ 1758 mem_start = PAGE_ALIGN(mem_start); ··· 1767 hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start); 1768 hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start); 1769 hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start); 1770 hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start); 1771 hdr->version = OF_DT_VERSION; 1772 - hdr->last_comp_version = 1; 1773 1774 /* Reserve the whole thing and copy the reserve map in, we 1775 * also bump mem_reserve_cnt to cause further reservations to ··· 1827 /* does it need fixup ? */ 1828 if (prom_getproplen(i2c, "interrupts") > 0) 1829 return; 1830 /* interrupt on this revision of u3 is number 0 and level */ 1831 interrupts[0] = 0; 1832 interrupts[1] = 1;
··· 1534 */ 1535 #define MAX_PROPERTY_NAME 64 1536 1537 + static void __init scan_dt_build_strings(phandle node, 1538 + unsigned long *mem_start, 1539 unsigned long *mem_end) 1540 { 1541 unsigned long offset = reloc_offset(); ··· 1547 /* get and store all property names */ 1548 prev_name = RELOC(""); 1549 for (;;) { 1550 /* 64 is max len of name including nul. */ 1551 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1); 1552 + if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) { 1553 /* No more nodes: unwind alloc */ 1554 *mem_start = (unsigned long)namep; 1555 break; 1556 } 1557 + 1558 + /* skip "name" */ 1559 + if (strcmp(namep, RELOC("name")) == 0) { 1560 + *mem_start = (unsigned long)namep; 1561 + prev_name = RELOC("name"); 1562 + continue; 1563 + } 1564 + /* get/create string entry */ 1565 soff = dt_find_string(namep); 1566 if (soff != 0) { 1567 *mem_start = (unsigned long)namep; ··· 1571 1572 /* do all our children */ 1573 child = call_prom("child", 1, 1, node); 1574 + while (child != 0) { 1575 scan_dt_build_strings(child, mem_start, mem_end); 1576 child = call_prom("peer", 1, 1, child); 1577 } ··· 1580 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, 1581 unsigned long *mem_end) 1582 { 1583 phandle child; 1584 + char *namep, *prev_name, *sstart, *p, *ep, *lp, *path; 1585 unsigned long soff; 1586 unsigned char *valp; 1587 unsigned long offset = reloc_offset(); 1588 + static char pname[MAX_PROPERTY_NAME]; 1589 + int l; 1590 1591 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end); 1592 ··· 1599 namep, *mem_end - *mem_start); 1600 if (l >= 0) { 1601 /* Didn't fit? Get more room. */ 1602 + if ((l+1) > (*mem_end - *mem_start)) { 1603 namep = make_room(mem_start, mem_end, l+1, 1); 1604 call_prom("package-to-path", 3, 1, node, namep, l); 1605 } 1606 namep[l] = '\0'; 1607 + 1608 /* Fixup an Apple bug where they have bogus \0 chars in the 1609 * middle of the path in some properties 1610 */ 1611 for (p = namep, ep = namep + l; p < ep; p++) 1612 if (*p == '\0') { 1613 memmove(p, p+1, ep - p); 1614 + ep--; l--; p--; 1615 } 1616 + 1617 + /* now try to extract the unit name in that mess */ 1618 + for (p = namep, lp = NULL; *p; p++) 1619 + if (*p == '/') 1620 + lp = p + 1; 1621 + if (lp != NULL) 1622 + memmove(namep, lp, strlen(lp) + 1); 1623 + *mem_start = _ALIGN(((unsigned long) namep) + 1624 + strlen(namep) + 1, 4); 1625 } 1626 1627 /* get it again for debugging */ 1628 + path = RELOC(prom_scratch); 1629 memset(path, 0, PROM_SCRATCH_SIZE); 1630 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1); 1631 ··· 1623 prev_name = RELOC(""); 1624 sstart = (char *)RELOC(dt_string_start); 1625 for (;;) { 1626 + if (call_prom("nextprop", 3, 1, node, prev_name, 1627 + RELOC(pname)) != 1) 1628 break; 1629 1630 + /* skip "name" */ 1631 + if (strcmp(RELOC(pname), RELOC("name")) == 0) { 1632 + prev_name = RELOC("name"); 1633 + continue; 1634 + } 1635 + 1636 /* find string offset */ 1637 + soff = dt_find_string(RELOC(pname)); 1638 if (soff == 0) { 1639 + prom_printf("WARNING: Can't find string index for" 1640 + " <%s>, node %s\n", RELOC(pname), path); 1641 break; 1642 } 1643 prev_name = sstart + soff; 1644 1645 /* get length */ 1646 + l = call_prom("getproplen", 2, 1, node, RELOC(pname)); 1647 1648 /* sanity checks */ 1649 if (l == PROM_ERROR) ··· 1648 prom_printf("WARNING: ignoring large property "); 1649 /* It seems OF doesn't null-terminate the path :-( */ 1650 prom_printf("[%s] ", path); 1651 + prom_printf("%s length 0x%x\n", RELOC(pname), 
l); 1652 continue; 1653 } 1654 ··· 1658 dt_push_token(soff, mem_start, mem_end); 1659 1660 /* push property content */ 1661 + valp = make_room(mem_start, mem_end, l, 4); 1662 + call_prom("getprop", 4, 1, node, RELOC(pname), valp, l); 1663 *mem_start = _ALIGN(*mem_start, 4); 1664 } 1665 1666 /* Add a "linux,phandle" property. */ 1667 soff = dt_find_string(RELOC("linux,phandle")); 1668 if (soff == 0) 1669 + prom_printf("WARNING: Can't find string index for" 1670 + " <linux-phandle> node %s\n", path); 1671 else { 1672 dt_push_token(OF_DT_PROP, mem_start, mem_end); 1673 dt_push_token(4, mem_start, mem_end); ··· 1679 1680 /* do all our children */ 1681 child = call_prom("child", 1, 1, node); 1682 + while (child != 0) { 1683 scan_dt_build_struct(child, mem_start, mem_end); 1684 child = call_prom("peer", 1, 1, child); 1685 } ··· 1718 1719 /* Build header and make room for mem rsv map */ 1720 mem_start = _ALIGN(mem_start, 4); 1721 + hdr = make_room(&mem_start, &mem_end, 1722 + sizeof(struct boot_param_header), 4); 1723 RELOC(dt_header_start) = (unsigned long)hdr; 1724 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8); 1725 ··· 1731 namep = make_room(&mem_start, &mem_end, 16, 1); 1732 strcpy(namep, RELOC("linux,phandle")); 1733 mem_start = (unsigned long)namep + strlen(namep) + 1; 1734 1735 /* Build string array */ 1736 prom_printf("Building dt strings...\n"); 1737 scan_dt_build_strings(root, &mem_start, &mem_end); 1738 + RELOC(dt_string_end) = mem_start; 1739 1740 /* Build structure */ 1741 mem_start = PAGE_ALIGN(mem_start); ··· 1750 hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start); 1751 hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start); 1752 hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start); 1753 + hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start); 1754 hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start); 1755 hdr->version = OF_DT_VERSION; 1756 + /* Version 16 is not backward compatible */ 1757 + hdr->last_comp_version = 0x10; 1758 1759 /* Reserve the whole thing and copy the reserve map in, we 1760 * also bump mem_reserve_cnt to cause further reservations to ··· 1808 /* does it need fixup ? */ 1809 if (prom_getproplen(i2c, "interrupts") > 0) 1810 return; 1811 + 1812 + prom_printf("fixing up bogus interrupts for u3 i2c...\n"); 1813 + 1814 /* interrupt on this revision of u3 is number 0 and level */ 1815 interrupts[0] = 0; 1816 interrupts[1] = 1;
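Bumping last_comp_version to 0x10 goes hand in hand with the builder storing only each node's unit name, i.e. everything after the final '/'; the unflattening side of this merge rebuilds full paths from parent names, and dt_strings_size now records the string block's length. The extraction itself, as a standalone sketch without the RELOC() plumbing (keep_unit_name is an invented name; the loop mirrors the hunk):

#include <stdio.h>
#include <string.h>

/* Reduce an OF path to its last component, in place. */
static void keep_unit_name(char *namep)
{
        char *p, *lp = NULL;

        for (p = namep; *p; p++)
                if (*p == '/')
                        lp = p + 1;
        if (lp != NULL)
                memmove(namep, lp, strlen(lp) + 1);
}

int main(void)
{
        char path[] = "/ht@0,f2000000/pci@1/ethernet@f";

        keep_unit_name(path);
        printf("%s\n", path);   /* prints "ethernet@f" */
        return 0;
}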
+17 -2
arch/ppc64/kernel/rtas_pci.c
··· 58 return 0; 59 } 60 61 static int rtas_read_config(struct device_node *dn, int where, int size, u32 *val) 62 { 63 int returnval = -1; ··· 118 119 /* Search only direct children of the bus */ 120 for (dn = busdn->child; dn; dn = dn->sibling) 121 - if (dn->devfn == devfn) 122 return rtas_read_config(dn, where, size, val); 123 return PCIBIOS_DEVICE_NOT_FOUND; 124 } ··· 161 162 /* Search only direct children of the bus */ 163 for (dn = busdn->child; dn; dn = dn->sibling) 164 - if (dn->devfn == devfn) 165 return rtas_write_config(dn, where, size, val); 166 return PCIBIOS_DEVICE_NOT_FOUND; 167 }
··· 58 return 0; 59 } 60 61 + static int of_device_available(struct device_node * dn) 62 + { 63 + char * status; 64 + 65 + status = get_property(dn, "status", NULL); 66 + 67 + if (!status) 68 + return 1; 69 + 70 + if (!strcmp(status, "okay")) 71 + return 1; 72 + 73 + return 0; 74 + } 75 + 76 static int rtas_read_config(struct device_node *dn, int where, int size, u32 *val) 77 { 78 int returnval = -1; ··· 103 104 /* Search only direct children of the bus */ 105 for (dn = busdn->child; dn; dn = dn->sibling) 106 + if (dn->devfn == devfn && of_device_available(dn)) 107 return rtas_read_config(dn, where, size, val); 108 return PCIBIOS_DEVICE_NOT_FOUND; 109 } ··· 146 147 /* Search only direct children of the bus */ 148 for (dn = busdn->child; dn; dn = dn->sibling) 149 + if (dn->devfn == devfn && of_device_available(dn)) 150 return rtas_write_config(dn, where, size, val); 151 return PCIBIOS_DEVICE_NOT_FOUND; 152 }
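The new of_device_available() helper encodes the Open Firmware "status" convention: a node with no "status" property is usable, and otherwise this hunk accepts exactly the spelling "okay". The same policy as a self-contained check (node_available is an illustrative name):

#include <stdio.h>
#include <string.h>

/* status: the node's "status" property value, or NULL when absent */
static int node_available(const char *status)
{
        return status == NULL || strcmp(status, "okay") == 0;
}

int main(void)
{
        printf("%d %d %d\n",
               node_available(NULL),        /* 1: property absent */
               node_available("okay"),      /* 1: explicitly good */
               node_available("disabled")); /* 0: no config cycles */
        return 0;
}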
+17 -11
arch/ppc64/kernel/setup.c
··· 536 537 DBG(" -> check_for_initrd()\n"); 538 539 - prop = (u64 *)get_property(of_chosen, "linux,initrd-start", NULL); 540 - if (prop != NULL) { 541 - initrd_start = (unsigned long)__va(*prop); 542 - prop = (u64 *)get_property(of_chosen, "linux,initrd-end", NULL); 543 if (prop != NULL) { 544 - initrd_end = (unsigned long)__va(*prop); 545 - initrd_below_start_ok = 1; 546 - } else 547 - initrd_start = 0; 548 } 549 550 /* If we were passed an initrd, set the ROOT_DEV properly if the values ··· 631 * Initialize xmon 632 */ 633 #ifdef CONFIG_XMON_DEFAULT 634 - xmon_init(); 635 #endif 636 /* 637 * Register early console ··· 1347 /* ensure xmon is enabled */ 1348 if (p) { 1349 if (strncmp(p, "on", 2) == 0) 1350 - xmon_init(); 1351 if (strncmp(p, "early", 5) != 0) 1352 return 0; 1353 } 1354 - xmon_init(); 1355 debugger(NULL); 1356 1357 return 0;
··· 536 537 DBG(" -> check_for_initrd()\n"); 538 539 + if (of_chosen) { 540 + prop = (u64 *)get_property(of_chosen, 541 + "linux,initrd-start", NULL); 542 if (prop != NULL) { 543 + initrd_start = (unsigned long)__va(*prop); 544 + prop = (u64 *)get_property(of_chosen, 545 + "linux,initrd-end", NULL); 546 + if (prop != NULL) { 547 + initrd_end = (unsigned long)__va(*prop); 548 + initrd_below_start_ok = 1; 549 + } else 550 + initrd_start = 0; 551 + } 552 } 553 554 /* If we were passed an initrd, set the ROOT_DEV properly if the values ··· 627 * Initialize xmon 628 */ 629 #ifdef CONFIG_XMON_DEFAULT 630 + xmon_init(1); 631 #endif 632 /* 633 * Register early console ··· 1343 /* ensure xmon is enabled */ 1344 if (p) { 1345 if (strncmp(p, "on", 2) == 0) 1346 + xmon_init(1); 1347 + if (strncmp(p, "off", 3) == 0) 1348 + xmon_init(0); 1349 if (strncmp(p, "early", 5) != 0) 1350 return 0; 1351 } 1352 + xmon_init(1); 1353 debugger(NULL); 1354 1355 return 0;
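Besides guarding check_for_initrd() against a missing /chosen node, this hunk gives xmon_init() an enable flag so that "xmon=off" can back the debugger out again. A standalone sketch of the resulting option handling, with xmon_init() stubbed to a printf (the real version wires or unwires the debugger hooks, and early_xmon is a likely name that isn't visible in the hunk):

#include <stdio.h>
#include <string.h>

static void xmon_init(int enable)       /* stub for the real hook setup */
{
        printf("xmon %s\n", enable ? "enabled" : "disabled");
}

/* mirrors the xmon= option logic in the hunk above */
static int early_xmon(const char *p)
{
        if (p) {
                if (strncmp(p, "on", 2) == 0)
                        xmon_init(1);
                if (strncmp(p, "off", 3) == 0)
                        xmon_init(0);
                if (strncmp(p, "early", 5) != 0)
                        return 0;
        }
        xmon_init(1);   /* "early", or no argument at all */
        /* the real code drops into the debugger here */
        return 0;
}

int main(void)
{
        early_xmon("off");
        early_xmon("early");
        return 0;
}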
+5 -52
arch/ppc64/kernel/sysfs.c
··· 13 #include <asm/current.h> 14 #include <asm/processor.h> 15 #include <asm/cputable.h> 16 #include <asm/hvcall.h> 17 #include <asm/prom.h> 18 #include <asm/systemcfg.h> ··· 101 } 102 __setup("smt-snooze-delay=", setup_smt_snooze_delay); 103 104 /* 105 * Enabling PMCs will slow partition context switch times so we only do 106 * it the first time we write to the PMCs. ··· 112 113 void ppc64_enable_pmcs(void) 114 { 115 - unsigned long hid0; 116 - #ifdef CONFIG_PPC_PSERIES 117 - unsigned long set, reset; 118 - #endif /* CONFIG_PPC_PSERIES */ 119 - 120 /* Only need to enable them once */ 121 if (__get_cpu_var(pmcs_enabled)) 122 return; 123 124 __get_cpu_var(pmcs_enabled) = 1; 125 126 - switch (systemcfg->platform) { 127 - case PLATFORM_PSERIES: 128 - case PLATFORM_POWERMAC: 129 - hid0 = mfspr(HID0); 130 - hid0 |= 1UL << (63 - 20); 131 - 132 - /* POWER4 requires the following sequence */ 133 - asm volatile( 134 - "sync\n" 135 - "mtspr %1, %0\n" 136 - "mfspr %0, %1\n" 137 - "mfspr %0, %1\n" 138 - "mfspr %0, %1\n" 139 - "mfspr %0, %1\n" 140 - "mfspr %0, %1\n" 141 - "mfspr %0, %1\n" 142 - "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0): 143 - "memory"); 144 - break; 145 - 146 - #ifdef CONFIG_PPC_PSERIES 147 - case PLATFORM_PSERIES_LPAR: 148 - set = 1UL << 63; 149 - reset = 0; 150 - plpar_hcall_norets(H_PERFMON, set, reset); 151 - break; 152 - #endif /* CONFIG_PPC_PSERIES */ 153 - 154 - default: 155 - break; 156 - } 157 - 158 - #ifdef CONFIG_PPC_PSERIES 159 - /* instruct hypervisor to maintain PMCs */ 160 - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) 161 - get_paca()->lppaca.pmcregs_in_use = 1; 162 - #endif /* CONFIG_PPC_PSERIES */ 163 } 164 - 165 - #else 166 - 167 - /* PMC stuff */ 168 - void ppc64_enable_pmcs(void) 169 - { 170 - /* XXX Implement for iseries */ 171 - } 172 - #endif /* CONFIG_PPC_MULTIPLATFORM */ 173 - 174 EXPORT_SYMBOL(ppc64_enable_pmcs); 175 176 /* XXX convert to rusty's on_one_cpu */
··· 13 #include <asm/current.h> 14 #include <asm/processor.h> 15 #include <asm/cputable.h> 16 + #include <asm/firmware.h> 17 #include <asm/hvcall.h> 18 #include <asm/prom.h> 19 #include <asm/systemcfg.h> ··· 100 } 101 __setup("smt-snooze-delay=", setup_smt_snooze_delay); 102 103 + #endif /* CONFIG_PPC_MULTIPLATFORM */ 104 + 105 /* 106 * Enabling PMCs will slow partition context switch times so we only do 107 * it the first time we write to the PMCs. ··· 109 110 void ppc64_enable_pmcs(void) 111 { 112 /* Only need to enable them once */ 113 if (__get_cpu_var(pmcs_enabled)) 114 return; 115 116 __get_cpu_var(pmcs_enabled) = 1; 117 118 + if (ppc_md.enable_pmcs) 119 + ppc_md.enable_pmcs(); 120 } 121 EXPORT_SYMBOL(ppc64_enable_pmcs); 122 123 /* XXX convert to rusty's on_one_cpu */
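The net effect in sysfs.c is that generic code no longer knows how PMCs get switched on; it only remembers, per CPU, whether it has already asked the platform. A trimmed, runnable sketch of that indirection (the struct and the platform function are stand-ins; only the enable_pmcs hook name and the first-call guard come from the diff):

#include <stdio.h>

/* cut-down stand-in for the kernel's machdep vector */
struct machdep_calls {
        void (*enable_pmcs)(void);
};

static struct machdep_calls ppc_md;
static int pmcs_enabled;                /* per-CPU in the real code */

static void example_platform_enable_pmcs(void)
{
        puts("platform-specific PMC setup runs here");
}

static void ppc64_enable_pmcs(void)
{
        if (pmcs_enabled)               /* only the first call acts */
                return;
        pmcs_enabled = 1;
        if (ppc_md.enable_pmcs)
                ppc_md.enable_pmcs();
}

int main(void)
{
        ppc_md.enable_pmcs = example_platform_enable_pmcs;
        ppc64_enable_pmcs();
        ppc64_enable_pmcs();            /* second call is a no-op */
        return 0;
}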
+3 -4
arch/ppc64/kernel/time.c
··· 67 #include <asm/prom.h> 68 #include <asm/sections.h> 69 #include <asm/systemcfg.h> 70 71 u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; 72 ··· 371 process_hvlpevents(regs); 372 #endif 373 374 - /* collect purr register values often, for accurate calculations */ 375 - #if defined(CONFIG_PPC_PSERIES) 376 - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { 377 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 378 cu->current_tb = mfspr(SPRN_PURR); 379 } 380 - #endif 381 382 irq_exit(); 383
··· 67 #include <asm/prom.h> 68 #include <asm/sections.h> 69 #include <asm/systemcfg.h> 70 + #include <asm/firmware.h> 71 72 u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; 73 ··· 370 process_hvlpevents(regs); 371 #endif 372 373 + /* collect purr register values often, for accurate calculations */ 374 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 375 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 376 cu->current_tb = mfspr(SPRN_PURR); 377 } 378 379 irq_exit(); 380
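firmware_has_feature() folds the old #ifdef CONFIG_PPC_PSERIES block and the cur_cpu_spec->firmware_features test into a single runtime query. Presumably it reduces to a mask test against a word populated during early boot; a sketch under that assumption (the firmware_features variable and the bit value given to FW_FEATURE_SPLPAR are invented here, not taken from the merge):

#include <stdio.h>

#define FW_FEATURE_SPLPAR (1UL << 20)   /* illustrative bit only */

static unsigned long firmware_features; /* filled in at boot */

static inline int firmware_has_feature(unsigned long feature)
{
        return (firmware_features & feature) != 0;
}

int main(void)
{
        firmware_features |= FW_FEATURE_SPLPAR; /* as boot code would */
        printf("%d\n", firmware_has_feature(FW_FEATURE_SPLPAR));
        return 0;
}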
+26 -381
arch/ppc64/kernel/vio.c
··· 1 /* 2 * IBM PowerPC Virtual I/O Infrastructure Support. 3 * 4 - * Copyright (c) 2003 IBM Corp. 5 * Dave Engebretsen engebret@us.ibm.com 6 * Santiago Leon santil@us.ibm.com 7 * Hollis Blanchard <hollisb@us.ibm.com> 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License ··· 15 16 #include <linux/init.h> 17 #include <linux/console.h> 18 - #include <linux/version.h> 19 #include <linux/module.h> 20 - #include <linux/kobject.h> 21 #include <linux/mm.h> 22 #include <linux/dma-mapping.h> 23 - #include <asm/rtas.h> 24 #include <asm/iommu.h> 25 #include <asm/dma.h> 26 - #include <asm/ppcdebug.h> 27 #include <asm/vio.h> 28 - #include <asm/hvcall.h> 29 - #include <asm/iSeries/vio.h> 30 - #include <asm/iSeries/HvTypes.h> 31 - #include <asm/iSeries/HvCallXm.h> 32 - #include <asm/iSeries/HvLpConfig.h> 33 - 34 - #define DBGENTER() pr_debug("%s entered\n", __FUNCTION__) 35 - 36 - extern struct subsystem devices_subsys; /* needed for vio_find_name() */ 37 38 static const struct vio_device_id *vio_match_device( 39 const struct vio_device_id *, const struct vio_dev *); 40 41 - #ifdef CONFIG_PPC_PSERIES 42 - static struct iommu_table *vio_build_iommu_table(struct vio_dev *); 43 - static int vio_num_address_cells; 44 - #endif 45 - #ifdef CONFIG_PPC_ISERIES 46 - static struct iommu_table veth_iommu_table; 47 - static struct iommu_table vio_iommu_table; 48 - #endif 49 - static struct vio_dev vio_bus_device = { /* fake "parent" device */ 50 .name = vio_bus_device.dev.bus_id, 51 .type = "", 52 - #ifdef CONFIG_PPC_ISERIES 53 - .iommu_table = &vio_iommu_table, 54 - #endif 55 .dev.bus_id = "vio", 56 .dev.bus = &vio_bus_type, 57 }; 58 59 - #ifdef CONFIG_PPC_ISERIES 60 - static struct vio_dev *__init vio_register_device_iseries(char *type, 61 - uint32_t unit_num); 62 - 63 - struct device *iSeries_vio_dev = &vio_bus_device.dev; 64 - EXPORT_SYMBOL(iSeries_vio_dev); 65 - 66 - #define device_is_compatible(a, b) 1 67 - 68 - #endif 69 70 /* convert from struct device to struct vio_dev and pass to driver. 
71 * dev->driver has already been set by generic code because vio_bus_match ··· 46 struct vio_driver *viodrv = to_vio_driver(dev->driver); 47 const struct vio_device_id *id; 48 int error = -ENODEV; 49 - 50 - DBGENTER(); 51 52 if (!viodrv->probe) 53 return error; ··· 63 { 64 struct vio_dev *viodev = to_vio_dev(dev); 65 struct vio_driver *viodrv = to_vio_driver(dev->driver); 66 - 67 - DBGENTER(); 68 69 if (viodrv->remove) { 70 return viodrv->remove(viodev); ··· 113 static const struct vio_device_id * vio_match_device(const struct vio_device_id *ids, 114 const struct vio_dev *dev) 115 { 116 - DBGENTER(); 117 - 118 while (ids->type) { 119 - if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) && 120 - device_is_compatible(dev->dev.platform_data, ids->compat)) 121 return ids; 122 ids++; 123 } 124 return NULL; 125 } 126 127 - #ifdef CONFIG_PPC_ISERIES 128 - void __init iommu_vio_init(void) 129 - { 130 - struct iommu_table *t; 131 - struct iommu_table_cb cb; 132 - unsigned long cbp; 133 - unsigned long itc_entries; 134 - 135 - cb.itc_busno = 255; /* Bus 255 is the virtual bus */ 136 - cb.itc_virtbus = 0xff; /* Ask for virtual bus */ 137 - 138 - cbp = virt_to_abs(&cb); 139 - HvCallXm_getTceTableParms(cbp); 140 - 141 - itc_entries = cb.itc_size * PAGE_SIZE / sizeof(union tce_entry); 142 - veth_iommu_table.it_size = itc_entries / 2; 143 - veth_iommu_table.it_busno = cb.itc_busno; 144 - veth_iommu_table.it_offset = cb.itc_offset; 145 - veth_iommu_table.it_index = cb.itc_index; 146 - veth_iommu_table.it_type = TCE_VB; 147 - veth_iommu_table.it_blocksize = 1; 148 - 149 - t = iommu_init_table(&veth_iommu_table); 150 - 151 - if (!t) 152 - printk("Virtual Bus VETH TCE table failed.\n"); 153 - 154 - vio_iommu_table.it_size = itc_entries - veth_iommu_table.it_size; 155 - vio_iommu_table.it_busno = cb.itc_busno; 156 - vio_iommu_table.it_offset = cb.itc_offset + 157 - veth_iommu_table.it_size; 158 - vio_iommu_table.it_index = cb.itc_index; 159 - vio_iommu_table.it_type = TCE_VB; 160 - vio_iommu_table.it_blocksize = 1; 161 - 162 - t = iommu_init_table(&vio_iommu_table); 163 - 164 - if (!t) 165 - printk("Virtual Bus VIO TCE table failed.\n"); 166 - } 167 - #endif 168 - 169 - #ifdef CONFIG_PPC_PSERIES 170 - static void probe_bus_pseries(void) 171 - { 172 - struct device_node *node_vroot, *of_node; 173 - 174 - node_vroot = find_devices("vdevice"); 175 - if ((node_vroot == NULL) || (node_vroot->child == NULL)) 176 - /* this machine doesn't do virtual IO, and that's ok */ 177 - return; 178 - 179 - vio_num_address_cells = prom_n_addr_cells(node_vroot->child); 180 - 181 - /* 182 - * Create struct vio_devices for each virtual device in the device tree. 183 - * Drivers will associate with them later. 
184 - */ 185 - for (of_node = node_vroot->child; of_node != NULL; 186 - of_node = of_node->sibling) { 187 - printk(KERN_DEBUG "%s: processing %p\n", __FUNCTION__, of_node); 188 - vio_register_device_node(of_node); 189 - } 190 - } 191 - #endif 192 - 193 - #ifdef CONFIG_PPC_ISERIES 194 - static void probe_bus_iseries(void) 195 - { 196 - HvLpIndexMap vlan_map = HvLpConfig_getVirtualLanIndexMap(); 197 - struct vio_dev *viodev; 198 - int i; 199 - 200 - /* there is only one of each of these */ 201 - vio_register_device_iseries("viocons", 0); 202 - vio_register_device_iseries("vscsi", 0); 203 - 204 - vlan_map = HvLpConfig_getVirtualLanIndexMap(); 205 - for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) { 206 - if ((vlan_map & (0x8000 >> i)) == 0) 207 - continue; 208 - viodev = vio_register_device_iseries("vlan", i); 209 - /* veth is special and has it own iommu_table */ 210 - viodev->iommu_table = &veth_iommu_table; 211 - } 212 - for (i = 0; i < HVMAXARCHITECTEDVIRTUALDISKS; i++) 213 - vio_register_device_iseries("viodasd", i); 214 - for (i = 0; i < HVMAXARCHITECTEDVIRTUALCDROMS; i++) 215 - vio_register_device_iseries("viocd", i); 216 - for (i = 0; i < HVMAXARCHITECTEDVIRTUALTAPES; i++) 217 - vio_register_device_iseries("viotape", i); 218 - } 219 - #endif 220 - 221 /** 222 * vio_bus_init: - Initialize the virtual IO bus 223 */ 224 - static int __init vio_bus_init(void) 225 { 226 int err; 227 228 err = bus_register(&vio_bus_type); 229 if (err) { ··· 141 return err; 142 } 143 144 - /* the fake parent of all vio devices, just to give us a nice directory */ 145 err = device_register(&vio_bus_device.dev); 146 if (err) { 147 - printk(KERN_WARNING "%s: device_register returned %i\n", __FUNCTION__, 148 - err); 149 return err; 150 } 151 - 152 - #ifdef CONFIG_PPC_PSERIES 153 - probe_bus_pseries(); 154 - #endif 155 - #ifdef CONFIG_PPC_ISERIES 156 - probe_bus_iseries(); 157 - #endif 158 159 return 0; 160 } 161 162 - __initcall(vio_bus_init); 163 - 164 /* vio_dev refcount hit 0 */ 165 static void __devinit vio_dev_release(struct device *dev) 166 { 167 - DBGENTER(); 168 - 169 - #ifdef CONFIG_PPC_PSERIES 170 - /* XXX free TCE table */ 171 - of_node_put(dev->platform_data); 172 - #endif 173 kfree(to_vio_dev(dev)); 174 } 175 - 176 - #ifdef CONFIG_PPC_PSERIES 177 - static ssize_t viodev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf) 178 - { 179 - struct device_node *of_node = dev->platform_data; 180 - 181 - return sprintf(buf, "%s\n", of_node->full_name); 182 - } 183 - DEVICE_ATTR(devspec, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_devspec, NULL); 184 - #endif 185 186 static ssize_t viodev_show_name(struct device *dev, struct device_attribute *attr, char *buf) 187 { ··· 168 } 169 DEVICE_ATTR(name, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_name, NULL); 170 171 - static struct vio_dev * __devinit vio_register_device_common( 172 struct vio_dev *viodev, char *name, char *type, 173 uint32_t unit_address, struct iommu_table *iommu_table) 174 { 175 - DBGENTER(); 176 - 177 viodev->name = name; 178 viodev->type = type; 179 viodev->unit_address = unit_address; ··· 192 return viodev; 193 } 194 195 - #ifdef CONFIG_PPC_PSERIES 196 - /** 197 - * vio_register_device_node: - Register a new vio device. 198 - * @of_node: The OF node for this device. 199 - * 200 - * Creates and initializes a vio_dev structure from the data in 201 - * of_node (dev.platform_data) and adds it to the list of virtual devices. 
202 - * Returns a pointer to the created vio_dev or NULL if node has 203 - * NULL device_type or compatible fields. 204 - */ 205 - struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node) 206 - { 207 - struct vio_dev *viodev; 208 - unsigned int *unit_address; 209 - unsigned int *irq_p; 210 - 211 - DBGENTER(); 212 - 213 - /* we need the 'device_type' property, in order to match with drivers */ 214 - if ((NULL == of_node->type)) { 215 - printk(KERN_WARNING 216 - "%s: node %s missing 'device_type'\n", __FUNCTION__, 217 - of_node->name ? of_node->name : "<unknown>"); 218 - return NULL; 219 - } 220 - 221 - unit_address = (unsigned int *)get_property(of_node, "reg", NULL); 222 - if (!unit_address) { 223 - printk(KERN_WARNING "%s: node %s missing 'reg'\n", __FUNCTION__, 224 - of_node->name ? of_node->name : "<unknown>"); 225 - return NULL; 226 - } 227 - 228 - /* allocate a vio_dev for this node */ 229 - viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL); 230 - if (!viodev) { 231 - return NULL; 232 - } 233 - memset(viodev, 0, sizeof(struct vio_dev)); 234 - 235 - viodev->dev.platform_data = of_node_get(of_node); 236 - 237 - viodev->irq = NO_IRQ; 238 - irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL); 239 - if (irq_p) { 240 - int virq = virt_irq_create_mapping(*irq_p); 241 - if (virq == NO_IRQ) { 242 - printk(KERN_ERR "Unable to allocate interrupt " 243 - "number for %s\n", of_node->full_name); 244 - } else 245 - viodev->irq = irq_offset_up(virq); 246 - } 247 - 248 - snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address); 249 - 250 - /* register with generic device framework */ 251 - if (vio_register_device_common(viodev, of_node->name, of_node->type, 252 - *unit_address, vio_build_iommu_table(viodev)) 253 - == NULL) { 254 - /* XXX free TCE table */ 255 - kfree(viodev); 256 - return NULL; 257 - } 258 - device_create_file(&viodev->dev, &dev_attr_devspec); 259 - 260 - return viodev; 261 - } 262 - EXPORT_SYMBOL(vio_register_device_node); 263 - #endif 264 - 265 - #ifdef CONFIG_PPC_ISERIES 266 - /** 267 - * vio_register_device: - Register a new vio device. 268 - * @voidev: The device to register. 269 - */ 270 - static struct vio_dev *__init vio_register_device_iseries(char *type, 271 - uint32_t unit_num) 272 - { 273 - struct vio_dev *viodev; 274 - 275 - DBGENTER(); 276 - 277 - /* allocate a vio_dev for this node */ 278 - viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL); 279 - if (!viodev) 280 - return NULL; 281 - memset(viodev, 0, sizeof(struct vio_dev)); 282 - 283 - snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%s%d", type, unit_num); 284 - 285 - return vio_register_device_common(viodev, viodev->dev.bus_id, type, 286 - unit_num, &vio_iommu_table); 287 - } 288 - #endif 289 - 290 void __devinit vio_unregister_device(struct vio_dev *viodev) 291 { 292 - DBGENTER(); 293 - #ifdef CONFIG_PPC_PSERIES 294 - device_remove_file(&viodev->dev, &dev_attr_devspec); 295 - #endif 296 device_remove_file(&viodev->dev, &dev_attr_name); 297 device_unregister(&viodev->dev); 298 } 299 EXPORT_SYMBOL(vio_unregister_device); 300 - 301 - #ifdef CONFIG_PPC_PSERIES 302 - /** 303 - * vio_get_attribute: - get attribute for virtual device 304 - * @vdev: The vio device to get property. 305 - * @which: The property/attribute to be extracted. 306 - * @length: Pointer to length of returned data size (unused if NULL). 
307 - * 308 - * Calls prom.c's get_property() to return the value of the 309 - * attribute specified by the preprocessor constant @which 310 - */ 311 - const void * vio_get_attribute(struct vio_dev *vdev, void* which, int* length) 312 - { 313 - return get_property(vdev->dev.platform_data, (char*)which, length); 314 - } 315 - EXPORT_SYMBOL(vio_get_attribute); 316 - 317 - /* vio_find_name() - internal because only vio.c knows how we formatted the 318 - * kobject name 319 - * XXX once vio_bus_type.devices is actually used as a kset in 320 - * drivers/base/bus.c, this function should be removed in favor of 321 - * "device_find(kobj_name, &vio_bus_type)" 322 - */ 323 - static struct vio_dev *vio_find_name(const char *kobj_name) 324 - { 325 - struct kobject *found; 326 - 327 - found = kset_find_obj(&devices_subsys.kset, kobj_name); 328 - if (!found) 329 - return NULL; 330 - 331 - return to_vio_dev(container_of(found, struct device, kobj)); 332 - } 333 - 334 - /** 335 - * vio_find_node - find an already-registered vio_dev 336 - * @vnode: device_node of the virtual device we're looking for 337 - */ 338 - struct vio_dev *vio_find_node(struct device_node *vnode) 339 - { 340 - uint32_t *unit_address; 341 - char kobj_name[BUS_ID_SIZE]; 342 - 343 - /* construct the kobject name from the device node */ 344 - unit_address = (uint32_t *)get_property(vnode, "reg", NULL); 345 - if (!unit_address) 346 - return NULL; 347 - snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address); 348 - 349 - return vio_find_name(kobj_name); 350 - } 351 - EXPORT_SYMBOL(vio_find_node); 352 - 353 - /** 354 - * vio_build_iommu_table: - gets the dma information from OF and builds the TCE tree. 355 - * @dev: the virtual device. 356 - * 357 - * Returns a pointer to the built tce tree, or NULL if it can't 358 - * find property. 359 - */ 360 - static struct iommu_table * vio_build_iommu_table(struct vio_dev *dev) 361 - { 362 - unsigned int *dma_window; 363 - struct iommu_table *newTceTable; 364 - unsigned long offset; 365 - int dma_window_property_size; 366 - 367 - dma_window = (unsigned int *) get_property(dev->dev.platform_data, "ibm,my-dma-window", &dma_window_property_size); 368 - if(!dma_window) { 369 - return NULL; 370 - } 371 - 372 - newTceTable = (struct iommu_table *) kmalloc(sizeof(struct iommu_table), GFP_KERNEL); 373 - 374 - /* There should be some code to extract the phys-encoded offset 375 - using prom_n_addr_cells(). 
However, according to a comment 376 - on earlier versions, it's always zero, so we don't bother */ 377 - offset = dma_window[1] >> PAGE_SHIFT; 378 - 379 - /* TCE table size - measured in tce entries */ 380 - newTceTable->it_size = dma_window[4] >> PAGE_SHIFT; 381 - /* offset for VIO should always be 0 */ 382 - newTceTable->it_offset = offset; 383 - newTceTable->it_busno = 0; 384 - newTceTable->it_index = (unsigned long)dma_window[0]; 385 - newTceTable->it_type = TCE_VB; 386 - 387 - return iommu_init_table(newTceTable); 388 - } 389 - 390 - int vio_enable_interrupts(struct vio_dev *dev) 391 - { 392 - int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE); 393 - if (rc != H_Success) { 394 - printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc); 395 - } 396 - return rc; 397 - } 398 - EXPORT_SYMBOL(vio_enable_interrupts); 399 - 400 - int vio_disable_interrupts(struct vio_dev *dev) 401 - { 402 - int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE); 403 - if (rc != H_Success) { 404 - printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc); 405 - } 406 - return rc; 407 - } 408 - EXPORT_SYMBOL(vio_disable_interrupts); 409 - #endif 410 411 static dma_addr_t vio_map_single(struct device *dev, void *vaddr, 412 size_t size, enum dma_data_direction direction) ··· 263 struct vio_driver *vio_drv = to_vio_driver(drv); 264 const struct vio_device_id *ids = vio_drv->id_table; 265 const struct vio_device_id *found_id; 266 - 267 - DBGENTER(); 268 269 if (!ids) 270 return 0;
··· 1 /* 2 * IBM PowerPC Virtual I/O Infrastructure Support. 3 * 4 + * Copyright (c) 2003-2005 IBM Corp. 5 * Dave Engebretsen engebret@us.ibm.com 6 * Santiago Leon santil@us.ibm.com 7 * Hollis Blanchard <hollisb@us.ibm.com> 8 + * Stephen Rothwell 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public License ··· 14 15 #include <linux/init.h> 16 #include <linux/console.h> 17 #include <linux/module.h> 18 #include <linux/mm.h> 19 #include <linux/dma-mapping.h> 20 #include <asm/iommu.h> 21 #include <asm/dma.h> 22 #include <asm/vio.h> 23 24 static const struct vio_device_id *vio_match_device( 25 const struct vio_device_id *, const struct vio_dev *); 26 27 + struct vio_dev vio_bus_device = { /* fake "parent" device */ 28 .name = vio_bus_device.dev.bus_id, 29 .type = "", 30 .dev.bus_id = "vio", 31 .dev.bus = &vio_bus_type, 32 }; 33 34 + static int (*is_match)(const struct vio_device_id *id, 35 + const struct vio_dev *dev); 36 + static void (*unregister_device_callback)(struct vio_dev *dev); 37 + static void (*release_device_callback)(struct device *dev); 38 39 /* convert from struct device to struct vio_dev and pass to driver. 40 * dev->driver has already been set by generic code because vio_bus_match ··· 75 struct vio_driver *viodrv = to_vio_driver(dev->driver); 76 const struct vio_device_id *id; 77 int error = -ENODEV; 78 79 if (!viodrv->probe) 80 return error; ··· 94 { 95 struct vio_dev *viodev = to_vio_dev(dev); 96 struct vio_driver *viodrv = to_vio_driver(dev->driver); 97 98 if (viodrv->remove) { 99 return viodrv->remove(viodev); ··· 146 static const struct vio_device_id * vio_match_device(const struct vio_device_id *ids, 147 const struct vio_dev *dev) 148 { 149 while (ids->type) { 150 + if (is_match(ids, dev)) 151 return ids; 152 ids++; 153 } 154 return NULL; 155 } 156 157 /** 158 * vio_bus_init: - Initialize the virtual IO bus 159 */ 160 + int __init vio_bus_init(int (*match_func)(const struct vio_device_id *id, 161 + const struct vio_dev *dev), 162 + void (*unregister_dev)(struct vio_dev *), 163 + void (*release_dev)(struct device *)) 164 { 165 int err; 166 + 167 + is_match = match_func; 168 + unregister_device_callback = unregister_dev; 169 + release_device_callback = release_dev; 170 171 err = bus_register(&vio_bus_type); 172 if (err) { ··· 264 return err; 265 } 266 267 + /* the fake parent of all vio devices, just to give us 268 + * a nice directory 269 + */ 270 err = device_register(&vio_bus_device.dev); 271 if (err) { 272 + printk(KERN_WARNING "%s: device_register returned %i\n", 273 + __FUNCTION__, err); 274 return err; 275 } 276 277 return 0; 278 } 279 280 /* vio_dev refcount hit 0 */ 281 static void __devinit vio_dev_release(struct device *dev) 282 { 283 + if (release_device_callback) 284 + release_device_callback(dev); 285 kfree(to_vio_dev(dev)); 286 } 287 288 static ssize_t viodev_show_name(struct device *dev, struct device_attribute *attr, char *buf) 289 { ··· 312 } 313 DEVICE_ATTR(name, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_name, NULL); 314 315 + struct vio_dev * __devinit vio_register_device_common( 316 struct vio_dev *viodev, char *name, char *type, 317 uint32_t unit_address, struct iommu_table *iommu_table) 318 { 319 viodev->name = name; 320 viodev->type = type; 321 viodev->unit_address = unit_address; ··· 338 return viodev; 339 } 340 341 void __devinit vio_unregister_device(struct vio_dev *viodev) 342 { 343 + if (unregister_device_callback) 344 + unregister_device_callback(viodev); 345 
device_remove_file(&viodev->dev, &dev_attr_name); 346 device_unregister(&viodev->dev); 347 } 348 EXPORT_SYMBOL(vio_unregister_device); 349 350 static dma_addr_t vio_map_single(struct device *dev, void *vaddr, 351 size_t size, enum dma_data_direction direction) ··· 616 struct vio_driver *vio_drv = to_vio_driver(drv); 617 const struct vio_device_id *ids = vio_drv->id_table; 618 const struct vio_device_id *found_id; 619 620 if (!ids) 621 return 0;
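With the iSeries and pSeries blocks hoisted out of the common file, vio.c is pure mechanism: a platform hands vio_bus_init() its matching, unregister and release behaviour at boot, and vio_match_device() simply calls whatever was installed. A runnable miniature of that callback injection (the structs are trimmed stand-ins, vio_bus_init_sketch takes one hook where the real function takes three, and the type-prefix compare is modelled on the old pSeries test):

#include <stdio.h>
#include <string.h>

struct vio_dev { const char *type; };
struct vio_device_id { const char *type; const char *compat; };

/* the hook the bus stores; unregister/release work the same way */
static int (*is_match)(const struct vio_device_id *,
                       const struct vio_dev *);

static int vio_bus_init_sketch(int (*match)(const struct vio_device_id *,
                                            const struct vio_dev *))
{
        is_match = match;
        return 0;
}

static int pseries_style_match(const struct vio_device_id *id,
                               const struct vio_dev *dev)
{
        return strncmp(dev->type, id->type, strlen(id->type)) == 0;
}

int main(void)
{
        struct vio_dev dev = { .type = "vscsi" };
        struct vio_device_id id = { .type = "vscsi", .compat = "" };

        vio_bus_init_sketch(pseries_style_match);
        printf("match: %d\n", is_match(&id, &dev));
        return 0;
}

One stored pointer per hook keeps the shared bus code free of CONFIG_PPC_ISERIES/CONFIG_PPC_PSERIES conditionals while letting each platform keep its historical matching semantics.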
+1 -2
arch/ppc64/mm/hash_native.c
··· 51 unsigned long prpn, unsigned long vflags, 52 unsigned long rflags) 53 { 54 - unsigned long arpn = physRpn_to_absRpn(prpn); 55 hpte_t *hptep = htab_address + hpte_group; 56 unsigned long hpte_v, hpte_r; 57 int i; ··· 73 hpte_v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID; 74 if (vflags & HPTE_V_LARGE) 75 va &= ~(1UL << HPTE_V_AVPN_SHIFT); 76 - hpte_r = (arpn << HPTE_R_RPN_SHIFT) | rflags; 77 78 hptep->r = hpte_r; 79 /* Guarantee the second dword is visible before the valid bit */
··· 51 unsigned long prpn, unsigned long vflags, 52 unsigned long rflags) 53 { 54 hpte_t *hptep = htab_address + hpte_group; 55 unsigned long hpte_v, hpte_r; 56 int i; ··· 74 hpte_v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID; 75 if (vflags & HPTE_V_LARGE) 76 va &= ~(1UL << HPTE_V_AVPN_SHIFT); 77 + hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags; 78 79 hptep->r = hpte_r; 80 /* Guarantee the second dword is visible before the valid bit */
+2 -2
arch/ppc64/mm/hash_utils.c
··· 210 211 /* create bolted the linear mapping in the hash table */ 212 for (i=0; i < lmb.memory.cnt; i++) { 213 - base = lmb.memory.region[i].physbase + KERNELBASE; 214 size = lmb.memory.region[i].size; 215 216 DBG("creating mapping for region: %lx : %lx\n", base, size); ··· 302 int local = 0; 303 cpumask_t tmp; 304 305 - if ((ea & ~REGION_MASK) > EADDR_MASK) 306 return 1; 307 308 switch (REGION_ID(ea)) {
··· 210 211 /* create bolted the linear mapping in the hash table */ 212 for (i=0; i < lmb.memory.cnt; i++) { 213 + base = lmb.memory.region[i].base + KERNELBASE; 214 size = lmb.memory.region[i].size; 215 216 DBG("creating mapping for region: %lx : %lx\n", base, size); ··· 302 int local = 0; 303 cpumask_t tmp; 304 305 + if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) 306 return 1; 307 308 switch (REGION_ID(ea)) {
+222 -186
arch/ppc64/mm/hugetlbpage.c
··· 27 28 #include <linux/sysctl.h> 29 30 - #define HUGEPGDIR_SHIFT (HPAGE_SHIFT + PAGE_SHIFT - 3) 31 - #define HUGEPGDIR_SIZE (1UL << HUGEPGDIR_SHIFT) 32 - #define HUGEPGDIR_MASK (~(HUGEPGDIR_SIZE-1)) 33 34 - #define HUGEPTE_INDEX_SIZE 9 35 - #define HUGEPGD_INDEX_SIZE 10 36 - 37 - #define PTRS_PER_HUGEPTE (1 << HUGEPTE_INDEX_SIZE) 38 - #define PTRS_PER_HUGEPGD (1 << HUGEPGD_INDEX_SIZE) 39 - 40 - static inline int hugepgd_index(unsigned long addr) 41 { 42 - return (addr & ~REGION_MASK) >> HUGEPGDIR_SHIFT; 43 - } 44 45 - static pud_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr) 46 - { 47 - int index; 48 - 49 - if (! mm->context.huge_pgdir) 50 - return NULL; 51 - 52 - 53 - index = hugepgd_index(addr); 54 - BUG_ON(index >= PTRS_PER_HUGEPGD); 55 - return (pud_t *)(mm->context.huge_pgdir + index); 56 - } 57 - 58 - static inline pte_t *hugepte_offset(pud_t *dir, unsigned long addr) 59 - { 60 - int index; 61 - 62 - if (pud_none(*dir)) 63 - return NULL; 64 - 65 - index = (addr >> HPAGE_SHIFT) % PTRS_PER_HUGEPTE; 66 - return (pte_t *)pud_page(*dir) + index; 67 - } 68 - 69 - static pud_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr) 70 - { 71 BUG_ON(! in_hugepage_area(mm->context, addr)); 72 73 - if (! mm->context.huge_pgdir) { 74 - pgd_t *new; 75 - spin_unlock(&mm->page_table_lock); 76 - /* Don't use pgd_alloc(), because we want __GFP_REPEAT */ 77 - new = kmem_cache_alloc(zero_cache, GFP_KERNEL | __GFP_REPEAT); 78 - BUG_ON(memcmp(new, empty_zero_page, PAGE_SIZE)); 79 - spin_lock(&mm->page_table_lock); 80 81 - /* 82 - * Because we dropped the lock, we should re-check the 83 - * entry, as somebody else could have populated it.. 84 - */ 85 - if (mm->context.huge_pgdir) 86 - pgd_free(new); 87 - else 88 - mm->context.huge_pgdir = new; 89 - } 90 - return hugepgd_offset(mm, addr); 91 - } 92 - 93 - static pte_t *hugepte_alloc(struct mm_struct *mm, pud_t *dir, unsigned long addr) 94 - { 95 - if (! pud_present(*dir)) { 96 - pte_t *new; 97 - 98 - spin_unlock(&mm->page_table_lock); 99 - new = kmem_cache_alloc(zero_cache, GFP_KERNEL | __GFP_REPEAT); 100 - BUG_ON(memcmp(new, empty_zero_page, PAGE_SIZE)); 101 - spin_lock(&mm->page_table_lock); 102 - /* 103 - * Because we dropped the lock, we should re-check the 104 - * entry, as somebody else could have populated it.. 105 - */ 106 - if (pud_present(*dir)) { 107 - if (new) 108 - kmem_cache_free(zero_cache, new); 109 - } else { 110 - struct page *ptepage; 111 - 112 - if (! new) 113 - return NULL; 114 - ptepage = virt_to_page(new); 115 - ptepage->mapping = (void *) mm; 116 - ptepage->index = addr & HUGEPGDIR_MASK; 117 - pud_populate(mm, dir, new); 118 } 119 } 120 121 - return hugepte_offset(dir, addr); 122 - } 123 - 124 - pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 125 - { 126 - pud_t *pud; 127 - 128 - BUG_ON(! in_hugepage_area(mm->context, addr)); 129 - 130 - pud = hugepgd_offset(mm, addr); 131 - if (! pud) 132 - return NULL; 133 - 134 - return hugepte_offset(pud, addr); 135 } 136 137 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) 138 { 139 - pud_t *pud; 140 141 BUG_ON(! in_hugepage_area(mm->context, addr)); 142 143 - pud = hugepgd_alloc(mm, addr); 144 - if (! pud) 145 - return NULL; 146 147 - return hugepte_alloc(mm, pud, addr); 148 } 149 150 /* ··· 132 return 0; 133 } 134 135 - static void flush_segments(void *parm) 136 { 137 - u16 segs = (unsigned long) parm; 138 unsigned long i; 139 140 asm volatile("isync" : : : "memory"); 141 142 - for (i = 0; i < 16; i++) { 143 - if (! 
(segs & (1U << i))) 144 continue; 145 asm volatile("slbie %0" : : "r" (i << SID_SHIFT)); 146 } ··· 150 asm volatile("isync" : : : "memory"); 151 } 152 153 - static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg) 154 { 155 - unsigned long start = seg << SID_SHIFT; 156 - unsigned long end = (seg+1) << SID_SHIFT; 157 struct vm_area_struct *vma; 158 159 - BUG_ON(seg >= 16); 160 161 /* Check no VMAs are in the region */ 162 vma = find_vma(mm, start); ··· 186 return 0; 187 } 188 189 - static int open_low_hpage_segs(struct mm_struct *mm, u16 newsegs) 190 { 191 unsigned long i; 192 193 - newsegs &= ~(mm->context.htlb_segs); 194 - if (! newsegs) 195 return 0; /* The segments we want are already open */ 196 197 - for (i = 0; i < 16; i++) 198 - if ((1 << i) & newsegs) 199 - if (prepare_low_seg_for_htlb(mm, i) != 0) 200 return -EBUSY; 201 202 - mm->context.htlb_segs |= newsegs; 203 204 /* update the paca copy of the context struct */ 205 get_paca()->context = mm->context; ··· 226 /* the context change must make it to memory before the flush, 227 * so that further SLB misses do the right thing. */ 228 mb(); 229 - on_each_cpu(flush_segments, (void *)(unsigned long)newsegs, 0, 1); 230 231 return 0; 232 } 233 234 int prepare_hugepage_range(unsigned long addr, unsigned long len) 235 { 236 - if (within_hugepage_high_range(addr, len)) 237 - return 0; 238 - else if ((addr < 0x100000000UL) && ((addr+len) < 0x100000000UL)) { 239 - int err; 240 - /* Yes, we need both tests, in case addr+len overflows 241 - * 64-bit arithmetic */ 242 - err = open_low_hpage_segs(current->mm, 243 LOW_ESID_MASK(addr, len)); 244 - if (err) 245 - printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)" 246 - " failed (segs: 0x%04hx)\n", addr, len, 247 - LOW_ESID_MASK(addr, len)); 248 return err; 249 } 250 251 - return -EINVAL; 252 } 253 254 struct page * ··· 354 vma = find_vma(mm, addr); 355 continue; 356 } 357 - if (touches_hugepage_high_range(addr, len)) { 358 - addr = TASK_HPAGE_END; 359 vma = find_vma(mm, addr); 360 continue; 361 } ··· 434 if (touches_hugepage_low_range(mm, addr, len)) { 435 addr = (addr & ((~0) << SID_SHIFT)) - len; 436 goto hugepage_recheck; 437 - } else if (touches_hugepage_high_range(addr, len)) { 438 - addr = TASK_HPAGE_BASE - len; 439 } 440 441 /* ··· 527 return -ENOMEM; 528 } 529 530 - static unsigned long htlb_get_high_area(unsigned long len) 531 { 532 - unsigned long addr = TASK_HPAGE_BASE; 533 struct vm_area_struct *vma; 534 535 vma = find_vma(current->mm, addr); 536 - for (vma = find_vma(current->mm, addr); 537 - addr + len <= TASK_HPAGE_END; 538 - vma = vma->vm_next) { 539 BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */ 540 - BUG_ON(! within_hugepage_high_range(addr, len)); 541 542 if (!vma || (addr + len) <= vma->vm_start) 543 return addr; 544 addr = ALIGN(vma->vm_end, HPAGE_SIZE); 545 - /* Because we're in a hugepage region, this alignment 546 - * should not skip us over any VMAs */ 547 } 548 549 return -ENOMEM; ··· 558 unsigned long len, unsigned long pgoff, 559 unsigned long flags) 560 { 561 if (len & ~HPAGE_MASK) 562 return -EINVAL; 563 ··· 568 return -EINVAL; 569 570 if (test_thread_flag(TIF_32BIT)) { 571 - int lastshift = 0; 572 - u16 segmask, cursegs = current->mm->context.htlb_segs; 573 574 /* First see if we can do the mapping in the existing 575 - * low hpage segments */ 576 - addr = htlb_get_low_area(len, cursegs); 577 if (addr != -ENOMEM) 578 return addr; 579 580 - for (segmask = LOW_ESID_MASK(0x100000000UL-len, len); 581 - ! 
lastshift; segmask >>=1) { 582 - if (segmask & 1) 583 lastshift = 1; 584 585 - addr = htlb_get_low_area(len, cursegs | segmask); 586 if ((addr != -ENOMEM) 587 - && open_low_hpage_segs(current->mm, segmask) == 0) 588 return addr; 589 } 590 - printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open" 591 - " enough segments\n"); 592 - return -ENOMEM; 593 } else { 594 - return htlb_get_high_area(len); 595 - } 596 - } 597 598 - void hugetlb_mm_free_pgd(struct mm_struct *mm) 599 - { 600 - int i; 601 - pgd_t *pgdir; 602 603 - spin_lock(&mm->page_table_lock); 604 605 - pgdir = mm->context.huge_pgdir; 606 - if (! pgdir) 607 - goto out; 608 - 609 - mm->context.huge_pgdir = NULL; 610 - 611 - /* cleanup any hugepte pages leftover */ 612 - for (i = 0; i < PTRS_PER_HUGEPGD; i++) { 613 - pud_t *pud = (pud_t *)(pgdir + i); 614 - 615 - if (! pud_none(*pud)) { 616 - pte_t *pte = (pte_t *)pud_page(*pud); 617 - struct page *ptepage = virt_to_page(pte); 618 - 619 - ptepage->mapping = NULL; 620 - 621 - BUG_ON(memcmp(pte, empty_zero_page, PAGE_SIZE)); 622 - kmem_cache_free(zero_cache, pte); 623 } 624 - pud_clear(pud); 625 } 626 - 627 - BUG_ON(memcmp(pgdir, empty_zero_page, PAGE_SIZE)); 628 - kmem_cache_free(zero_cache, pgdir); 629 - 630 - out: 631 - spin_unlock(&mm->page_table_lock); 632 } 633 634 int hash_huge_page(struct mm_struct *mm, unsigned long access,
··· 27 28 #include <linux/sysctl.h> 29 30 + #define NUM_LOW_AREAS (0x100000000UL >> SID_SHIFT) 31 + #define NUM_HIGH_AREAS (PGTABLE_RANGE >> HTLB_AREA_SHIFT) 32 33 + /* Modelled after find_linux_pte() */ 34 + pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 35 { 36 + pgd_t *pg; 37 + pud_t *pu; 38 + pmd_t *pm; 39 + pte_t *pt; 40 41 BUG_ON(! in_hugepage_area(mm->context, addr)); 42 43 + addr &= HPAGE_MASK; 44 45 + pg = pgd_offset(mm, addr); 46 + if (!pgd_none(*pg)) { 47 + pu = pud_offset(pg, addr); 48 + if (!pud_none(*pu)) { 49 + pm = pmd_offset(pu, addr); 50 + pt = (pte_t *)pm; 51 + BUG_ON(!pmd_none(*pm) 52 + && !(pte_present(*pt) && pte_huge(*pt))); 53 + return pt; 54 } 55 } 56 57 + return NULL; 58 } 59 60 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) 61 { 62 + pgd_t *pg; 63 + pud_t *pu; 64 + pmd_t *pm; 65 + pte_t *pt; 66 67 BUG_ON(! in_hugepage_area(mm->context, addr)); 68 69 + addr &= HPAGE_MASK; 70 71 + pg = pgd_offset(mm, addr); 72 + pu = pud_alloc(mm, pg, addr); 73 + 74 + if (pu) { 75 + pm = pmd_alloc(mm, pu, addr); 76 + if (pm) { 77 + pt = (pte_t *)pm; 78 + BUG_ON(!pmd_none(*pm) 79 + && !(pte_present(*pt) && pte_huge(*pt))); 80 + return pt; 81 + } 82 + } 83 + 84 + return NULL; 85 + } 86 + 87 + #define HUGEPTE_BATCH_SIZE (HPAGE_SIZE / PMD_SIZE) 88 + 89 + void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 90 + pte_t *ptep, pte_t pte) 91 + { 92 + int i; 93 + 94 + if (pte_present(*ptep)) { 95 + pte_clear(mm, addr, ptep); 96 + flush_tlb_pending(); 97 + } 98 + 99 + for (i = 0; i < HUGEPTE_BATCH_SIZE; i++) { 100 + *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); 101 + ptep++; 102 + } 103 + } 104 + 105 + pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, 106 + pte_t *ptep) 107 + { 108 + unsigned long old = pte_update(ptep, ~0UL); 109 + int i; 110 + 111 + if (old & _PAGE_HASHPTE) 112 + hpte_update(mm, addr, old, 0); 113 + 114 + for (i = 1; i < HUGEPTE_BATCH_SIZE; i++) 115 + ptep[i] = __pte(0); 116 + 117 + return __pte(old); 118 } 119 120 /* ··· 162 return 0; 163 } 164 165 + static void flush_low_segments(void *parm) 166 { 167 + u16 areas = (unsigned long) parm; 168 unsigned long i; 169 170 asm volatile("isync" : : : "memory"); 171 172 + BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS); 173 + 174 + for (i = 0; i < NUM_LOW_AREAS; i++) { 175 + if (! (areas & (1U << i))) 176 continue; 177 asm volatile("slbie %0" : : "r" (i << SID_SHIFT)); 178 } ··· 178 asm volatile("isync" : : : "memory"); 179 } 180 181 + static void flush_high_segments(void *parm) 182 { 183 + u16 areas = (unsigned long) parm; 184 + unsigned long i, j; 185 + 186 + asm volatile("isync" : : : "memory"); 187 + 188 + BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS); 189 + 190 + for (i = 0; i < NUM_HIGH_AREAS; i++) { 191 + if (! 
(areas & (1U << i))) 192 + continue; 193 + for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++) 194 + asm volatile("slbie %0" 195 + :: "r" ((i << HTLB_AREA_SHIFT) + (j << SID_SHIFT))); 196 + } 197 + 198 + asm volatile("isync" : : : "memory"); 199 + } 200 + 201 + static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area) 202 + { 203 + unsigned long start = area << SID_SHIFT; 204 + unsigned long end = (area+1) << SID_SHIFT; 205 struct vm_area_struct *vma; 206 207 + BUG_ON(area >= NUM_LOW_AREAS); 208 209 /* Check no VMAs are in the region */ 210 vma = find_vma(mm, start); ··· 194 return 0; 195 } 196 197 + static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area) 198 + { 199 + unsigned long start = area << HTLB_AREA_SHIFT; 200 + unsigned long end = (area+1) << HTLB_AREA_SHIFT; 201 + struct vm_area_struct *vma; 202 + 203 + BUG_ON(area >= NUM_HIGH_AREAS); 204 + 205 + /* Check no VMAs are in the region */ 206 + vma = find_vma(mm, start); 207 + if (vma && (vma->vm_start < end)) 208 + return -EBUSY; 209 + 210 + return 0; 211 + } 212 + 213 + static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas) 214 { 215 unsigned long i; 216 217 + BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS); 218 + BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS); 219 + 220 + newareas &= ~(mm->context.low_htlb_areas); 221 + if (! newareas) 222 return 0; /* The segments we want are already open */ 223 224 + for (i = 0; i < NUM_LOW_AREAS; i++) 225 + if ((1 << i) & newareas) 226 + if (prepare_low_area_for_htlb(mm, i) != 0) 227 return -EBUSY; 228 229 + mm->context.low_htlb_areas |= newareas; 230 231 /* update the paca copy of the context struct */ 232 get_paca()->context = mm->context; ··· 215 /* the context change must make it to memory before the flush, 216 * so that further SLB misses do the right thing. */ 217 mb(); 218 + on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1); 219 + 220 + return 0; 221 + } 222 + 223 + static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas) 224 + { 225 + unsigned long i; 226 + 227 + BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS); 228 + BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8) 229 + != NUM_HIGH_AREAS); 230 + 231 + newareas &= ~(mm->context.high_htlb_areas); 232 + if (! newareas) 233 + return 0; /* The areas we want are already open */ 234 + 235 + for (i = 0; i < NUM_HIGH_AREAS; i++) 236 + if ((1 << i) & newareas) 237 + if (prepare_high_area_for_htlb(mm, i) != 0) 238 + return -EBUSY; 239 + 240 + mm->context.high_htlb_areas |= newareas; 241 + 242 + /* update the paca copy of the context struct */ 243 + get_paca()->context = mm->context; 244 + 245 + /* the context change must make it to memory before the flush, 246 + * so that further SLB misses do the right thing. 
*/ 247 + mb(); 248 + on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1); 249 250 return 0; 251 } 252 253 int prepare_hugepage_range(unsigned long addr, unsigned long len) 254 { 255 + int err; 256 + 257 + if ( (addr+len) < addr ) 258 + return -EINVAL; 259 + 260 + if ((addr + len) < 0x100000000UL) 261 + err = open_low_hpage_areas(current->mm, 262 LOW_ESID_MASK(addr, len)); 263 + else 264 + err = open_high_hpage_areas(current->mm, 265 + HTLB_AREA_MASK(addr, len)); 266 + if (err) { 267 + printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)" 268 + " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n", 269 + addr, len, 270 + LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len)); 271 return err; 272 } 273 274 + return 0; 275 } 276 277 struct page * ··· 309 vma = find_vma(mm, addr); 310 continue; 311 } 312 + if (touches_hugepage_high_range(mm, addr, len)) { 313 + addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT); 314 vma = find_vma(mm, addr); 315 continue; 316 } ··· 389 if (touches_hugepage_low_range(mm, addr, len)) { 390 addr = (addr & ((~0) << SID_SHIFT)) - len; 391 goto hugepage_recheck; 392 + } else if (touches_hugepage_high_range(mm, addr, len)) { 393 + addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len; 394 + goto hugepage_recheck; 395 } 396 397 /* ··· 481 return -ENOMEM; 482 } 483 484 + static unsigned long htlb_get_high_area(unsigned long len, u16 areamask) 485 { 486 + unsigned long addr = 0x100000000UL; 487 struct vm_area_struct *vma; 488 489 vma = find_vma(current->mm, addr); 490 + while (addr + len <= TASK_SIZE_USER64) { 491 BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */ 492 + 493 + if (! __within_hugepage_high_range(addr, len, areamask)) { 494 + addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT); 495 + vma = find_vma(current->mm, addr); 496 + continue; 497 + } 498 499 if (!vma || (addr + len) <= vma->vm_start) 500 return addr; 501 addr = ALIGN(vma->vm_end, HPAGE_SIZE); 502 + /* Depending on segmask this might not be a confirmed 503 + * hugepage region, so the ALIGN could have skipped 504 + * some VMAs */ 505 + vma = find_vma(current->mm, addr); 506 } 507 508 return -ENOMEM; ··· 507 unsigned long len, unsigned long pgoff, 508 unsigned long flags) 509 { 510 + int lastshift; 511 + u16 areamask, curareas; 512 + 513 if (len & ~HPAGE_MASK) 514 return -EINVAL; 515 ··· 514 return -EINVAL; 515 516 if (test_thread_flag(TIF_32BIT)) { 517 + curareas = current->mm->context.low_htlb_areas; 518 519 /* First see if we can do the mapping in the existing 520 + * low areas */ 521 + addr = htlb_get_low_area(len, curareas); 522 if (addr != -ENOMEM) 523 return addr; 524 525 + lastshift = 0; 526 + for (areamask = LOW_ESID_MASK(0x100000000UL-len, len); 527 + ! lastshift; areamask >>=1) { 528 + if (areamask & 1) 529 lastshift = 1; 530 531 + addr = htlb_get_low_area(len, curareas | areamask); 532 if ((addr != -ENOMEM) 533 + && open_low_hpage_areas(current->mm, areamask) == 0) 534 return addr; 535 } 536 } else { 537 + curareas = current->mm->context.high_htlb_areas; 538 539 + /* First see if we can do the mapping in the existing 540 + * high areas */ 541 + addr = htlb_get_high_area(len, curareas); 542 + if (addr != -ENOMEM) 543 + return addr; 544 545 + lastshift = 0; 546 + for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len); 547 + ! 
lastshift; areamask >>=1) { 548 + if (areamask & 1) 549 + lastshift = 1; 550 551 + addr = htlb_get_high_area(len, curareas | areamask); 552 + if ((addr != -ENOMEM) 553 + && open_high_hpage_areas(current->mm, areamask) == 0) 554 + return addr; 555 } 556 } 557 + printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open" 558 + " enough areas\n"); 559 + return -ENOMEM; 560 } 561 562 int hash_huge_page(struct mm_struct *mm, unsigned long access,
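Editor's sketch: the rewritten hugetlbpage.c tracks hugepage-capable address space as two 16-bit masks, one bit per 256MB segment below 4GB (the low areas) and one bit per 1TB area above it (the high areas). Below is a minimal standalone model of the low-area mask arithmetic, assuming the SID_SHIFT value used above; the function name is illustrative, not the kernel's.

#include <stdio.h>
#include <stdint.h>

#define SID_SHIFT 28	/* 256MB segments below 4GB */

/* Model of LOW_ESID_MASK(): bits covering [addr, addr+len) */
static unsigned int low_esid_mask(uint64_t addr, uint64_t len)
{
	unsigned int first = addr >> SID_SHIFT;
	unsigned int last = (addr + len - 1) >> SID_SHIFT;

	return ((1U << (last + 1)) - (1U << first)) & 0xffff;
}

int main(void)
{
	/* A 512MB mapping at 768MB touches segments 3 and 4: 0x0018 */
	printf("mask = 0x%04x\n", low_esid_mask(0x30000000ULL, 0x20000000ULL));
	return 0;
}

open_low_hpage_areas() then masks out the bits already open, checks each remaining area for existing VMAs, sets the bits in mm->context.low_htlb_areas, updates the paca copy, and broadcasts an SLB flush; open_high_hpage_areas() is the same dance with 1TB strides.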
+1 -1
arch/ppc64/mm/imalloc.c
··· 31 break; 32 if ((unsigned long)tmp->addr >= ioremap_bot) 33 addr = tmp->size + (unsigned long) tmp->addr; 34 - if (addr > IMALLOC_END-size) 35 return 1; 36 } 37 *im_addr = addr;
··· 31 break; 32 if ((unsigned long)tmp->addr >= ioremap_bot) 33 addr = tmp->size + (unsigned long) tmp->addr; 34 + if (addr >= IMALLOC_END-size) 35 return 1; 36 } 37 *im_addr = addr;
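Editor's sketch: the operator change bites exactly at the boundary. A candidate block whose end lands precisely on IMALLOC_END passed the old '>' test and is rejected by the new '>='. A standalone check with stand-in values:

#include <stdio.h>

int main(void)
{
	unsigned long IMALLOC_END = 0x1000;	/* stand-in value */
	unsigned long size = 0x100;
	unsigned long addr = IMALLOC_END - size; /* block would end at IMALLOC_END */

	printf("old test rejects: %d\n", addr >  IMALLOC_END - size); /* 0: accepted */
	printf("new test rejects: %d\n", addr >= IMALLOC_END - size); /* 1: rejected */
	return 0;
}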
+54 -42
arch/ppc64/mm/init.c
··· 42 43 #include <asm/pgalloc.h> 44 #include <asm/page.h> 45 - #include <asm/abs_addr.h> 46 #include <asm/prom.h> 47 #include <asm/lmb.h> 48 #include <asm/rtas.h> ··· 64 #include <asm/abs_addr.h> 65 #include <asm/vdso.h> 66 #include <asm/imalloc.h> 67 68 int mem_init_done; 69 unsigned long ioremap_bot = IMALLOC_BASE; ··· 166 ptep = pte_alloc_kernel(&init_mm, pmdp, ea); 167 if (!ptep) 168 return -ENOMEM; 169 - pa = abs_to_phys(pa); 170 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, 171 __pgprot(flags))); 172 spin_unlock(&init_mm.page_table_lock); ··· 232 * Before that, we map using addresses going 233 * up from ioremap_bot. imalloc will use 234 * the addresses from ioremap_bot through 235 - * IMALLOC_END (0xE000001fffffffff) 236 * 237 */ 238 pa = addr & PAGE_MASK; ··· 423 int index; 424 int err; 425 426 - #ifdef CONFIG_HUGETLB_PAGE 427 - /* We leave htlb_segs as it was, but for a fork, we need to 428 - * clear the huge_pgdir. */ 429 - mm->context.huge_pgdir = NULL; 430 - #endif 431 - 432 again: 433 if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL)) 434 return -ENOMEM; ··· 453 spin_unlock(&mmu_context_lock); 454 455 mm->context.id = NO_CONTEXT; 456 - 457 - hugetlb_mm_free_pgd(mm); 458 } 459 460 /* ··· 482 for (i = 1; i < lmb.memory.cnt; i++) { 483 unsigned long base, prevbase, prevsize; 484 485 - prevbase = lmb.memory.region[i-1].physbase; 486 prevsize = lmb.memory.region[i-1].size; 487 - base = lmb.memory.region[i].physbase; 488 if (base > (prevbase + prevsize)) { 489 io_hole_start = prevbase + prevsize; 490 io_hole_size = base - (prevbase + prevsize); ··· 511 for (i=0; i < lmb.memory.cnt; i++) { 512 unsigned long base; 513 514 - #ifdef CONFIG_MSCHUNKS 515 - base = lmb.memory.region[i].physbase; 516 - #else 517 base = lmb.memory.region[i].base; 518 - #endif 519 if ((paddr >= base) && 520 (paddr < (base + lmb.memory.region[i].size))) { 521 return 1; ··· 542 */ 543 bootmap_pages = bootmem_bootmap_pages(total_pages); 544 545 - start = abs_to_phys(lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE)); 546 BUG_ON(!start); 547 548 boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages); ··· 553 * present. 
554 */ 555 for (i=0; i < lmb.memory.cnt; i++) { 556 - unsigned long physbase, size; 557 unsigned long start_pfn, end_pfn; 558 559 - physbase = lmb.memory.region[i].physbase; 560 size = lmb.memory.region[i].size; 561 562 - start_pfn = physbase >> PAGE_SHIFT; 563 end_pfn = start_pfn + (size >> PAGE_SHIFT); 564 memory_present(0, start_pfn, end_pfn); 565 566 - free_bootmem(physbase, size); 567 } 568 569 /* reserve the sections we're already using */ 570 for (i=0; i < lmb.reserved.cnt; i++) { 571 - unsigned long physbase = lmb.reserved.region[i].physbase; 572 unsigned long size = lmb.reserved.region[i].size; 573 574 - reserve_bootmem(physbase, size); 575 } 576 } 577 ··· 610 int i; 611 612 for (i=0; i < lmb.memory.cnt; i++) { 613 - unsigned long physbase, size; 614 struct kcore_list *kcore_mem; 615 616 - physbase = lmb.memory.region[i].physbase; 617 size = lmb.memory.region[i].size; 618 619 /* GFP_ATOMIC to avoid might_sleep warnings during boot */ ··· 621 if (!kcore_mem) 622 panic("mem_init: kmalloc failed\n"); 623 624 - kclist_add(kcore_mem, __va(physbase), size); 625 } 626 627 kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START); ··· 681 682 mem_init_done = 1; 683 684 - #ifdef CONFIG_PPC_ISERIES 685 - iommu_vio_init(); 686 - #endif 687 /* Initialize the vDSO */ 688 vdso_init(); 689 } ··· 825 return virt_addr; 826 } 827 828 - kmem_cache_t *zero_cache; 829 - 830 - static void zero_ctor(void *pte, kmem_cache_t *cache, unsigned long flags) 831 { 832 - memset(pte, 0, PAGE_SIZE); 833 } 834 835 void pgtable_cache_init(void) 836 { 837 - zero_cache = kmem_cache_create("zero", 838 - PAGE_SIZE, 839 - 0, 840 - SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, 841 - zero_ctor, 842 - NULL); 843 - if (!zero_cache) 844 - panic("pgtable_cache_init(): could not create zero_cache!\n"); 845 } 846 847 pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
··· 42 43 #include <asm/pgalloc.h> 44 #include <asm/page.h> 45 #include <asm/prom.h> 46 #include <asm/lmb.h> 47 #include <asm/rtas.h> ··· 65 #include <asm/abs_addr.h> 66 #include <asm/vdso.h> 67 #include <asm/imalloc.h> 68 + 69 + #if PGTABLE_RANGE > USER_VSID_RANGE 70 + #warning Limited user VSID range means pagetable space is wasted 71 + #endif 72 + 73 + #if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE) 74 + #warning TASK_SIZE is smaller than it needs to be. 75 + #endif 76 77 int mem_init_done; 78 unsigned long ioremap_bot = IMALLOC_BASE; ··· 159 ptep = pte_alloc_kernel(&init_mm, pmdp, ea); 160 if (!ptep) 161 return -ENOMEM; 162 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, 163 __pgprot(flags))); 164 spin_unlock(&init_mm.page_table_lock); ··· 226 * Before that, we map using addresses going 227 * up from ioremap_bot. imalloc will use 228 * the addresses from ioremap_bot through 229 + * IMALLOC_END 230 * 231 */ 232 pa = addr & PAGE_MASK; ··· 417 int index; 418 int err; 419 420 again: 421 if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL)) 422 return -ENOMEM; ··· 453 spin_unlock(&mmu_context_lock); 454 455 mm->context.id = NO_CONTEXT; 456 } 457 458 /* ··· 484 for (i = 1; i < lmb.memory.cnt; i++) { 485 unsigned long base, prevbase, prevsize; 486 487 + prevbase = lmb.memory.region[i-1].base; 488 prevsize = lmb.memory.region[i-1].size; 489 + base = lmb.memory.region[i].base; 490 if (base > (prevbase + prevsize)) { 491 io_hole_start = prevbase + prevsize; 492 io_hole_size = base - (prevbase + prevsize); ··· 513 for (i=0; i < lmb.memory.cnt; i++) { 514 unsigned long base; 515 516 base = lmb.memory.region[i].base; 517 + 518 if ((paddr >= base) && 519 (paddr < (base + lmb.memory.region[i].size))) { 520 return 1; ··· 547 */ 548 bootmap_pages = bootmem_bootmap_pages(total_pages); 549 550 + start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE); 551 BUG_ON(!start); 552 553 boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages); ··· 558 * present. 
559 */ 560 for (i=0; i < lmb.memory.cnt; i++) { 561 + unsigned long base, size; 562 unsigned long start_pfn, end_pfn; 563 564 + base = lmb.memory.region[i].base; 565 size = lmb.memory.region[i].size; 566 567 + start_pfn = base >> PAGE_SHIFT; 568 end_pfn = start_pfn + (size >> PAGE_SHIFT); 569 memory_present(0, start_pfn, end_pfn); 570 571 + free_bootmem(base, size); 572 } 573 574 /* reserve the sections we're already using */ 575 for (i=0; i < lmb.reserved.cnt; i++) { 576 + unsigned long base = lmb.reserved.region[i].base; 577 unsigned long size = lmb.reserved.region[i].size; 578 579 + reserve_bootmem(base, size); 580 } 581 } 582 ··· 615 int i; 616 617 for (i=0; i < lmb.memory.cnt; i++) { 618 + unsigned long base, size; 619 struct kcore_list *kcore_mem; 620 621 + base = lmb.memory.region[i].base; 622 size = lmb.memory.region[i].size; 623 624 /* GFP_ATOMIC to avoid might_sleep warnings during boot */ ··· 626 if (!kcore_mem) 627 panic("mem_init: kmalloc failed\n"); 628 629 + kclist_add(kcore_mem, __va(base), size); 630 } 631 632 kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START); ··· 686 687 mem_init_done = 1; 688 689 /* Initialize the vDSO */ 690 vdso_init(); 691 } ··· 833 return virt_addr; 834 } 835 836 + static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) 837 { 838 + memset(addr, 0, kmem_cache_size(cache)); 839 } 840 + 841 + static const int pgtable_cache_size[2] = { 842 + PTE_TABLE_SIZE, PMD_TABLE_SIZE 843 + }; 844 + static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = { 845 + "pgd_pte_cache", "pud_pmd_cache", 846 + }; 847 + 848 + kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)]; 849 850 void pgtable_cache_init(void) 851 { 852 + int i; 853 + 854 + BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]); 855 + BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]); 856 + BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]); 857 + BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]); 858 + 859 + for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) { 860 + int size = pgtable_cache_size[i]; 861 + const char *name = pgtable_cache_name[i]; 862 + 863 + pgtable_cache[i] = kmem_cache_create(name, 864 + size, size, 865 + SLAB_HWCACHE_ALIGN 866 + | SLAB_MUST_HWCACHE_ALIGN, 867 + zero_ctor, 868 + NULL); 869 + if (! pgtable_cache[i]) 870 + panic("pgtable_cache_init(): could not create %s!\n", 871 + name); 872 + } 873 } 874 875 pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
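Editor's sketch: the new pgtable_cache_init() sizes each slab to its table rather than to a full page, and zero_ctor() now clears kmem_cache_size(cache) bytes instead of PAGE_SIZE. With 8-byte entries (pte_t through pgd_t are all unsigned long after the page.h change below), the PTE and PGD tables are 4096 bytes while the PMD and PUD tables are 1024, which is why four logical caches collapse into two; see PTE_CACHE_NUM et al. in pgalloc.h further down. A standalone check of the arithmetic:

#include <stdio.h>

#define PTE_INDEX_SIZE 9
#define PMD_INDEX_SIZE 7
#define PUD_INDEX_SIZE 7
#define PGD_INDEX_SIZE 9

int main(void)
{
	/* table size = sizeof(entry) << index_size, entries are 8 bytes */
	printf("PTE table: %d bytes (cache 0)\n", 8 << PTE_INDEX_SIZE);
	printf("PMD table: %d bytes (cache 1)\n", 8 << PMD_INDEX_SIZE);
	printf("PUD table: %d bytes (cache 1)\n", 8 << PUD_INDEX_SIZE);
	printf("PGD table: %d bytes (cache 0)\n", 8 << PGD_INDEX_SIZE);
	return 0;
}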
+1 -1
arch/ppc64/mm/numa.c
··· 671 * Mark reserved regions on this node 672 */ 673 for (i = 0; i < lmb.reserved.cnt; i++) { 674 - unsigned long physbase = lmb.reserved.region[i].physbase; 675 unsigned long size = lmb.reserved.region[i].size; 676 677 if (pa_to_nid(physbase) != nid &&
··· 671 * Mark reserved regions on this node 672 */ 673 for (i = 0; i < lmb.reserved.cnt; i++) { 674 + unsigned long physbase = lmb.reserved.region[i].base; 675 unsigned long size = lmb.reserved.region[i].size; 676 677 if (pa_to_nid(physbase) != nid &&
+14 -13
arch/ppc64/mm/slb_low.S
··· 89 b 9f 90 91 0: /* user address: proto-VSID = context<<15 | ESID */ 92 - li r11,SLB_VSID_USER 93 - 94 - srdi. r9,r3,13 95 bne- 8f /* invalid ea bits set */ 96 97 #ifdef CONFIG_HUGETLB_PAGE 98 BEGIN_FTR_SECTION 99 - /* check against the hugepage ranges */ 100 - cmpldi r3,(TASK_HPAGE_END>>SID_SHIFT) 101 - bge 6f /* >= TASK_HPAGE_END */ 102 - cmpldi r3,(TASK_HPAGE_BASE>>SID_SHIFT) 103 - bge 5f /* TASK_HPAGE_BASE..TASK_HPAGE_END */ 104 - cmpldi r3,16 105 - bge 6f /* 4GB..TASK_HPAGE_BASE */ 106 107 - lhz r9,PACAHTLBSEGS(r13) 108 srd r9,r9,r3 109 andi. r9,r9,1 110 beq 6f 111 112 - 5: /* this is a hugepage user address */ 113 - li r11,(SLB_VSID_USER|SLB_VSID_L) 114 END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE) 115 #endif /* CONFIG_HUGETLB_PAGE */ 116
··· 89 b 9f 90 91 0: /* user address: proto-VSID = context<<15 | ESID */ 92 + srdi. r9,r3,USER_ESID_BITS 93 bne- 8f /* invalid ea bits set */ 94 95 #ifdef CONFIG_HUGETLB_PAGE 96 BEGIN_FTR_SECTION 97 + lhz r9,PACAHIGHHTLBAREAS(r13) 98 + srdi r11,r3,(HTLB_AREA_SHIFT-SID_SHIFT) 99 + srd r9,r9,r11 100 + andi. r9,r9,1 101 + bne 5f 102 103 + li r11,SLB_VSID_USER 104 + 105 + cmpldi r3,16 106 + bge 6f 107 + 108 + lhz r9,PACALOWHTLBAREAS(r13) 109 srd r9,r9,r3 110 andi. r9,r9,1 111 + 112 beq 6f 113 114 + 5: li r11,SLB_VSID_USER|SLB_VSID_L 115 END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE) 116 #endif /* CONFIG_HUGETLB_PAGE */ 117
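Editor's sketch: for readers not fluent in the assembly, the rewritten SLB-miss test reduces to roughly the following standalone C. The high-area bitmask is consulted first (each high bit covers 2^(HTLB_AREA_SHIFT-SID_SHIFT) = 4096 ESIDs), then the low mask for ESIDs below 16. The flag values and function name are stand-ins.

#include <stdio.h>
#include <stdint.h>

#define SID_SHIFT	28
#define HTLB_AREA_SHIFT	40
#define SLB_VSID_USER	0x10	/* stand-in flag values */
#define SLB_VSID_L	0x80

static unsigned long slb_flags(unsigned long esid,
			       uint16_t high_areas, uint16_t low_areas)
{
	if ((high_areas >> (esid >> (HTLB_AREA_SHIFT - SID_SHIFT))) & 1)
		return SLB_VSID_USER | SLB_VSID_L;	/* the branch to 5: */
	if (esid < 16 && ((low_areas >> esid) & 1))
		return SLB_VSID_USER | SLB_VSID_L;
	return SLB_VSID_USER;
}

int main(void)
{
	/* ESID 3 with low-area bit 3 set gets a large-page segment: 0x90 */
	printf("flags = 0x%lx\n", slb_flags(3, 0, 1U << 3));
	return 0;
}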
+55 -40
arch/ppc64/mm/tlb.c
··· 41 DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur); 42 unsigned long pte_freelist_forced_free; 43 44 - void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage) 45 { 46 /* This is safe as we are holding page_table_lock */ 47 cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id()); ··· 100 101 if (atomic_read(&tlb->mm->mm_users) < 2 || 102 cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) { 103 - pte_free(ptepage); 104 return; 105 } 106 107 if (*batchp == NULL) { 108 *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC); 109 if (*batchp == NULL) { 110 - pte_free_now(ptepage); 111 return; 112 } 113 (*batchp)->index = 0; 114 } 115 - (*batchp)->pages[(*batchp)->index++] = ptepage; 116 if ((*batchp)->index == PTE_FREELIST_SIZE) { 117 pte_free_submit(*batchp); 118 *batchp = NULL; ··· 181 flush_hash_range(batch->context, i, local); 182 batch->index = 0; 183 put_cpu(); 184 - } 185 - 186 - #ifdef CONFIG_SMP 187 - static void pte_free_smp_sync(void *arg) 188 - { 189 - /* Do nothing, just ensure we sync with all CPUs */ 190 - } 191 - #endif 192 - 193 - /* This is only called when we are critically out of memory 194 - * (and fail to get a page in pte_free_tlb). 195 - */ 196 - void pte_free_now(struct page *ptepage) 197 - { 198 - pte_freelist_forced_free++; 199 - 200 - smp_call_function(pte_free_smp_sync, NULL, 0, 1); 201 - 202 - pte_free(ptepage); 203 - } 204 - 205 - static void pte_free_rcu_callback(struct rcu_head *head) 206 - { 207 - struct pte_freelist_batch *batch = 208 - container_of(head, struct pte_freelist_batch, rcu); 209 - unsigned int i; 210 - 211 - for (i = 0; i < batch->index; i++) 212 - pte_free(batch->pages[i]); 213 - free_page((unsigned long)batch); 214 - } 215 - 216 - void pte_free_submit(struct pte_freelist_batch *batch) 217 - { 218 - INIT_RCU_HEAD(&batch->rcu); 219 - call_rcu(&batch->rcu, pte_free_rcu_callback); 220 } 221 222 void pte_free_finish(void)
··· 41 DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur); 42 unsigned long pte_freelist_forced_free; 43 44 + struct pte_freelist_batch 45 + { 46 + struct rcu_head rcu; 47 + unsigned int index; 48 + pgtable_free_t tables[0]; 49 + }; 50 + 51 + DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur); 52 + unsigned long pte_freelist_forced_free; 53 + 54 + #define PTE_FREELIST_SIZE \ 55 + ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \ 56 + / sizeof(pgtable_free_t)) 57 + 58 + #ifdef CONFIG_SMP 59 + static void pte_free_smp_sync(void *arg) 60 + { 61 + /* Do nothing, just ensure we sync with all CPUs */ 62 + } 63 + #endif 64 + 65 + /* This is only called when we are critically out of memory 66 + * (and fail to get a page in pte_free_tlb). 67 + */ 68 + static void pgtable_free_now(pgtable_free_t pgf) 69 + { 70 + pte_freelist_forced_free++; 71 + 72 + smp_call_function(pte_free_smp_sync, NULL, 0, 1); 73 + 74 + pgtable_free(pgf); 75 + } 76 + 77 + static void pte_free_rcu_callback(struct rcu_head *head) 78 + { 79 + struct pte_freelist_batch *batch = 80 + container_of(head, struct pte_freelist_batch, rcu); 81 + unsigned int i; 82 + 83 + for (i = 0; i < batch->index; i++) 84 + pgtable_free(batch->tables[i]); 85 + 86 + free_page((unsigned long)batch); 87 + } 88 + 89 + static void pte_free_submit(struct pte_freelist_batch *batch) 90 + { 91 + INIT_RCU_HEAD(&batch->rcu); 92 + call_rcu(&batch->rcu, pte_free_rcu_callback); 93 + } 94 + 95 + void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf) 96 { 97 /* This is safe as we are holding page_table_lock */ 98 cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id()); ··· 49 50 if (atomic_read(&tlb->mm->mm_users) < 2 || 51 cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) { 52 + pgtable_free(pgf); 53 return; 54 } 55 56 if (*batchp == NULL) { 57 *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC); 58 if (*batchp == NULL) { 59 + pgtable_free_now(pgf); 60 return; 61 } 62 (*batchp)->index = 0; 63 } 64 + (*batchp)->tables[(*batchp)->index++] = pgf; 65 if ((*batchp)->index == PTE_FREELIST_SIZE) { 66 pte_free_submit(*batchp); 67 *batchp = NULL; ··· 130 flush_hash_range(batch->context, i, local); 131 batch->index = 0; 132 put_cpu(); 133 } 134 135 void pte_free_finish(void)
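Editor's sketch: the freeing side now queues opaque pgtable_free_t tokens, batches them a page at a time, and hands each full batch to call_rcu(), so any lockless walker is finished with a table before it returns to its slab. Below is a standalone model of just the batching; the RCU deferral is collapsed into a direct call, and the partial batch that pte_free_finish() would drain is simply left queued. All names are illustrative.

#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 4	/* the kernel sizes this to fill one page */

struct batch {
	unsigned int index;
	unsigned long tables[BATCH_SIZE];
};

static struct batch *cur;

/* Models pte_free_submit() plus the RCU callback */
static void submit(struct batch *b)
{
	for (unsigned int i = 0; i < b->index; i++)
		printf("freeing table token 0x%lx\n", b->tables[i]);
	free(b);
}

/* Models pgtable_free_tlb(): free immediately if no batch memory */
static void queue_free(unsigned long token)
{
	if (!cur) {
		cur = calloc(1, sizeof(*cur));
		if (!cur) {
			printf("immediate free 0x%lx\n", token);
			return;
		}
	}
	cur->tables[cur->index++] = token;
	if (cur->index == BATCH_SIZE) {
		submit(cur);
		cur = NULL;
	}
}

int main(void)
{
	for (unsigned long t = 1; t <= 6; t++)
		queue_free((t << 4) | 1);	/* low bits carry a cache number */
	return 0;
}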
+1 -1
arch/ppc64/xmon/start.c
··· 27 struct tty_struct *tty) 28 { 29 /* ensure xmon is enabled */ 30 - xmon_init(); 31 debugger(pt_regs); 32 } 33
··· 27 struct tty_struct *tty) 28 { 29 /* ensure xmon is enabled */ 30 + xmon_init(1); 31 debugger(pt_regs); 32 } 33
+18 -8
arch/ppc64/xmon/xmon.c
··· 2496 } 2497 } 2498 2499 - void xmon_init(void) 2500 { 2501 - __debugger = xmon; 2502 - __debugger_ipi = xmon_ipi; 2503 - __debugger_bpt = xmon_bpt; 2504 - __debugger_sstep = xmon_sstep; 2505 - __debugger_iabr_match = xmon_iabr_match; 2506 - __debugger_dabr_match = xmon_dabr_match; 2507 - __debugger_fault_handler = xmon_fault_handler; 2508 } 2509 2510 void dump_segments(void)
··· 2496 } 2497 } 2498 2499 + void xmon_init(int enable) 2500 { 2501 + if (enable) { 2502 + __debugger = xmon; 2503 + __debugger_ipi = xmon_ipi; 2504 + __debugger_bpt = xmon_bpt; 2505 + __debugger_sstep = xmon_sstep; 2506 + __debugger_iabr_match = xmon_iabr_match; 2507 + __debugger_dabr_match = xmon_dabr_match; 2508 + __debugger_fault_handler = xmon_fault_handler; 2509 + } else { 2510 + __debugger = NULL; 2511 + __debugger_ipi = NULL; 2512 + __debugger_bpt = NULL; 2513 + __debugger_sstep = NULL; 2514 + __debugger_iabr_match = NULL; 2515 + __debugger_dabr_match = NULL; 2516 + __debugger_fault_handler = NULL; 2517 + } 2518 } 2519 2520 void dump_segments(void)
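Editor's sketch: giving xmon_init() an enable flag makes the debugger hooks symmetric, so a caller can tear them down as well as install them (the sysrq path in start.c above now passes 1 explicitly). A standalone model of the pattern, with one hook standing in for the whole __debugger_* set:

#include <stdio.h>

static void (*debugger_hook)(void);	/* stands in for __debugger et al. */

static void xmon_model(void)
{
	printf("entered debugger\n");
}

static void xmon_init_model(int enable)
{
	debugger_hook = enable ? xmon_model : NULL;
}

int main(void)
{
	xmon_init_model(1);
	if (debugger_hook)
		debugger_hook();
	xmon_init_model(0);
	printf("hook is %s\n", debugger_hook ? "set" : "clear");
	return 0;
}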
+25 -67
include/asm-ppc64/abs_addr.h
··· 16 #include <asm/page.h> 17 #include <asm/prom.h> 18 #include <asm/lmb.h> 19 20 - typedef u32 msChunks_entry; 21 - struct msChunks { 22 unsigned long num_chunks; 23 unsigned long chunk_size; 24 unsigned long chunk_shift; 25 unsigned long chunk_mask; 26 - msChunks_entry *abs; 27 }; 28 29 - extern struct msChunks msChunks; 30 31 - extern unsigned long msChunks_alloc(unsigned long, unsigned long, unsigned long); 32 - extern unsigned long reloc_offset(void); 33 34 - #ifdef CONFIG_MSCHUNKS 35 - 36 - static inline unsigned long 37 - chunk_to_addr(unsigned long chunk) 38 { 39 - unsigned long offset = reloc_offset(); 40 - struct msChunks *_msChunks = PTRRELOC(&msChunks); 41 - 42 - return chunk << _msChunks->chunk_shift; 43 } 44 45 - static inline unsigned long 46 - addr_to_chunk(unsigned long addr) 47 { 48 - unsigned long offset = reloc_offset(); 49 - struct msChunks *_msChunks = PTRRELOC(&msChunks); 50 - 51 - return addr >> _msChunks->chunk_shift; 52 } 53 54 - static inline unsigned long 55 - chunk_offset(unsigned long addr) 56 { 57 - unsigned long offset = reloc_offset(); 58 - struct msChunks *_msChunks = PTRRELOC(&msChunks); 59 60 - return addr & _msChunks->chunk_mask; 61 } 62 - 63 - static inline unsigned long 64 - abs_chunk(unsigned long pchunk) 65 - { 66 - unsigned long offset = reloc_offset(); 67 - struct msChunks *_msChunks = PTRRELOC(&msChunks); 68 - if ( pchunk >= _msChunks->num_chunks ) { 69 - return pchunk; 70 - } 71 - return PTRRELOC(_msChunks->abs)[pchunk]; 72 - } 73 - 74 - /* A macro so it can take pointers or unsigned long. */ 75 - #define phys_to_abs(pa) \ 76 - ({ unsigned long _pa = (unsigned long)(pa); \ 77 - chunk_to_addr(abs_chunk(addr_to_chunk(_pa))) + chunk_offset(_pa); \ 78 - }) 79 - 80 - static inline unsigned long 81 - physRpn_to_absRpn(unsigned long rpn) 82 - { 83 - unsigned long pa = rpn << PAGE_SHIFT; 84 - unsigned long aa = phys_to_abs(pa); 85 - return (aa >> PAGE_SHIFT); 86 - } 87 - 88 - /* A macro so it can take pointers or unsigned long. */ 89 - #define abs_to_phys(aa) lmb_abs_to_phys((unsigned long)(aa)) 90 - 91 - #else /* !CONFIG_MSCHUNKS */ 92 - 93 - #define chunk_to_addr(chunk) ((unsigned long)(chunk)) 94 - #define addr_to_chunk(addr) (addr) 95 - #define chunk_offset(addr) (0) 96 - #define abs_chunk(pchunk) (pchunk) 97 - 98 - #define phys_to_abs(pa) (pa) 99 - #define physRpn_to_absRpn(rpn) (rpn) 100 - #define abs_to_phys(aa) (aa) 101 - 102 - #endif /* !CONFIG_MSCHUNKS */ 103 104 /* Convenience macros */ 105 #define virt_to_abs(va) phys_to_abs(__pa(va)) 106 - #define abs_to_virt(aa) __va(abs_to_phys(aa)) 107 108 #endif /* _ABS_ADDR_H */
··· 16 #include <asm/page.h> 17 #include <asm/prom.h> 18 #include <asm/lmb.h> 19 + #include <asm/firmware.h> 20 21 + struct mschunks_map { 22 unsigned long num_chunks; 23 unsigned long chunk_size; 24 unsigned long chunk_shift; 25 unsigned long chunk_mask; 26 + u32 *mapping; 27 }; 28 29 + extern struct mschunks_map mschunks_map; 30 31 + /* Chunks are 256 KB */ 32 + #define MSCHUNKS_CHUNK_SHIFT (18) 33 + #define MSCHUNKS_CHUNK_SIZE (1UL << MSCHUNKS_CHUNK_SHIFT) 34 + #define MSCHUNKS_OFFSET_MASK (MSCHUNKS_CHUNK_SIZE - 1) 35 36 + static inline unsigned long chunk_to_addr(unsigned long chunk) 37 { 38 + return chunk << MSCHUNKS_CHUNK_SHIFT; 39 } 40 41 + static inline unsigned long addr_to_chunk(unsigned long addr) 42 { 43 + return addr >> MSCHUNKS_CHUNK_SHIFT; 44 } 45 46 + static inline unsigned long phys_to_abs(unsigned long pa) 47 { 48 + unsigned long chunk; 49 50 + /* This is a no-op on non-iSeries */ 51 + if (!firmware_has_feature(FW_FEATURE_ISERIES)) 52 + return pa; 53 + 54 + chunk = addr_to_chunk(pa); 55 + 56 + if (chunk < mschunks_map.num_chunks) 57 + chunk = mschunks_map.mapping[chunk]; 58 + 59 + return chunk_to_addr(chunk) + (pa & MSCHUNKS_OFFSET_MASK); 60 } 61 62 /* Convenience macros */ 63 #define virt_to_abs(va) phys_to_abs(__pa(va)) 64 + #define abs_to_virt(aa) __va(aa) 65 66 #endif /* _ABS_ADDR_H */
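Editor's sketch: the new phys_to_abs() does the chunk translation inline: split the physical address into a 256KB chunk number and an offset, remap the chunk through mschunks_map, and reassemble. A standalone model with a made-up four-entry mapping table:

#include <stdio.h>
#include <stdint.h>

#define CHUNK_SHIFT 18			/* 256KB chunks */
#define CHUNK_SIZE  (1UL << CHUNK_SHIFT)
#define OFFSET_MASK (CHUNK_SIZE - 1)

static uint32_t mapping[4] = { 7, 5, 2, 9 };	/* illustrative remap table */

static unsigned long phys_to_abs_model(unsigned long pa, int on_iseries)
{
	unsigned long chunk;

	if (!on_iseries)	/* a no-op on non-iSeries, as in the header */
		return pa;

	chunk = pa >> CHUNK_SHIFT;
	if (chunk < 4)
		chunk = mapping[chunk];

	return (chunk << CHUNK_SHIFT) + (pa & OFFSET_MASK);
}

int main(void)
{
	/* 0x50010 is chunk 1, offset 0x10010; chunk 1 maps to 5 -> 0x150010 */
	printf("0x%lx\n", phys_to_abs_model(0x50010UL, 1));
	return 0;
}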
+3 -44
include/asm-ppc64/cputable.h
··· 56 * BHT, SPD, etc... from head.S before branching to identify_machine 57 */ 58 cpu_setup_t cpu_setup; 59 - 60 - /* This is used to identify firmware features which are available 61 - * to the kernel. 62 - */ 63 - unsigned long firmware_features; 64 }; 65 66 extern struct cpu_spec cpu_specs[]; ··· 65 { 66 return cur_cpu_spec->cpu_features & feature; 67 } 68 - 69 - 70 - /* firmware feature bitmask values */ 71 - #define FIRMWARE_MAX_FEATURES 63 72 - 73 - #define FW_FEATURE_PFT (1UL<<0) 74 - #define FW_FEATURE_TCE (1UL<<1) 75 - #define FW_FEATURE_SPRG0 (1UL<<2) 76 - #define FW_FEATURE_DABR (1UL<<3) 77 - #define FW_FEATURE_COPY (1UL<<4) 78 - #define FW_FEATURE_ASR (1UL<<5) 79 - #define FW_FEATURE_DEBUG (1UL<<6) 80 - #define FW_FEATURE_TERM (1UL<<7) 81 - #define FW_FEATURE_PERF (1UL<<8) 82 - #define FW_FEATURE_DUMP (1UL<<9) 83 - #define FW_FEATURE_INTERRUPT (1UL<<10) 84 - #define FW_FEATURE_MIGRATE (1UL<<11) 85 - #define FW_FEATURE_PERFMON (1UL<<12) 86 - #define FW_FEATURE_CRQ (1UL<<13) 87 - #define FW_FEATURE_VIO (1UL<<14) 88 - #define FW_FEATURE_RDMA (1UL<<15) 89 - #define FW_FEATURE_LLAN (1UL<<16) 90 - #define FW_FEATURE_BULK (1UL<<17) 91 - #define FW_FEATURE_XDABR (1UL<<18) 92 - #define FW_FEATURE_MULTITCE (1UL<<19) 93 - #define FW_FEATURE_SPLPAR (1UL<<20) 94 - 95 - typedef struct { 96 - unsigned long val; 97 - char * name; 98 - } firmware_feature_t; 99 - 100 - extern firmware_feature_t firmware_features_table[]; 101 102 #endif /* __ASSEMBLY__ */ 103 ··· 102 #define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0000080000000000) 103 #define CPU_FTR_CTRL ASM_CONST(0x0000100000000000) 104 105 - /* Platform firmware features */ 106 - #define FW_FTR_ ASM_CONST(0x0000000000000001) 107 - 108 #ifndef __ASSEMBLY__ 109 #define COMMON_USER_PPC64 (PPC_FEATURE_32 | PPC_FEATURE_64 | \ 110 PPC_FEATURE_HAS_FPU | PPC_FEATURE_HAS_MMU) 111 ··· 116 #define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE) 117 #else 118 #define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE) 119 - #endif 120 121 - #define COMMON_PPC64_FW (0) 122 - #endif 123 124 #ifdef __ASSEMBLY__ 125
··· 56 * BHT, SPD, etc... from head.S before branching to identify_machine 57 */ 58 cpu_setup_t cpu_setup; 59 }; 60 61 extern struct cpu_spec cpu_specs[]; ··· 70 { 71 return cur_cpu_spec->cpu_features & feature; 72 } 73 74 #endif /* __ASSEMBLY__ */ 75 ··· 140 #define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0000080000000000) 141 #define CPU_FTR_CTRL ASM_CONST(0x0000100000000000) 142 143 #ifndef __ASSEMBLY__ 144 + 145 #define COMMON_USER_PPC64 (PPC_FEATURE_32 | PPC_FEATURE_64 | \ 146 PPC_FEATURE_HAS_FPU | PPC_FEATURE_HAS_MMU) 147 ··· 156 #define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE) 157 #else 158 #define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE) 159 + #endif /* CONFIG_PPC_ISERIES */ 160 161 + #endif /* __ASSEMBLY__ */ 162 163 #ifdef __ASSEMBLY__ 164
+101
include/asm-ppc64/firmware.h
···
··· 1 + /* 2 + * include/asm-ppc64/firmware.h 3 + * 4 + * Extracted from include/asm-ppc64/cputable.h 5 + * 6 + * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) 7 + * 8 + * Modifications for ppc64: 9 + * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com> 10 + * 11 + * This program is free software; you can redistribute it and/or 12 + * modify it under the terms of the GNU General Public License 13 + * as published by the Free Software Foundation; either version 14 + * 2 of the License, or (at your option) any later version. 15 + */ 16 + #ifndef __ASM_PPC_FIRMWARE_H 17 + #define __ASM_PPC_FIRMWARE_H 18 + 19 + #ifdef __KERNEL__ 20 + 21 + #ifndef __ASSEMBLY__ 22 + 23 + /* firmware feature bitmask values */ 24 + #define FIRMWARE_MAX_FEATURES 63 25 + 26 + #define FW_FEATURE_PFT (1UL<<0) 27 + #define FW_FEATURE_TCE (1UL<<1) 28 + #define FW_FEATURE_SPRG0 (1UL<<2) 29 + #define FW_FEATURE_DABR (1UL<<3) 30 + #define FW_FEATURE_COPY (1UL<<4) 31 + #define FW_FEATURE_ASR (1UL<<5) 32 + #define FW_FEATURE_DEBUG (1UL<<6) 33 + #define FW_FEATURE_TERM (1UL<<7) 34 + #define FW_FEATURE_PERF (1UL<<8) 35 + #define FW_FEATURE_DUMP (1UL<<9) 36 + #define FW_FEATURE_INTERRUPT (1UL<<10) 37 + #define FW_FEATURE_MIGRATE (1UL<<11) 38 + #define FW_FEATURE_PERFMON (1UL<<12) 39 + #define FW_FEATURE_CRQ (1UL<<13) 40 + #define FW_FEATURE_VIO (1UL<<14) 41 + #define FW_FEATURE_RDMA (1UL<<15) 42 + #define FW_FEATURE_LLAN (1UL<<16) 43 + #define FW_FEATURE_BULK (1UL<<17) 44 + #define FW_FEATURE_XDABR (1UL<<18) 45 + #define FW_FEATURE_MULTITCE (1UL<<19) 46 + #define FW_FEATURE_SPLPAR (1UL<<20) 47 + #define FW_FEATURE_ISERIES (1UL<<21) 48 + 49 + enum { 50 + FW_FEATURE_PSERIES_POSSIBLE = FW_FEATURE_PFT | FW_FEATURE_TCE | 51 + FW_FEATURE_SPRG0 | FW_FEATURE_DABR | FW_FEATURE_COPY | 52 + FW_FEATURE_ASR | FW_FEATURE_DEBUG | FW_FEATURE_TERM | 53 + FW_FEATURE_PERF | FW_FEATURE_DUMP | FW_FEATURE_INTERRUPT | 54 + FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ | 55 + FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN | 56 + FW_FEATURE_BULK | FW_FEATURE_XDABR | FW_FEATURE_MULTITCE | 57 + FW_FEATURE_SPLPAR, 58 + FW_FEATURE_PSERIES_ALWAYS = 0, 59 + FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES, 60 + FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES, 61 + FW_FEATURE_POSSIBLE = 62 + #ifdef CONFIG_PPC_PSERIES 63 + FW_FEATURE_PSERIES_POSSIBLE | 64 + #endif 65 + #ifdef CONFIG_PPC_ISERIES 66 + FW_FEATURE_ISERIES_POSSIBLE | 67 + #endif 68 + 0, 69 + FW_FEATURE_ALWAYS = 70 + #ifdef CONFIG_PPC_PSERIES 71 + FW_FEATURE_PSERIES_ALWAYS & 72 + #endif 73 + #ifdef CONFIG_PPC_ISERIES 74 + FW_FEATURE_ISERIES_ALWAYS & 75 + #endif 76 + FW_FEATURE_POSSIBLE, 77 + }; 78 + 79 + /* This is used to identify firmware features which are available 80 + * to the kernel. 81 + */ 82 + extern unsigned long ppc64_firmware_features; 83 + 84 + static inline unsigned long firmware_has_feature(unsigned long feature) 85 + { 86 + return (FW_FEATURE_ALWAYS & feature) || 87 + (FW_FEATURE_POSSIBLE & ppc64_firmware_features & feature); 88 + } 89 + 90 + #ifdef CONFIG_PPC_PSERIES 91 + typedef struct { 92 + unsigned long val; 93 + char * name; 94 + } firmware_feature_t; 95 + 96 + extern firmware_feature_t firmware_features_table[]; 97 + #endif 98 + 99 + #endif /* __ASSEMBLY__ */ 100 + #endif /* __KERNEL__ */ 101 + #endif /* __ASM_PPC_FIRMWARE_H */
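Editor's sketch: firmware_has_feature() is arranged so the compiler can fold the test. FW_FEATURE_POSSIBLE and FW_FEATURE_ALWAYS are assembled from the CONFIG_ options, so a single-platform kernel reduces the check to a constant or a single AND against ppc64_firmware_features. A standalone model of the bit logic for a hypothetical iSeries-only configuration:

#include <stdio.h>

#define FW_FEATURE_ISERIES (1UL << 21)

/* Stand-ins for a kernel built with CONFIG_PPC_ISERIES only */
#define FW_FEATURE_POSSIBLE FW_FEATURE_ISERIES
#define FW_FEATURE_ALWAYS   FW_FEATURE_ISERIES

static unsigned long ppc64_firmware_features_model;

static unsigned long has_feature(unsigned long feature)
{
	return (FW_FEATURE_ALWAYS & feature) ||
	       (FW_FEATURE_POSSIBLE & ppc64_firmware_features_model & feature);
}

int main(void)
{
	/* ISERIES is in the ALWAYS mask: no runtime data is consulted */
	printf("iSeries: %lu\n", has_feature(FW_FEATURE_ISERIES));
	return 0;
}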
+1 -1
include/asm-ppc64/imalloc.h
··· 6 */ 7 #define PHBS_IO_BASE VMALLOC_END 8 #define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */ 9 - #define IMALLOC_END (VMALLOC_START + EADDR_MASK) 10 11 12 /* imalloc region types */
··· 6 */ 7 #define PHBS_IO_BASE VMALLOC_END 8 #define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */ 9 + #define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE) 10 11 12 /* imalloc region types */
-3
include/asm-ppc64/iommu.h
··· 104 105 #ifdef CONFIG_PPC_ISERIES 106 107 - /* Initializes tables for bio buses */ 108 - extern void __init iommu_vio_init(void); 109 - 110 struct iSeries_Device_Node; 111 /* Creates table for an individual device node */ 112 extern void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn);
··· 104 105 #ifdef CONFIG_PPC_ISERIES 106 107 struct iSeries_Device_Node; 108 /* Creates table for an individual device node */ 109 extern void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn);
-1
include/asm-ppc64/lmb.h
··· 22 23 struct lmb_property { 24 unsigned long base; 25 - unsigned long physbase; 26 unsigned long size; 27 }; 28
··· 22 23 struct lmb_property { 24 unsigned long base; 25 unsigned long size; 26 }; 27
+3
include/asm-ppc64/machdep.h
··· 140 141 /* Idle loop for this platform, leave empty for default idle loop */ 142 int (*idle_loop)(void); 143 }; 144 145 extern int default_idle(void);
··· 140 141 /* Idle loop for this platform, leave empty for default idle loop */ 142 int (*idle_loop)(void); 143 + 144 + /* Function to enable pmcs for this platform, called once per cpu. */ 145 + void (*enable_pmcs)(void); 146 }; 147 148 extern int default_idle(void);
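Editor's sketch: the new enable_pmcs member follows the usual ppc_md convention: a platform fills in one function pointer and generic code calls it once per cpu if set. A standalone model of the hook shape; power4_enable_pmcs, declared in pmc.h at the end of this diff, is the sort of function a platform would plug in, and everything else here is illustrative.

#include <stdio.h>

struct machdep_calls_model {
	void (*enable_pmcs)(void);
};

static void power4_enable_pmcs_model(void)
{
	printf("PMCs enabled for this cpu\n");
}

static struct machdep_calls_model ppc_md_model = {
	.enable_pmcs = power4_enable_pmcs_model,
};

int main(void)
{
	if (ppc_md_model.enable_pmcs)	/* left NULL on no-op platforms */
		ppc_md_model.enable_pmcs();
	return 0;
}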
+10 -6
include/asm-ppc64/mmu.h
··· 28 #define STE_VSID_SHIFT 12 29 30 /* Location of cpu0's segment table */ 31 - #define STAB0_PAGE 0x9 32 #define STAB0_PHYS_ADDR (STAB0_PAGE<<PAGE_SHIFT) 33 - #define STAB0_VIRT_ADDR (KERNELBASE+STAB0_PHYS_ADDR) 34 35 /* 36 * SLB ··· 262 #define VSID_BITS 36 263 #define VSID_MODULUS ((1UL<<VSID_BITS)-1) 264 265 - #define CONTEXT_BITS 20 266 - #define USER_ESID_BITS 15 267 268 /* 269 * This macro generates asm code to compute the VSID scramble ··· 307 typedef struct { 308 mm_context_id_t id; 309 #ifdef CONFIG_HUGETLB_PAGE 310 - pgd_t *huge_pgdir; 311 - u16 htlb_segs; /* bitmask */ 312 #endif 313 } mm_context_t; 314
··· 28 #define STE_VSID_SHIFT 12 29 30 /* Location of cpu0's segment table */ 31 + #define STAB0_PAGE 0x6 32 #define STAB0_PHYS_ADDR (STAB0_PAGE<<PAGE_SHIFT) 33 + 34 + #ifndef __ASSEMBLY__ 35 + extern char initial_stab[]; 36 + #endif /* !__ASSEMBLY__ */ 37 38 /* 39 * SLB ··· 259 #define VSID_BITS 36 260 #define VSID_MODULUS ((1UL<<VSID_BITS)-1) 261 262 + #define CONTEXT_BITS 19 263 + #define USER_ESID_BITS 16 264 + 265 + #define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT)) 266 267 /* 268 * This macro generates asm code to compute the VSID scramble ··· 302 typedef struct { 303 mm_context_id_t id; 304 #ifdef CONFIG_HUGETLB_PAGE 305 + u16 low_htlb_areas, high_htlb_areas; 306 #endif 307 } mm_context_t; 308
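Editor's sketch: the CONTEXT_BITS/USER_ESID_BITS change moves one bit from contexts to per-process segments: 2^16 segments of 2^28 bytes gives the 16TB USER_VSID_RANGE, and 19 + 16 = 35 proto-VSID bits still fit the 36-bit VSID space. A standalone arithmetic check:

#include <stdio.h>

#define SID_SHIFT      28
#define CONTEXT_BITS   19
#define USER_ESID_BITS 16
#define VSID_BITS      36

int main(void)
{
	unsigned long long range = 1ULL << (USER_ESID_BITS + SID_SHIFT);

	printf("USER_VSID_RANGE = %llu TB\n", range >> 40);	/* 16 */
	printf("proto-VSID bits = %d (VSID space: %d bits)\n",
	       CONTEXT_BITS + USER_ESID_BITS, VSID_BITS);
	return 0;
}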
-7
include/asm-ppc64/naca.h
··· 12 13 #include <asm/types.h> 14 15 - #ifndef __ASSEMBLY__ 16 - 17 struct naca_struct { 18 /* Kernel only data - undefined for user space */ 19 void *xItVpdAreas; /* VPD Data 0x00 */ ··· 20 }; 21 22 extern struct naca_struct naca; 23 - 24 - #endif /* __ASSEMBLY__ */ 25 - 26 - #define NACA_PAGE 0x4 27 - #define NACA_PHYS_ADDR (NACA_PAGE<<PAGE_SHIFT) 28 29 #endif /* _NACA_H */
··· 12 13 #include <asm/types.h> 14 15 struct naca_struct { 16 /* Kernel only data - undefined for user space */ 17 void *xItVpdAreas; /* VPD Data 0x00 */ ··· 22 }; 23 24 extern struct naca_struct naca; 25 26 #endif /* _NACA_H */
+32 -23
include/asm-ppc64/page.h
··· 37 38 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 39 40 - /* For 64-bit processes the hugepage range is 1T-1.5T */ 41 - #define TASK_HPAGE_BASE ASM_CONST(0x0000010000000000) 42 - #define TASK_HPAGE_END ASM_CONST(0x0000018000000000) 43 44 #define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \ 45 - (1U << GET_ESID(addr))) & 0xffff) 46 47 #define ARCH_HAS_HUGEPAGE_ONLY_RANGE 48 #define ARCH_HAS_PREPARE_HUGEPAGE_RANGE 49 50 #define touches_hugepage_low_range(mm, addr, len) \ 51 - (LOW_ESID_MASK((addr), (len)) & mm->context.htlb_segs) 52 - #define touches_hugepage_high_range(addr, len) \ 53 - (((addr) > (TASK_HPAGE_BASE-(len))) && ((addr) < TASK_HPAGE_END)) 54 55 #define __within_hugepage_low_range(addr, len, segmask) \ 56 ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask)) 57 #define within_hugepage_low_range(addr, len) \ 58 __within_hugepage_low_range((addr), (len), \ 59 - current->mm->context.htlb_segs) 60 - #define within_hugepage_high_range(addr, len) (((addr) >= TASK_HPAGE_BASE) \ 61 - && ((addr)+(len) <= TASK_HPAGE_END) && ((addr)+(len) >= (addr))) 62 63 #define is_hugepage_only_range(mm, addr, len) \ 64 - (touches_hugepage_high_range((addr), (len)) || \ 65 touches_hugepage_low_range((mm), (addr), (len))) 66 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA 67 68 #define in_hugepage_area(context, addr) \ 69 (cpu_has_feature(CPU_FTR_16M_PAGE) && \ 70 - ( (((addr) >= TASK_HPAGE_BASE) && ((addr) < TASK_HPAGE_END)) || \ 71 ( ((addr) < 0x100000000L) && \ 72 - ((1 << GET_ESID(addr)) & (context).htlb_segs) ) ) ) 73 74 #else /* !CONFIG_HUGETLB_PAGE */ 75 ··· 131 * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b. 132 */ 133 typedef struct { unsigned long pte; } pte_t; 134 - typedef struct { unsigned int pmd; } pmd_t; 135 - typedef struct { unsigned int pgd; } pgd_t; 136 typedef struct { unsigned long pgprot; } pgprot_t; 137 138 #define pte_val(x) ((x).pte) 139 #define pmd_val(x) ((x).pmd) 140 #define pgd_val(x) ((x).pgd) 141 #define pgprot_val(x) ((x).pgprot) 142 143 - #define __pte(x) ((pte_t) { (x) } ) 144 - #define __pmd(x) ((pmd_t) { (x) } ) 145 - #define __pgd(x) ((pgd_t) { (x) } ) 146 - #define __pgprot(x) ((pgprot_t) { (x) } ) 147 148 #else 149 /* 150 * .. while these make it easier on the compiler 151 */ 152 typedef unsigned long pte_t; 153 - typedef unsigned int pmd_t; 154 - typedef unsigned int pgd_t; 155 typedef unsigned long pgprot_t; 156 157 #define pte_val(x) (x) 158 #define pmd_val(x) (x) 159 #define pgd_val(x) (x) 160 #define pgprot_val(x) (x) 161 162 #define __pte(x) (x) 163 #define __pmd(x) (x) 164 #define __pgd(x) (x) 165 #define __pgprot(x) (x) 166 ··· 219 #define KERNEL_REGION_ID (KERNELBASE >> REGION_SHIFT) 220 #define USER_REGION_ID (0UL) 221 #define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT) 222 - 223 - #define __bpn_to_ba(x) ((((unsigned long)(x)) << PAGE_SHIFT) + KERNELBASE) 224 - #define __ba_to_bpn(x) ((((unsigned long)(x)) & ~REGION_MASK) >> PAGE_SHIFT) 225 226 #define __va(x) ((void *)((unsigned long)(x) + KERNELBASE)) 227
··· 37 38 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 39 40 + #define HTLB_AREA_SHIFT 40 41 + #define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT) 42 + #define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT) 43 44 #define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \ 45 - (1U << GET_ESID(addr))) & 0xffff) 46 + #define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \ 47 + - (1U << GET_HTLB_AREA(addr))) & 0xffff) 48 49 #define ARCH_HAS_HUGEPAGE_ONLY_RANGE 50 #define ARCH_HAS_PREPARE_HUGEPAGE_RANGE 51 + #define ARCH_HAS_SETCLEAR_HUGE_PTE 52 53 #define touches_hugepage_low_range(mm, addr, len) \ 54 + (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas) 55 + #define touches_hugepage_high_range(mm, addr, len) \ 56 + (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas) 57 58 #define __within_hugepage_low_range(addr, len, segmask) \ 59 ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask)) 60 #define within_hugepage_low_range(addr, len) \ 61 __within_hugepage_low_range((addr), (len), \ 62 + current->mm->context.low_htlb_areas) 63 + #define __within_hugepage_high_range(addr, len, zonemask) \ 64 + ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask)) 65 + #define within_hugepage_high_range(addr, len) \ 66 + __within_hugepage_high_range((addr), (len), \ 67 + current->mm->context.high_htlb_areas) 68 69 #define is_hugepage_only_range(mm, addr, len) \ 70 + (touches_hugepage_high_range((mm), (addr), (len)) || \ 71 touches_hugepage_low_range((mm), (addr), (len))) 72 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA 73 74 #define in_hugepage_area(context, addr) \ 75 (cpu_has_feature(CPU_FTR_16M_PAGE) && \ 76 + ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \ 77 ( ((addr) < 0x100000000L) && \ 78 + ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) ) 79 80 #else /* !CONFIG_HUGETLB_PAGE */ 81 ··· 125 * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b. 126 */ 127 typedef struct { unsigned long pte; } pte_t; 128 + typedef struct { unsigned long pmd; } pmd_t; 129 + typedef struct { unsigned long pud; } pud_t; 130 + typedef struct { unsigned long pgd; } pgd_t; 131 typedef struct { unsigned long pgprot; } pgprot_t; 132 133 #define pte_val(x) ((x).pte) 134 #define pmd_val(x) ((x).pmd) 135 + #define pud_val(x) ((x).pud) 136 #define pgd_val(x) ((x).pgd) 137 #define pgprot_val(x) ((x).pgprot) 138 139 + #define __pte(x) ((pte_t) { (x) }) 140 + #define __pmd(x) ((pmd_t) { (x) }) 141 + #define __pud(x) ((pud_t) { (x) }) 142 + #define __pgd(x) ((pgd_t) { (x) }) 143 + #define __pgprot(x) ((pgprot_t) { (x) }) 144 145 #else 146 /* 147 * .. while these make it easier on the compiler 148 */ 149 typedef unsigned long pte_t; 150 + typedef unsigned long pmd_t; 151 + typedef unsigned long pud_t; 152 + typedef unsigned long pgd_t; 153 typedef unsigned long pgprot_t; 154 155 #define pte_val(x) (x) 156 #define pmd_val(x) (x) 157 + #define pud_val(x) (x) 158 #define pgd_val(x) (x) 159 #define pgprot_val(x) (x) 160 161 #define __pte(x) (x) 162 #define __pmd(x) (x) 163 + #define __pud(x) (x) 164 #define __pgd(x) (x) 165 #define __pgprot(x) (x) 166 ··· 207 #define KERNEL_REGION_ID (KERNELBASE >> REGION_SHIFT) 208 #define USER_REGION_ID (0UL) 209 #define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT) 210 211 #define __va(x) ((void *)((unsigned long)(x) + KERNELBASE)) 212
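Editor's sketch: HTLB_AREA_MASK is LOW_ESID_MASK one level up: each bit covers a 1TB area rather than a 256MB segment, and the pair of masks feeds the touches_/within_hugepage macros above. A standalone check that a 2TB mapping starting at 1TB sets bits 1 and 2:

#include <stdio.h>

#define HTLB_AREA_SHIFT 40
#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)
#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA((addr)+(len)-1)+1)) \
				    - (1U << GET_HTLB_AREA(addr))) & 0xffff)

int main(void)
{
	unsigned long long addr = 1ULL << 40;	/* 1TB */
	unsigned long long len = 2ULL << 40;	/* 2TB */

	printf("mask = 0x%04x\n", HTLB_AREA_MASK(addr, len));	/* 0x0006 */
	return 0;
}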
+61 -32
include/asm-ppc64/pgalloc.h
··· 6 #include <linux/cpumask.h> 7 #include <linux/percpu.h> 8 9 - extern kmem_cache_t *zero_cache; 10 11 /* 12 * This program is free software; you can redistribute it and/or ··· 20 * 2 of the License, or (at your option) any later version. 21 */ 22 23 - static inline pgd_t * 24 - pgd_alloc(struct mm_struct *mm) 25 { 26 - return kmem_cache_alloc(zero_cache, GFP_KERNEL); 27 } 28 29 - static inline void 30 - pgd_free(pgd_t *pgd) 31 { 32 - kmem_cache_free(zero_cache, pgd); 33 } 34 35 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) 36 37 - static inline pmd_t * 38 - pmd_alloc_one(struct mm_struct *mm, unsigned long addr) 39 { 40 - return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT); 41 } 42 43 - static inline void 44 - pmd_free(pmd_t *pmd) 45 { 46 - kmem_cache_free(zero_cache, pmd); 47 } 48 49 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte) ··· 62 63 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 64 { 65 - return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT); 66 } 67 68 static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) 69 { 70 - pte_t *pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT); 71 - if (pte) 72 - return virt_to_page(pte); 73 - return NULL; 74 } 75 76 static inline void pte_free_kernel(pte_t *pte) 77 { 78 - kmem_cache_free(zero_cache, pte); 79 } 80 81 static inline void pte_free(struct page *ptepage) 82 { 83 - kmem_cache_free(zero_cache, page_address(ptepage)); 84 } 85 86 - struct pte_freelist_batch 87 { 88 - struct rcu_head rcu; 89 - unsigned int index; 90 - struct page * pages[0]; 91 - }; 92 93 - #define PTE_FREELIST_SIZE ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) / \ 94 - sizeof(struct page *)) 95 96 - extern void pte_free_now(struct page *ptepage); 97 - extern void pte_free_submit(struct pte_freelist_batch *batch); 98 99 - DECLARE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur); 100 101 - void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage); 102 - #define __pmd_free_tlb(tlb, pmd) __pte_free_tlb(tlb, virt_to_page(pmd)) 103 104 #define check_pgt_cache() do { } while (0) 105
··· 6 #include <linux/cpumask.h> 7 #include <linux/percpu.h> 8 9 + extern kmem_cache_t *pgtable_cache[]; 10 + 11 + #define PTE_CACHE_NUM 0 12 + #define PMD_CACHE_NUM 1 13 + #define PUD_CACHE_NUM 1 14 + #define PGD_CACHE_NUM 0 15 16 /* 17 * This program is free software; you can redistribute it and/or ··· 15 * 2 of the License, or (at your option) any later version. 16 */ 17 18 + static inline pgd_t *pgd_alloc(struct mm_struct *mm) 19 { 20 + return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL); 21 } 22 23 + static inline void pgd_free(pgd_t *pgd) 24 { 25 + kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd); 26 + } 27 + 28 + #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD) 29 + 30 + static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) 31 + { 32 + return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM], 33 + GFP_KERNEL|__GFP_REPEAT); 34 + } 35 + 36 + static inline void pud_free(pud_t *pud) 37 + { 38 + kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud); 39 + } 40 41 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) 42 43 + static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) 44 { 45 + return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM], 46 + GFP_KERNEL|__GFP_REPEAT); 47 } 48 49 + static inline void pmd_free(pmd_t *pmd) 50 { 51 + kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd); 52 } 53 54 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte) ··· 47 48 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 49 { 50 + return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM], 51 + GFP_KERNEL|__GFP_REPEAT); 52 } 53 54 static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) 55 { 56 + return virt_to_page(pte_alloc_one_kernel(mm, address)); 57 } 58 59 static inline void pte_free_kernel(pte_t *pte) 60 { 61 + kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte); 62 } 63 64 static inline void pte_free(struct page *ptepage) 65 { 66 + pte_free_kernel(page_address(ptepage)); 67 } 68 69 + #define PGF_CACHENUM_MASK 0xf 70 + 71 + typedef struct pgtable_free { 72 + unsigned long val; 73 + } pgtable_free_t; 74 + 75 + static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum, 76 + unsigned long mask) 77 { 78 + BUG_ON(cachenum > PGF_CACHENUM_MASK); 79 80 + return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum}; 81 + } 82 83 + static inline void pgtable_free(pgtable_free_t pgf) 84 + { 85 + void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK); 86 + int cachenum = pgf.val & PGF_CACHENUM_MASK; 87 88 + kmem_cache_free(pgtable_cache[cachenum], p); 89 + } 90 91 + void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); 92 + 93 + #define __pte_free_tlb(tlb, ptepage) \ 94 + pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ 95 + PTE_CACHE_NUM, PTE_TABLE_SIZE-1)) 96 + #define __pmd_free_tlb(tlb, pmd) \ 97 + pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ 98 + PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) 99 + #define __pud_free_tlb(tlb, pud) \ 100 + pgtable_free_tlb(tlb, pgtable_free_cache(pud, \ 101 + PUD_CACHE_NUM, PUD_TABLE_SIZE-1)) 102 103 #define check_pgt_cache() do { } while (0) 104
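Editor's sketch: pgtable_free_t packs the owning cache's number into the low bits of the table pointer, which is safe because every table size (and hence slab alignment) is a multiple of 16, leaving the PGF_CACHENUM_MASK bits zero. A standalone model of the encode/decode round trip:

#include <stdio.h>
#include <stdint.h>

#define PGF_CACHENUM_MASK 0xf

/* Models pgtable_free_cache(): mask is table_size - 1 */
static uintptr_t make_token(uintptr_t table, int cachenum, uintptr_t mask)
{
	return (table & ~mask) | cachenum;
}

int main(void)
{
	/* A 1024-byte PMD/PUD table at 0x10000400, cache number 1 */
	uintptr_t tok = make_token(0x10000400, 1, 1024 - 1);

	printf("pointer: 0x%lx\n",
	       (unsigned long)(tok & ~(uintptr_t)PGF_CACHENUM_MASK));
	printf("cache:   %lu\n", (unsigned long)(tok & PGF_CACHENUM_MASK));
	return 0;
}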
+54 -38
include/asm-ppc64/pgtable.h
··· 15 #include <asm/tlbflush.h> 16 #endif /* __ASSEMBLY__ */ 17 18 - #include <asm-generic/pgtable-nopud.h> 19 - 20 /* 21 * Entries per page directory level. The PTE level must use a 64b record 22 * for each page table entry. The PMD and PGD level use a 32b record for 23 * each entry by assuming that each entry is page aligned. 24 */ 25 #define PTE_INDEX_SIZE 9 26 - #define PMD_INDEX_SIZE 10 27 - #define PGD_INDEX_SIZE 10 28 29 #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) 30 #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) 31 #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) 32 33 /* PMD_SHIFT determines what a second-level page table entry can map */ ··· 40 #define PMD_SIZE (1UL << PMD_SHIFT) 41 #define PMD_MASK (~(PMD_SIZE-1)) 42 43 - /* PGDIR_SHIFT determines what a third-level page table entry can map */ 44 - #define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) 45 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 46 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 47 ··· 55 /* 56 * Size of EA range mapped by our pagetables. 57 */ 58 - #define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \ 59 - PGD_INDEX_SIZE + PAGE_SHIFT) 60 - #define EADDR_MASK ((1UL << EADDR_SIZE) - 1) 61 62 /* 63 * Define the address range of the vmalloc VM area. 64 */ 65 #define VMALLOC_START (0xD000000000000000ul) 66 - #define VMALLOC_SIZE (0x10000000000UL) 67 #define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) 68 69 /* ··· 172 #ifndef __ASSEMBLY__ 173 int hash_huge_page(struct mm_struct *mm, unsigned long access, 174 unsigned long ea, unsigned long vsid, int local); 175 - 176 - void hugetlb_mm_free_pgd(struct mm_struct *mm); 177 #endif /* __ASSEMBLY__ */ 178 179 #define HAVE_ARCH_UNMAPPED_AREA ··· 179 #else 180 181 #define hash_huge_page(mm,a,ea,vsid,local) -1 182 - #define hugetlb_mm_free_pgd(mm) do {} while (0) 183 184 #endif 185 ··· 212 #define pte_pfn(x) ((unsigned long)((pte_val(x) >> PTE_SHIFT))) 213 #define pte_page(x) pfn_to_page(pte_pfn(x)) 214 215 - #define pmd_set(pmdp, ptep) \ 216 - (pmd_val(*(pmdp)) = __ba_to_bpn(ptep)) 217 #define pmd_none(pmd) (!pmd_val(pmd)) 218 #define pmd_bad(pmd) (pmd_val(pmd) == 0) 219 #define pmd_present(pmd) (pmd_val(pmd) != 0) 220 #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) 221 - #define pmd_page_kernel(pmd) (__bpn_to_ba(pmd_val(pmd))) 222 #define pmd_page(pmd) virt_to_page(pmd_page_kernel(pmd)) 223 224 - #define pud_set(pudp, pmdp) (pud_val(*(pudp)) = (__ba_to_bpn(pmdp))) 225 #define pud_none(pud) (!pud_val(pud)) 226 - #define pud_bad(pud) ((pud_val(pud)) == 0UL) 227 - #define pud_present(pud) (pud_val(pud) != 0UL) 228 - #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL) 229 - #define pud_page(pud) (__bpn_to_ba(pud_val(pud))) 230 231 /* 232 * Find an entry in a page-table-directory. We combine the address region 233 * (the high order N bits) and the pgd portion of the address. 234 */ 235 /* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */ 236 - #define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x7ff) 237 238 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) 239 240 - /* Find an entry in the second-level page table.. */ 241 - #define pmd_offset(pudp,addr) \ 242 - ((pmd_t *) pud_page(*(pudp)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))) 243 244 - /* Find an entry in the third-level page table.. 
*/ 245 #define pte_offset_kernel(dir,addr) \ 246 - ((pte_t *) pmd_page_kernel(*(dir)) \ 247 - + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))) 248 249 #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) 250 #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) ··· 479 #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) 480 481 #define pmd_ERROR(e) \ 482 - printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e)) 483 #define pgd_ERROR(e) \ 484 - printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e)) 485 486 extern pgd_t swapper_pg_dir[]; 487 488 extern void paging_init(void); 489 490 - /* 491 - * Because the huge pgtables are only 2 level, they can take 492 - * at most around 4M, much less than one hugepage which the 493 - * process is presumably entitled to use. So we don't bother 494 - * freeing up the pagetables on unmap, and wait until 495 - * destroy_context() to clean up the lot. 496 - */ 497 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \ 498 - do { } while (0) 499 500 /* 501 * This gets called at the end of handling a page fault, when
··· 15 #include <asm/tlbflush.h> 16 #endif /* __ASSEMBLY__ */ 17 18 /* 19 * Entries per page directory level. The PTE level must use a 64b record 20 * for each page table entry. The PMD and PGD level use a 32b record for 21 * each entry by assuming that each entry is page aligned. 22 */ 23 #define PTE_INDEX_SIZE 9 24 + #define PMD_INDEX_SIZE 7 25 + #define PUD_INDEX_SIZE 7 26 + #define PGD_INDEX_SIZE 9 27 + 28 + #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) 29 + #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) 30 + #define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE) 31 + #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) 32 33 #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) 34 #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) 35 + #define PTRS_PER_PUD (1 << PMD_INDEX_SIZE) 36 #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) 37 38 /* PMD_SHIFT determines what a second-level page table entry can map */ ··· 35 #define PMD_SIZE (1UL << PMD_SHIFT) 36 #define PMD_MASK (~(PMD_SIZE-1)) 37 38 + /* PUD_SHIFT determines what a third-level page table entry can map */ 39 + #define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) 40 + #define PUD_SIZE (1UL << PUD_SHIFT) 41 + #define PUD_MASK (~(PUD_SIZE-1)) 42 + 43 + /* PGDIR_SHIFT determines what a fourth-level page table entry can map */ 44 + #define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE) 45 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 46 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 47 ··· 45 /* 46 * Size of EA range mapped by our pagetables. 47 */ 48 + #define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \ 49 + PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) 50 + #define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE) 51 + 52 + #if TASK_SIZE_USER64 > PGTABLE_RANGE 53 + #error TASK_SIZE_USER64 exceeds pagetable range 54 + #endif 55 + 56 + #if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) 57 + #error TASK_SIZE_USER64 exceeds user VSID range 58 + #endif 59 60 /* 61 * Define the address range of the vmalloc VM area. 
62 */ 63 #define VMALLOC_START (0xD000000000000000ul) 64 + #define VMALLOC_SIZE (0x80000000000UL) 65 #define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) 66 67 /* ··· 154 #ifndef __ASSEMBLY__ 155 int hash_huge_page(struct mm_struct *mm, unsigned long access, 156 unsigned long ea, unsigned long vsid, int local); 157 #endif /* __ASSEMBLY__ */ 158 159 #define HAVE_ARCH_UNMAPPED_AREA ··· 163 #else 164 165 #define hash_huge_page(mm,a,ea,vsid,local) -1 166 167 #endif 168 ··· 197 #define pte_pfn(x) ((unsigned long)((pte_val(x) >> PTE_SHIFT))) 198 #define pte_page(x) pfn_to_page(pte_pfn(x)) 199 200 + #define pmd_set(pmdp, ptep) ({BUG_ON((u64)(ptep) < KERNELBASE); pmd_val(*(pmdp)) = (unsigned long)(ptep);}) 201 #define pmd_none(pmd) (!pmd_val(pmd)) 202 #define pmd_bad(pmd) (pmd_val(pmd) == 0) 203 #define pmd_present(pmd) (pmd_val(pmd) != 0) 204 #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) 205 + #define pmd_page_kernel(pmd) (pmd_val(pmd)) 206 #define pmd_page(pmd) virt_to_page(pmd_page_kernel(pmd)) 207 208 + #define pud_set(pudp, pmdp) (pud_val(*(pudp)) = (unsigned long)(pmdp)) 209 #define pud_none(pud) (!pud_val(pud)) 210 + #define pud_bad(pud) ((pud_val(pud)) == 0) 211 + #define pud_present(pud) (pud_val(pud) != 0) 212 + #define pud_clear(pudp) (pud_val(*(pudp)) = 0) 213 + #define pud_page(pud) (pud_val(pud)) 214 + 215 + #define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);}) 216 + #define pgd_none(pgd) (!pgd_val(pgd)) 217 + #define pgd_bad(pgd) (pgd_val(pgd) == 0) 218 + #define pgd_present(pgd) (pgd_val(pgd) != 0) 219 + #define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0) 220 + #define pgd_page(pgd) (pgd_val(pgd)) 221 222 /* 223 * Find an entry in a page-table-directory. We combine the address region 224 * (the high order N bits) and the pgd portion of the address. 225 */ 226 /* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */ 227 + #define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff) 228 229 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) 230 231 + #define pud_offset(pgdp, addr) \ 232 + (((pud_t *) pgd_page(*(pgdp))) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))) 233 234 + #define pmd_offset(pudp,addr) \ 235 + (((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))) 236 + 237 #define pte_offset_kernel(dir,addr) \ 238 + (((pte_t *) pmd_page_kernel(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))) 239 240 #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) 241 #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) ··· 458 #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) 459 460 #define pmd_ERROR(e) \ 461 + printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) 462 + #define pud_ERROR(e) \ 463 + printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e)) 464 #define pgd_ERROR(e) \ 465 + printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) 466 467 extern pgd_t swapper_pg_dir[]; 468 469 extern void paging_init(void); 470 471 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \ 472 + free_pgd_range(tlb, addr, end, floor, ceiling) 473 474 /* 475 * This gets called at the end of handling a page fault, when
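The pgtable.h hunks above replace the old three-level split with a true four-level layout: a 9-bit PGD, 7-bit PUD, 7-bit PMD and 9-bit PTE index over 4K pages, i.e. 9 + 7 + 7 + 9 + 12 = 44 addressable bits. A standalone illustrative sketch of how an effective address decomposes across the levels (not kernel code; only the index sizes come from the hunk, everything else is local to the example):

#include <stdio.h>

#define PAGE_SHIFT      12
#define PTE_INDEX_SIZE  9
#define PMD_INDEX_SIZE  7
#define PUD_INDEX_SIZE  7
#define PGD_INDEX_SIZE  9

#define PMD_SHIFT   (PAGE_SHIFT + PTE_INDEX_SIZE)   /* 21 */
#define PUD_SHIFT   (PMD_SHIFT + PMD_INDEX_SIZE)    /* 28 */
#define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE)    /* 35 */

int main(void)
{
	unsigned long ea = 0x00000fedcba98765UL;  /* arbitrary 44-bit test address */

	printf("pgd index: %lu\n", (ea >> PGDIR_SHIFT) & ((1UL << PGD_INDEX_SIZE) - 1));
	printf("pud index: %lu\n", (ea >> PUD_SHIFT)   & ((1UL << PUD_INDEX_SIZE) - 1));
	printf("pmd index: %lu\n", (ea >> PMD_SHIFT)   & ((1UL << PMD_INDEX_SIZE) - 1));
	printf("pte index: %lu\n", (ea >> PAGE_SHIFT)  & ((1UL << PTE_INDEX_SIZE) - 1));
	return 0;
}

With 8-byte entries, the 7-bit PMD and PUD levels are 1KB tables, a quarter page each, which is why the hunk introduces explicit *_TABLE_SIZE macros rather than assuming page-sized directories.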
+2
include/asm-ppc64/pmc.h
··· 26 int reserve_pmc_hardware(perf_irq_t new_perf_irq); 27 void release_pmc_hardware(void); 28 29 #endif /* _PPC64_PMC_H */
··· 26 int reserve_pmc_hardware(perf_irq_t new_perf_irq); 27 void release_pmc_hardware(void); 28 29 + void power4_enable_pmcs(void); 30 + 31 #endif /* _PPC64_PMC_H */
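pmc.h grows one hook. A hedged sketch of how a counter user might combine it with the existing reserve/release pair; all my_* names are hypothetical, and it assumes perf_irq_t is the PMI handler type this header declares:

/* claim the PMCs, install a PMI handler, and enable counting */
static void my_pmi_handler(struct pt_regs *regs)
{
	/* read and reset the performance monitor counters here */
}

static int __init my_profiler_init(void)
{
	if (reserve_pmc_hardware(my_pmi_handler))
		return -EBUSY;		/* assumption: PMCs already claimed */
	power4_enable_pmcs();		/* new hook: flip the POWER4 PMC enable */
	return 0;
}

static void __exit my_profiler_exit(void)
{
	release_pmc_hardware();
}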
+2 -2
include/asm-ppc64/processor.h
··· 382 extern struct task_struct *last_task_used_math; 383 extern struct task_struct *last_task_used_altivec; 384 385 - /* 64-bit user address space is 41-bits (2TBs user VM) */ 386 - #define TASK_SIZE_USER64 (0x0000020000000000UL) 387 388 /* 389 * 32-bit user address space is 4GB - 1 page
··· 382 extern struct task_struct *last_task_used_math; 383 extern struct task_struct *last_task_used_altivec; 384 385 + /* 64-bit user address space is 44 bits (16TB user VM) */ 386 + #define TASK_SIZE_USER64 (0x0000100000000000UL) 387 388 /* 389 * 32-bit user address space is 4GB - 1 page
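The new constant checks out: 0x0000100000000000 is 1 << 44 bytes, i.e. 16TB, matching the comment, and it equals the PGTABLE_RANGE of 9 + 7 + 7 + 9 + 12 = 44 bits defined in pgtable.h above, so the new #if TASK_SIZE_USER64 > PGTABLE_RANGE guard there passes with no slack.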
+9 -5
include/asm-ppc64/prom.h
··· 22 #define RELOC(x) (*PTRRELOC(&(x))) 23 24 /* Definitions used by the flattened device tree */ 25 - #define OF_DT_HEADER 0xd00dfeed /* 4: version, 4: total size */ 26 - #define OF_DT_BEGIN_NODE 0x1 /* Start node: full name */ 27 #define OF_DT_END_NODE 0x2 /* End node */ 28 - #define OF_DT_PROP 0x3 /* Property: name off, size, content */ 29 #define OF_DT_END 0x9 30 31 - #define OF_DT_VERSION 1 32 33 /* 34 * This is what gets passed to the kernel by prom_init or kexec ··· 56 u32 version; /* format version */ 57 u32 last_comp_version; /* last compatible version */ 58 /* version 2 fields below */ 59 - u32 boot_cpuid_phys; /* Which physical CPU id we're booting on */ 60 }; 61 62
··· 22 #define RELOC(x) (*PTRRELOC(&(x))) 23 24 /* Definitions used by the flattened device tree */ 25 + #define OF_DT_HEADER 0xd00dfeed /* marker */ 26 + #define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */ 27 #define OF_DT_END_NODE 0x2 /* End node */ 28 + #define OF_DT_PROP 0x3 /* Property: name off, size, 29 + * content */ 30 + #define OF_DT_NOP 0x4 /* nop */ 31 #define OF_DT_END 0x9 32 33 + #define OF_DT_VERSION 0x10 34 35 /* 36 * This is what gets passed to the kernel by prom_init or kexec ··· 54 u32 version; /* format version */ 55 u32 last_comp_version; /* last compatible version */ 56 /* version 2 fields below */ 57 + u32 boot_cpuid_phys; /* Physical CPU id we're booting on */ 58 + /* version 3 fields below */ 59 + u32 dt_strings_size; /* size of the DT strings block */ 60 }; 61 62
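The structure block these tokens describe is a stream of big-endian 32-bit words; the new OF_DT_NOP lets tools blank out entries in place. A minimal illustrative walker (a sketch, not the kernel's scanner: it assumes 4-byte token alignment and an inline NUL-terminated name after OF_DT_BEGIN_NODE, and skips version-specific details such as resolving names in the strings block):

#include <stdint.h>
#include <string.h>

#define OF_DT_BEGIN_NODE	0x1
#define OF_DT_END_NODE		0x2
#define OF_DT_PROP		0x3
#define OF_DT_NOP		0x4
#define OF_DT_END		0x9

/* tokens and lengths are stored big-endian in the blob */
static uint32_t dt_u32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

static void dt_walk(const uint8_t *p)
{
	for (;;) {
		uint32_t tok = dt_u32(p);
		p += 4;
		switch (tok) {
		case OF_DT_BEGIN_NODE:
			/* inline node name, NUL-terminated, padded to 4 bytes */
			p += (strlen((const char *)p) + 1 + 3) & ~3UL;
			break;
		case OF_DT_PROP: {
			uint32_t len = dt_u32(p);	/* value length */
			p += 8;				/* skip len + name offset */
			p += (len + 3) & ~3UL;		/* value, padded to 4 bytes */
			break;
		}
		case OF_DT_END_NODE:
		case OF_DT_NOP:
			break;			/* nothing follows these tokens */
		case OF_DT_END:
			return;			/* end of structure block */
		}
	}
}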
+3 -1
include/asm-ppc64/system.h
··· 88 DEBUGGER_BOILERPLATE(debugger_fault_handler) 89 90 #ifdef CONFIG_XMON 91 - extern void xmon_init(void); 92 #endif 93 94 #else ··· 301 #define NET_IP_ALIGN 0 302 303 #define arch_align_stack(x) (x) 304 305 #endif /* __KERNEL__ */ 306 #endif
··· 88 DEBUGGER_BOILERPLATE(debugger_fault_handler) 89 90 #ifdef CONFIG_XMON 91 + extern void xmon_init(int enable); 92 #endif 93 94 #else ··· 301 #define NET_IP_ALIGN 0 302 303 #define arch_align_stack(x) (x) 304 + 305 + extern unsigned long reloc_offset(void); 306 307 #endif /* __KERNEL__ */ 308 #endif
+10
include/asm-ppc64/vio.h
··· 56 int vio_get_irq(struct vio_dev *dev); 57 int vio_enable_interrupts(struct vio_dev *dev); 58 int vio_disable_interrupts(struct vio_dev *dev); 59 60 extern struct dma_mapping_ops vio_dma_ops; 61 ··· 98 struct device dev; 99 }; 100 101 static inline struct vio_dev *to_vio_dev(struct device *dev) 102 { 103 return container_of(dev, struct vio_dev, dev); 104 } 105 106 #endif /* _ASM_VIO_H */
··· 56 int vio_get_irq(struct vio_dev *dev); 57 int vio_enable_interrupts(struct vio_dev *dev); 58 int vio_disable_interrupts(struct vio_dev *dev); 59 + extern struct vio_dev * __devinit vio_register_device_common( 60 + struct vio_dev *viodev, char *name, char *type, 61 + uint32_t unit_address, struct iommu_table *iommu_table); 62 63 extern struct dma_mapping_ops vio_dma_ops; 64 ··· 95 struct device dev; 96 }; 97 98 + extern struct vio_dev vio_bus_device; 99 + 100 static inline struct vio_dev *to_vio_dev(struct device *dev) 101 { 102 return container_of(dev, struct vio_dev, dev); 103 } 104 + 105 + extern int vio_bus_init(int (*is_match)(const struct vio_device_id *id, 106 + const struct vio_dev *dev), 107 + void (*)(struct vio_dev *), 108 + void (*)(struct device *)); 109 110 #endif /* _ASM_VIO_H */
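The vio.h changes export the pieces a platform needs to instantiate the common VIO bus: the shared bus device, a common registration helper, and vio_bus_init(), which takes the platform's match, unregister and release hooks. A hedged sketch of the wiring (all my_* names are hypothetical, and the match test assumes struct vio_dev carries the type string passed to vio_register_device_common):

static int my_vio_match(const struct vio_device_id *id,
			const struct vio_dev *dev)
{
	/* assumption: dev->type is the device-tree type string */
	return strncmp(dev->type, id->type, strlen(id->type)) == 0;
}

static void my_vio_unregister(struct vio_dev *viodev)
{
	/* platform-specific teardown for a departing device */
}

static void my_vio_release(struct device *dev)
{
	kfree(to_vio_dev(dev));
}

static int __init my_platform_vio_init(void)
{
	return vio_bus_init(my_vio_match, my_vio_unregister,
			    my_vio_release);
}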