Merge HEAD from master.kernel.org:/pub/scm/linux/kernel/git/paulus/ppc64-2.6

+2245 -2061
-203
arch/ppc/boot/utils/addRamDisk.c
···
- #include <stdio.h>
- #include <stdlib.h>
- #include <netinet/in.h>
- #include <unistd.h>
- #include <sys/types.h>
- #include <sys/stat.h>
- #include <string.h>
- 
- #define ElfHeaderSize	(64 * 1024)
- #define ElfPages	(ElfHeaderSize / 4096)
- #define KERNELBASE	(0xc0000000)
- 
- void get4k(FILE *file, char *buf )
- {
- 	unsigned j;
- 	unsigned num = fread(buf, 1, 4096, file);
- 	for ( j=num; j<4096; ++j )
- 		buf[j] = 0;
- }
- 
- void put4k(FILE *file, char *buf )
- {
- 	fwrite(buf, 1, 4096, file);
- }
- 
- void death(const char *msg, FILE *fdesc, const char *fname)
- {
- 	printf(msg);
- 	fclose(fdesc);
- 	unlink(fname);
- 	exit(1);
- }
- 
- int main(int argc, char **argv)
- {
- 	char inbuf[4096];
- 	FILE *ramDisk = NULL;
- 	FILE *inputVmlinux = NULL;
- 	FILE *outputVmlinux = NULL;
- 	unsigned i = 0;
- 	u_int32_t ramFileLen = 0;
- 	u_int32_t ramLen = 0;
- 	u_int32_t roundR = 0;
- 	u_int32_t kernelLen = 0;
- 	u_int32_t actualKernelLen = 0;
- 	u_int32_t round = 0;
- 	u_int32_t roundedKernelLen = 0;
- 	u_int32_t ramStartOffs = 0;
- 	u_int32_t ramPages = 0;
- 	u_int32_t roundedKernelPages = 0;
- 	u_int32_t hvReleaseData = 0;
- 	u_int32_t eyeCatcher = 0xc8a5d9c4;
- 	u_int32_t naca = 0;
- 	u_int32_t xRamDisk = 0;
- 	u_int32_t xRamDiskSize = 0;
- 	if ( argc < 2 ) {
- 		printf("Name of RAM disk file missing.\n");
- 		exit(1);
- 	}
- 
- 	if ( argc < 3 ) {
- 		printf("Name of vmlinux file missing.\n");
- 		exit(1);
- 	}
- 
- 	if ( argc < 4 ) {
- 		printf("Name of vmlinux output file missing.\n");
- 		exit(1);
- 	}
- 
- 	ramDisk = fopen(argv[1], "r");
- 	if ( ! ramDisk ) {
- 		printf("RAM disk file \"%s\" failed to open.\n", argv[1]);
- 		exit(1);
- 	}
- 	inputVmlinux = fopen(argv[2], "r");
- 	if ( ! inputVmlinux ) {
- 		printf("vmlinux file \"%s\" failed to open.\n", argv[2]);
- 		exit(1);
- 	}
- 	outputVmlinux = fopen(argv[3], "w+");
- 	if ( ! outputVmlinux ) {
- 		printf("output vmlinux file \"%s\" failed to open.\n", argv[3]);
- 		exit(1);
- 	}
- 	fseek(ramDisk, 0, SEEK_END);
- 	ramFileLen = ftell(ramDisk);
- 	fseek(ramDisk, 0, SEEK_SET);
- 	printf("%s file size = %d\n", argv[1], ramFileLen);
- 
- 	ramLen = ramFileLen;
- 
- 	roundR = 4096 - (ramLen % 4096);
- 	if ( roundR ) {
- 		printf("Rounding RAM disk file up to a multiple of 4096, adding %d\n", roundR);
- 		ramLen += roundR;
- 	}
- 
- 	printf("Rounded RAM disk size is %d\n", ramLen);
- 	fseek(inputVmlinux, 0, SEEK_END);
- 	kernelLen = ftell(inputVmlinux);
- 	fseek(inputVmlinux, 0, SEEK_SET);
- 	printf("kernel file size = %d\n", kernelLen);
- 	if ( kernelLen == 0 ) {
- 		printf("You must have a linux kernel specified as argv[2]\n");
- 		exit(1);
- 	}
- 
- 	actualKernelLen = kernelLen - ElfHeaderSize;
- 
- 	printf("actual kernel length (minus ELF header) = %d\n", actualKernelLen);
- 
- 	round = actualKernelLen % 4096;
- 	roundedKernelLen = actualKernelLen;
- 	if ( round )
- 		roundedKernelLen += (4096 - round);
- 
- 	printf("actual kernel length rounded up to a 4k multiple = %d\n", roundedKernelLen);
- 
- 	ramStartOffs = roundedKernelLen;
- 	ramPages = ramLen / 4096;
- 
- 	printf("RAM disk pages to copy = %d\n", ramPages);
- 
- 	// Copy 64K ELF header
- 	for (i=0; i<(ElfPages); ++i) {
- 		get4k( inputVmlinux, inbuf );
- 		put4k( outputVmlinux, inbuf );
- 	}
- 
- 	roundedKernelPages = roundedKernelLen / 4096;
- 
- 	fseek(inputVmlinux, ElfHeaderSize, SEEK_SET);
- 
- 	for ( i=0; i<roundedKernelPages; ++i ) {
- 		get4k( inputVmlinux, inbuf );
- 		put4k( outputVmlinux, inbuf );
- 	}
- 
- 	for ( i=0; i<ramPages; ++i ) {
- 		get4k( ramDisk, inbuf );
- 		put4k( outputVmlinux, inbuf );
- 	}
- 
- 	/* Close the input files */
- 	fclose(ramDisk);
- 	fclose(inputVmlinux);
- 	/* And flush the written output file */
- 	fflush(outputVmlinux);
- 
- 	/* fseek to the hvReleaseData pointer */
- 	fseek(outputVmlinux, ElfHeaderSize + 0x24, SEEK_SET);
- 	if (fread(&hvReleaseData, 4, 1, outputVmlinux) != 1) {
- 		death("Could not read hvReleaseData pointer\n", outputVmlinux, argv[3]);
- 	}
- 	hvReleaseData = ntohl(hvReleaseData); /* Convert to native int */
- 	printf("hvReleaseData is at %08x\n", hvReleaseData);
- 
- 	/* fseek to the hvReleaseData */
- 	fseek(outputVmlinux, ElfHeaderSize + hvReleaseData, SEEK_SET);
- 	if (fread(inbuf, 0x40, 1, outputVmlinux) != 1) {
- 		death("Could not read hvReleaseData\n", outputVmlinux, argv[3]);
- 	}
- 	/* Check hvReleaseData sanity */
- 	if (memcmp(inbuf, &eyeCatcher, 4) != 0) {
- 		death("hvReleaseData is invalid\n", outputVmlinux, argv[3]);
- 	}
- 	/* Get the naca pointer */
- 	naca = ntohl(*((u_int32_t *) &inbuf[0x0c])) - KERNELBASE;
- 	printf("naca is at %08x\n", naca);
- 
- 	/* fseek to the naca */
- 	fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
- 	if (fread(inbuf, 0x18, 1, outputVmlinux) != 1) {
- 		death("Could not read naca\n", outputVmlinux, argv[3]);
- 	}
- 	xRamDisk = ntohl(*((u_int32_t *) &inbuf[0x0c]));
- 	xRamDiskSize = ntohl(*((u_int32_t *) &inbuf[0x14]));
- 	/* Make sure a RAM disk isn't already present */
- 	if ((xRamDisk != 0) || (xRamDiskSize != 0)) {
- 		death("RAM disk is already attached to this kernel\n", outputVmlinux, argv[3]);
- 	}
- 	/* Fill in the values */
- 	*((u_int32_t *) &inbuf[0x0c]) = htonl(ramStartOffs);
- 	*((u_int32_t *) &inbuf[0x14]) = htonl(ramPages);
- 
- 	/* Write out the new naca */
- 	fflush(outputVmlinux);
- 	fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
- 	if (fwrite(inbuf, 0x18, 1, outputVmlinux) != 1) {
- 		death("Could not write naca\n", outputVmlinux, argv[3]);
- 	}
- 	printf("RAM Disk of 0x%x pages size is attached to the kernel at offset 0x%08x\n",
- 	       ramPages, ramStartOffs);
- 
- 	/* Done */
- 	fclose(outputVmlinux);
- 	/* Set permission to executable */
- 	chmod(argv[3], S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH);
- 
- 	return 0;
- }
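The deleted addRamDisk.c rounded both the kernel image and the RAM disk up to 4096-byte pages before splicing them together. Worth noting: its expression 4096 - (len % 4096) is never zero, so an already page-aligned RAM disk received a full extra page of padding. A minimal sketch of the overflow-safe mask idiom that avoids this (the helper name is hypothetical, not from the tree):

#include <stdint.h>

/* Round len up to the next multiple of 4096; aligned lengths are unchanged. */
static uint32_t round_up_4k(uint32_t len)
{
	return (len + 4095u) & ~4095u;
}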
+34 -40
arch/ppc64/Kconfig
···
  	bool
  	default y
  
- config MSCHUNKS
- 	bool
- 	depends on PPC_ISERIES
- 	default y
- 
- 
  config PPC_RTAS
  	bool
  	depends on PPC_PSERIES || PPC_BPA
···
  
  	  If unsure, say Y. Only embedded should say N here.
  
+ source "fs/Kconfig.binfmt"
+ 
+ config HOTPLUG_CPU
+ 	bool "Support for hot-pluggable CPUs"
+ 	depends on SMP && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
+ 	select HOTPLUG
+ 	---help---
+ 	  Say Y here to be able to turn CPUs off and on.
+ 
+ 	  Say N if you are unsure.
+ 
+ config PROC_DEVICETREE
+ 	bool "Support for Open Firmware device tree in /proc"
+ 	depends on !PPC_ISERIES
+ 	help
+ 	  This option adds a device-tree directory under /proc which contains
+ 	  an image of the device tree that the kernel copies from Open
+ 	  Firmware. If unsure, say Y here.
+ 
+ config CMDLINE_BOOL
+ 	bool "Default bootloader kernel arguments"
+ 	depends on !PPC_ISERIES
+ 
+ config CMDLINE
+ 	string "Initial kernel command string"
+ 	depends on CMDLINE_BOOL
+ 	default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
+ 	help
+ 	  On some platforms, there is currently no way for the boot loader to
+ 	  pass arguments to the kernel. For these platforms, you can supply
+ 	  some command-line options at build time by entering them here. In
+ 	  most cases you will need to specify the root device here.
+ 
  endmenu
  
  config ISA_DMA_API
  	bool
  	default y
  
- menu "General setup"
+ menu "Bus Options"
  
  config ISA
  	bool
···
  	bool
  	default PCI
  
- source "fs/Kconfig.binfmt"
- 
  source "drivers/pci/Kconfig"
- 
- config HOTPLUG_CPU
- 	bool "Support for hot-pluggable CPUs"
- 	depends on SMP && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
- 	select HOTPLUG
- 	---help---
- 	  Say Y here to be able to turn CPUs off and on.
- 
- 	  Say N if you are unsure.
  
  source "drivers/pcmcia/Kconfig"
  
  source "drivers/pci/hotplug/Kconfig"
- 
- config PROC_DEVICETREE
- 	bool "Support for Open Firmware device tree in /proc"
- 	depends on !PPC_ISERIES
- 	help
- 	  This option adds a device-tree directory under /proc which contains
- 	  an image of the device tree that the kernel copies from Open
- 	  Firmware. If unsure, say Y here.
- 
- config CMDLINE_BOOL
- 	bool "Default bootloader kernel arguments"
- 	depends on !PPC_ISERIES
- 
- config CMDLINE
- 	string "Initial kernel command string"
- 	depends on CMDLINE_BOOL
- 	default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
- 	help
- 	  On some platforms, there is currently no way for the boot loader to
- 	  pass arguments to the kernel. For these platforms, you can supply
- 	  some command-line options at build time by entering them here. In
- 	  most cases you will need to specify the root device here.
  
  endmenu
+2 -2
arch/ppc64/boot/Makefile
···
  
  
  HOSTCC		:= gcc
- BOOTCFLAGS	:= $(HOSTCFLAGS) $(LINUXINCLUDE) -fno-builtin
- BOOTAFLAGS	:= -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional
+ BOOTCFLAGS	:= $(HOSTCFLAGS) -fno-builtin -nostdinc -isystem $(shell $(CROSS32CC) -print-file-name=include)
+ BOOTAFLAGS	:= -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
  BOOTLFLAGS	:= -Ttext 0x00400000 -e _start -T $(srctree)/$(src)/zImage.lds
  OBJCOPYFLAGS	:= contents,alloc,load,readonly,data
  
+2 -2
arch/ppc64/boot/addnote.c
···
  	PUT_32BE(ns, strlen(arch) + 1);
  	PUT_32BE(ns + 4, N_DESCR * 4);
  	PUT_32BE(ns + 8, 0x1275);
- 	strcpy(&buf[ns + 12], arch);
+ 	strcpy((char *) &buf[ns + 12], arch);
  	ns += 12 + strlen(arch) + 1;
  	for (i = 0; i < N_DESCR; ++i, ns += 4)
  		PUT_32BE(ns, descr[i]);
···
  	PUT_32BE(ns, strlen(rpaname) + 1);
  	PUT_32BE(ns + 4, sizeof(rpanote));
  	PUT_32BE(ns + 8, 0x12759999);
- 	strcpy(&buf[ns + 12], rpaname);
+ 	strcpy((char *) &buf[ns + 12], rpaname);
  	ns += 12 + ROUNDUP(strlen(rpaname) + 1);
  	for (i = 0; i < N_RPA_DESCR; ++i, ns += 4)
  		PUT_32BE(ns, rpanote[i]);
+1 -1
arch/ppc64/boot/crt0.S
···
   * NOTE: this code runs in 32 bit mode and is packaged as ELF32.
   */
  
- #include <asm/ppc_asm.h>
+ #include "ppc_asm.h"
  
  	.text
  	.globl _start
+1 -1
arch/ppc64/boot/div64.S
···
   * as published by the Free Software Foundation; either version
   * 2 of the License, or (at your option) any later version.
   */
- #include <asm/ppc_asm.h>
+ #include "ppc_asm.h"
  
  	.globl __div64_32
  __div64_32:
+149
arch/ppc64/boot/elf.h
···
+ #ifndef _PPC_BOOT_ELF_H_
+ #define _PPC_BOOT_ELF_H_
+ 
+ /* 32-bit ELF base types. */
+ typedef unsigned int Elf32_Addr;
+ typedef unsigned short Elf32_Half;
+ typedef unsigned int Elf32_Off;
+ typedef signed int Elf32_Sword;
+ typedef unsigned int Elf32_Word;
+ 
+ /* 64-bit ELF base types. */
+ typedef unsigned long long Elf64_Addr;
+ typedef unsigned short Elf64_Half;
+ typedef signed short Elf64_SHalf;
+ typedef unsigned long long Elf64_Off;
+ typedef signed int Elf64_Sword;
+ typedef unsigned int Elf64_Word;
+ typedef unsigned long long Elf64_Xword;
+ typedef signed long long Elf64_Sxword;
+ 
+ /* These constants are for the segment types stored in the image headers */
+ #define PT_NULL    0
+ #define PT_LOAD    1
+ #define PT_DYNAMIC 2
+ #define PT_INTERP  3
+ #define PT_NOTE    4
+ #define PT_SHLIB   5
+ #define PT_PHDR    6
+ #define PT_TLS     7		/* Thread local storage segment */
+ #define PT_LOOS    0x60000000	/* OS-specific */
+ #define PT_HIOS    0x6fffffff	/* OS-specific */
+ #define PT_LOPROC  0x70000000
+ #define PT_HIPROC  0x7fffffff
+ #define PT_GNU_EH_FRAME	0x6474e550
+ 
+ #define PT_GNU_STACK	(PT_LOOS + 0x474e551)
+ 
+ /* These constants define the different elf file types */
+ #define ET_NONE   0
+ #define ET_REL    1
+ #define ET_EXEC   2
+ #define ET_DYN    3
+ #define ET_CORE   4
+ #define ET_LOPROC 0xff00
+ #define ET_HIPROC 0xffff
+ 
+ /* These constants define the various ELF target machines */
+ #define EM_NONE		0
+ #define EM_PPC		20	/* PowerPC */
+ #define EM_PPC64	21	/* PowerPC64 */
+ 
+ #define EI_NIDENT	16
+ 
+ typedef struct elf32_hdr {
+ 	unsigned char e_ident[EI_NIDENT];
+ 	Elf32_Half e_type;
+ 	Elf32_Half e_machine;
+ 	Elf32_Word e_version;
+ 	Elf32_Addr e_entry;	/* Entry point */
+ 	Elf32_Off e_phoff;
+ 	Elf32_Off e_shoff;
+ 	Elf32_Word e_flags;
+ 	Elf32_Half e_ehsize;
+ 	Elf32_Half e_phentsize;
+ 	Elf32_Half e_phnum;
+ 	Elf32_Half e_shentsize;
+ 	Elf32_Half e_shnum;
+ 	Elf32_Half e_shstrndx;
+ } Elf32_Ehdr;
+ 
+ typedef struct elf64_hdr {
+ 	unsigned char e_ident[16];	/* ELF "magic number" */
+ 	Elf64_Half e_type;
+ 	Elf64_Half e_machine;
+ 	Elf64_Word e_version;
+ 	Elf64_Addr e_entry;	/* Entry point virtual address */
+ 	Elf64_Off e_phoff;	/* Program header table file offset */
+ 	Elf64_Off e_shoff;	/* Section header table file offset */
+ 	Elf64_Word e_flags;
+ 	Elf64_Half e_ehsize;
+ 	Elf64_Half e_phentsize;
+ 	Elf64_Half e_phnum;
+ 	Elf64_Half e_shentsize;
+ 	Elf64_Half e_shnum;
+ 	Elf64_Half e_shstrndx;
+ } Elf64_Ehdr;
+ 
+ /* These constants define the permissions on sections in the program
+    header, p_flags. */
+ #define PF_R		0x4
+ #define PF_W		0x2
+ #define PF_X		0x1
+ 
+ typedef struct elf32_phdr {
+ 	Elf32_Word p_type;
+ 	Elf32_Off p_offset;
+ 	Elf32_Addr p_vaddr;
+ 	Elf32_Addr p_paddr;
+ 	Elf32_Word p_filesz;
+ 	Elf32_Word p_memsz;
+ 	Elf32_Word p_flags;
+ 	Elf32_Word p_align;
+ } Elf32_Phdr;
+ 
+ typedef struct elf64_phdr {
+ 	Elf64_Word p_type;
+ 	Elf64_Word p_flags;
+ 	Elf64_Off p_offset;	/* Segment file offset */
+ 	Elf64_Addr p_vaddr;	/* Segment virtual address */
+ 	Elf64_Addr p_paddr;	/* Segment physical address */
+ 	Elf64_Xword p_filesz;	/* Segment size in file */
+ 	Elf64_Xword p_memsz;	/* Segment size in memory */
+ 	Elf64_Xword p_align;	/* Segment alignment, file & memory */
+ } Elf64_Phdr;
+ 
+ #define EI_MAG0		0	/* e_ident[] indexes */
+ #define EI_MAG1		1
+ #define EI_MAG2		2
+ #define EI_MAG3		3
+ #define EI_CLASS	4
+ #define EI_DATA		5
+ #define EI_VERSION	6
+ #define EI_OSABI	7
+ #define EI_PAD		8
+ 
+ #define ELFMAG0		0x7f	/* EI_MAG */
+ #define ELFMAG1		'E'
+ #define ELFMAG2		'L'
+ #define ELFMAG3		'F'
+ #define ELFMAG		"\177ELF"
+ #define SELFMAG		4
+ 
+ #define ELFCLASSNONE	0	/* EI_CLASS */
+ #define ELFCLASS32	1
+ #define ELFCLASS64	2
+ #define ELFCLASSNUM	3
+ 
+ #define ELFDATANONE	0	/* e_ident[EI_DATA] */
+ #define ELFDATA2LSB	1
+ #define ELFDATA2MSB	2
+ 
+ #define EV_NONE		0	/* e_version, EI_VERSION */
+ #define EV_CURRENT	1
+ #define EV_NUM		2
+ 
+ #define ELFOSABI_NONE	0
+ #define ELFOSABI_LINUX	3
+ 
+ #endif /* _PPC_BOOT_ELF_H_ */
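A boot wrapper consuming this header would typically sanity-check an image before trusting e_entry. A minimal sketch using only the types and constants above (the function name is hypothetical, not from the tree):

/* Check that a buffer holds a big-endian 64-bit PowerPC ELF header. */
static int is_elf64_be_ppc(const Elf64_Ehdr *e)
{
	return e->e_ident[EI_MAG0] == ELFMAG0 &&
	       e->e_ident[EI_MAG1] == ELFMAG1 &&
	       e->e_ident[EI_MAG2] == ELFMAG2 &&
	       e->e_ident[EI_MAG3] == ELFMAG3 &&
	       e->e_ident[EI_CLASS] == ELFCLASS64 &&
	       e->e_ident[EI_DATA] == ELFDATA2MSB &&	/* big-endian */
	       e->e_machine == EM_PPC64;
}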
+18 -33
arch/ppc64/boot/main.c
···
   * as published by the Free Software Foundation; either version
   * 2 of the License, or (at your option) any later version.
   */
- #include "ppc32-types.h"
+ #include <stdarg.h>
+ #include <stddef.h>
+ #include "elf.h"
+ #include "page.h"
+ #include "string.h"
+ #include "stdio.h"
+ #include "prom.h"
  #include "zlib.h"
- #include <linux/elf.h>
- #include <linux/string.h>
- #include <asm/processor.h>
- #include <asm/page.h>
  
- extern void *finddevice(const char *);
- extern int getprop(void *, const char *, void *, int);
- extern void printf(const char *fmt, ...);
- extern int sprintf(char *buf, const char *fmt, ...);
- void gunzip(void *, int, unsigned char *, int *);
- void *claim(unsigned int, unsigned int, unsigned int);
- void flush_cache(void *, unsigned long);
- void pause(void);
- extern void exit(void);
+ static void gunzip(void *, int, unsigned char *, int *);
+ extern void flush_cache(void *, unsigned long);
  
- unsigned long strlen(const char *s);
- void *memmove(void *dest, const void *src, unsigned long n);
- void *memcpy(void *dest, const void *src, unsigned long n);
  
  /* Value picked to match that used by yaboot */
  #define PROG_START	0x01400000
  #define RAM_END		(256<<20) // Fixme: use OF */
  
- char *avail_ram;
- char *begin_avail, *end_avail;
- char *avail_high;
- unsigned int heap_use;
- unsigned int heap_max;
+ static char *avail_ram;
+ static char *begin_avail, *end_avail;
+ static char *avail_high;
+ static unsigned int heap_use;
+ static unsigned int heap_max;
  
  extern char _start[];
  extern char _vmlinux_start[];
···
  	unsigned long size;
  	unsigned long memsize;
  };
- struct addr_range vmlinux = {0, 0, 0};
- struct addr_range vmlinuz = {0, 0, 0};
- struct addr_range initrd = {0, 0, 0};
+ static struct addr_range vmlinux = {0, 0, 0};
+ static struct addr_range vmlinuz = {0, 0, 0};
+ static struct addr_range initrd = {0, 0, 0};
  
  static char scratch[128<<10];	/* 128kB of scratch space for gunzip */
  
···
  			  void *,
  			  void *);
  
- 
- int (*prom)(void *);
- 
- void *chosen_handle;
- void *stdin;
- void *stdout;
- void *stderr;
  
  #undef DEBUG
  
···
  
  #define DEFLATED	8
  
- void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
+ static void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
  {
  	z_stream s;
  	int r, i, flags;
+34
arch/ppc64/boot/page.h
···
+ #ifndef _PPC_BOOT_PAGE_H
+ #define _PPC_BOOT_PAGE_H
+ /*
+  * Copyright (C) 2001 PPC64 Team, IBM Corp
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License
+  * as published by the Free Software Foundation; either version
+  * 2 of the License, or (at your option) any later version.
+  */
+ 
+ #ifdef __ASSEMBLY__
+ #define ASM_CONST(x) x
+ #else
+ #define __ASM_CONST(x) x##UL
+ #define ASM_CONST(x) __ASM_CONST(x)
+ #endif
+ 
+ /* PAGE_SHIFT determines the page size */
+ #define PAGE_SHIFT	12
+ #define PAGE_SIZE	(ASM_CONST(1) << PAGE_SHIFT)
+ #define PAGE_MASK	(~(PAGE_SIZE-1))
+ 
+ /* align addr on a size boundary - adjust address up/down if needed */
+ #define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
+ #define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))
+ 
+ /* align addr on a size boundary - adjust address up if needed */
+ #define _ALIGN(addr,size)	_ALIGN_UP(addr,size)
+ 
+ /* to align the pointer to the (next) page boundary */
+ #define PAGE_ALIGN(addr)	_ALIGN(addr, PAGE_SIZE)
+ 
+ #endif /* _PPC_BOOT_PAGE_H */
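These alignment macros rely on masking, so they only work when size is a power of two. A usage sketch with illustrative values:

#include "page.h"

static void align_examples(void)
{
	unsigned long up   = _ALIGN_UP(0x1234UL, PAGE_SIZE);	/* 0x2000 */
	unsigned long down = _ALIGN_DOWN(0x1234UL, PAGE_SIZE);	/* 0x1000 */
	unsigned long same = PAGE_ALIGN(0x2000UL);		/* 0x2000: aligned input unchanged */

	(void)up; (void)down; (void)same;
}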
-36
arch/ppc64/boot/ppc32-types.h
···
- #ifndef _PPC64_TYPES_H
- #define _PPC64_TYPES_H
- 
- typedef __signed__ char __s8;
- typedef unsigned char __u8;
- 
- typedef __signed__ short __s16;
- typedef unsigned short __u16;
- 
- typedef __signed__ int __s32;
- typedef unsigned int __u32;
- 
- typedef __signed__ long long __s64;
- typedef unsigned long long __u64;
- 
- typedef signed char s8;
- typedef unsigned char u8;
- 
- typedef signed short s16;
- typedef unsigned short u16;
- 
- typedef signed int s32;
- typedef unsigned int u32;
- 
- typedef signed long long s64;
- typedef unsigned long long u64;
- 
- typedef struct {
- 	__u32 u[4];
- } __attribute((aligned(16))) __vector128;
- 
- #define BITS_PER_LONG 32
- 
- typedef __vector128 vector128;
- 
- #endif /* _PPC64_TYPES_H */
+62
arch/ppc64/boot/ppc_asm.h
···
+ #ifndef _PPC64_PPC_ASM_H
+ #define _PPC64_PPC_ASM_H
+ /*
+  *
+  * Definitions used by various bits of low-level assembly code on PowerPC.
+  *
+  * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License
+  * as published by the Free Software Foundation; either version
+  * 2 of the License, or (at your option) any later version.
+  */
+ 
+ /* Condition Register Bit Fields */
+ 
+ #define cr0	0
+ #define cr1	1
+ #define cr2	2
+ #define cr3	3
+ #define cr4	4
+ #define cr5	5
+ #define cr6	6
+ #define cr7	7
+ 
+ 
+ /* General Purpose Registers (GPRs) */
+ 
+ #define r0	0
+ #define r1	1
+ #define r2	2
+ #define r3	3
+ #define r4	4
+ #define r5	5
+ #define r6	6
+ #define r7	7
+ #define r8	8
+ #define r9	9
+ #define r10	10
+ #define r11	11
+ #define r12	12
+ #define r13	13
+ #define r14	14
+ #define r15	15
+ #define r16	16
+ #define r17	17
+ #define r18	18
+ #define r19	19
+ #define r20	20
+ #define r21	21
+ #define r22	22
+ #define r23	23
+ #define r24	24
+ #define r25	25
+ #define r26	26
+ #define r27	27
+ #define r28	28
+ #define r29	29
+ #define r30	30
+ #define r31	31
+ 
+ #endif /* _PPC64_PPC_ASM_H */
+27 -169
arch/ppc64/boot/prom.c
···
   * 2 of the License, or (at your option) any later version.
   */
  #include <stdarg.h>
- #include <linux/types.h>
- #include <linux/string.h>
- #include <linux/ctype.h>
- 
- extern __u32 __div64_32(unsigned long long *dividend, __u32 divisor);
- 
- /* The unnecessary pointer compare is there
-  * to check for type safety (n must be 64bit)
-  */
- # define do_div(n,base) ({					\
- 	__u32 __base = (base);					\
- 	__u32 __rem;						\
- 	(void)(((typeof((n)) *)0) == ((unsigned long long *)0));	\
- 	if (((n) >> 32) == 0) {					\
- 		__rem = (__u32)(n) % __base;			\
- 		(n) = (__u32)(n) / __base;			\
- 	} else							\
- 		__rem = __div64_32(&(n), __base);		\
- 	__rem;							\
-  })
+ #include <stddef.h>
+ #include "string.h"
+ #include "stdio.h"
+ #include "prom.h"
  
  int (*prom)(void *);
  
  void *chosen_handle;
+ 
  void *stdin;
  void *stdout;
  void *stderr;
  
- void exit(void);
- void *finddevice(const char *name);
- int getprop(void *phandle, const char *name, void *buf, int buflen);
- void chrpboot(int a1, int a2, void *prom);	/* in main.c */
- 
- int printf(char *fmt, ...);
- 
- /* there is no convenient header to get this from... -- paulus */
- extern unsigned long strlen(const char *);
  
  int
  write(void *handle, void *ptr, int nb)
···
  	return write(f, str, n) == n? 0: -1;
  }
  
- int
- readchar(void)
- {
- 	char ch;
- 
- 	for (;;) {
- 		switch (read(stdin, &ch, 1)) {
- 		case 1:
- 			return ch;
- 		case -1:
- 			printf("read(stdin) returned -1\r\n");
- 			return -1;
- 		}
- 	}
- }
- 
- static char line[256];
- static char *lineptr;
- static int lineleft;
- 
- int
- getchar(void)
- {
- 	int c;
- 
- 	if (lineleft == 0) {
- 		lineptr = line;
- 		for (;;) {
- 			c = readchar();
- 			if (c == -1 || c == 4)
- 				break;
- 			if (c == '\r' || c == '\n') {
- 				*lineptr++ = '\n';
- 				putchar('\n');
- 				break;
- 			}
- 			switch (c) {
- 			case 0177:
- 			case '\b':
- 				if (lineptr > line) {
- 					putchar('\b');
- 					putchar(' ');
- 					putchar('\b');
- 					--lineptr;
- 				}
- 				break;
- 			case 'U' & 0x1F:
- 				while (lineptr > line) {
- 					putchar('\b');
- 					putchar(' ');
- 					putchar('\b');
- 					--lineptr;
- 				}
- 				break;
- 			default:
- 				if (lineptr >= &line[sizeof(line) - 1])
- 					putchar('\a');
- 				else {
- 					putchar(c);
- 					*lineptr++ = c;
- 				}
- 			}
- 		}
- 		lineleft = lineptr - line;
- 		lineptr = line;
- 	}
- 	if (lineleft == 0)
- 		return -1;
- 	--lineleft;
- 	return *lineptr++;
- }
- 
- 
- 
- /* String functions lifted from lib/vsprintf.c and lib/ctype.c */
- unsigned char _ctype[] = {
- _C,_C,_C,_C,_C,_C,_C,_C,			/* 0-7 */
- _C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C,		/* 8-15 */
- _C,_C,_C,_C,_C,_C,_C,_C,			/* 16-23 */
- _C,_C,_C,_C,_C,_C,_C,_C,			/* 24-31 */
- _S|_SP,_P,_P,_P,_P,_P,_P,_P,			/* 32-39 */
- _P,_P,_P,_P,_P,_P,_P,_P,			/* 40-47 */
- _D,_D,_D,_D,_D,_D,_D,_D,			/* 48-55 */
- _D,_D,_P,_P,_P,_P,_P,_P,			/* 56-63 */
- _P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U,	/* 64-71 */
- _U,_U,_U,_U,_U,_U,_U,_U,			/* 72-79 */
- _U,_U,_U,_U,_U,_U,_U,_U,			/* 80-87 */
- _U,_U,_U,_P,_P,_P,_P,_P,			/* 88-95 */
- _P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L,	/* 96-103 */
- _L,_L,_L,_L,_L,_L,_L,_L,			/* 104-111 */
- _L,_L,_L,_L,_L,_L,_L,_L,			/* 112-119 */
- _L,_L,_L,_P,_P,_P,_P,_C,			/* 120-127 */
- 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,		/* 128-143 */
- 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,		/* 144-159 */
- _S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,	/* 160-175 */
- _P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,	/* 176-191 */
- _U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,	/* 192-207 */
- _U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L,	/* 208-223 */
- _L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,	/* 224-239 */
- _L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L};	/* 240-255 */
- 
  size_t strnlen(const char * s, size_t count)
  {
  	const char *sc;
···
  	return sc - s;
  }
  
- unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base)
- {
- 	unsigned long result = 0,value;
+ extern unsigned int __div64_32(unsigned long long *dividend,
+ 			       unsigned int divisor);
  
- 	if (!base) {
- 		base = 10;
- 		if (*cp == '0') {
- 			base = 8;
- 			cp++;
- 			if ((*cp == 'x') && isxdigit(cp[1])) {
- 				cp++;
- 				base = 16;
- 			}
- 		}
- 	}
- 	while (isxdigit(*cp) &&
- 	       (value = isdigit(*cp) ? *cp-'0' : toupper(*cp)-'A'+10) < base) {
- 		result = result*base + value;
- 		cp++;
- 	}
- 	if (endp)
- 		*endp = (char *)cp;
- 	return result;
- }
- 
- long simple_strtol(const char *cp,char **endp,unsigned int base)
- {
- 	if(*cp=='-')
- 		return -simple_strtoul(cp+1,endp,base);
- 	return simple_strtoul(cp,endp,base);
- }
+ /* The unnecessary pointer compare is there
+  * to check for type safety (n must be 64bit)
+  */
+ # define do_div(n,base) ({						\
+ 	unsigned int __base = (base);					\
+ 	unsigned int __rem;						\
+ 	(void)(((typeof((n)) *)0) == ((unsigned long long *)0));	\
+ 	if (((n) >> 32) == 0) {						\
+ 		__rem = (unsigned int)(n) % __base;			\
+ 		(n) = (unsigned int)(n) / __base;			\
+ 	} else								\
+ 		__rem = __div64_32(&(n), __base);			\
+ 	__rem;								\
+  })
  
  static int skip_atoi(const char **s)
  {
- 	int i=0;
+ 	int i, c;
  
- 	while (isdigit(**s))
- 		i = i*10 + *((*s)++) - '0';
+ 	for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s)
+ 		i = i*10 + c - '0';
  	return i;
  }
···
  	return str;
  }
  
- /* Forward decl. needed for IP address printing stuff... */
- int sprintf(char * buf, const char *fmt, ...);
- 
  int vsprintf(char *buf, const char *fmt, va_list args)
  {
  	int len;
···
  
  	/* get field width */
  	field_width = -1;
- 	if (isdigit(*fmt))
+ 	if ('0' <= *fmt && *fmt <= '9')
  		field_width = skip_atoi(&fmt);
  	else if (*fmt == '*') {
  		++fmt;
···
  	precision = -1;
  	if (*fmt == '.') {
  		++fmt;
- 		if (isdigit(*fmt))
+ 		if ('0' <= *fmt && *fmt <= '9')
  			precision = skip_atoi(&fmt);
  		else if (*fmt == '*') {
  			++fmt;
···
  static char sprint_buf[1024];
  
  int
- printf(char *fmt, ...)
+ printf(const char *fmt, ...)
  {
  	va_list args;
  	int n;
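The reworked do_div() divides a 64-bit value in place and yields the remainder, calling __div64_32 from div64.S only when the value does not fit in 32 bits. A usage sketch (illustrative values; the macro relies on the GCC statement-expression extension):

unsigned long long n = 0x300000000ULL;	/* > 32 bits: takes the __div64_32 path */
unsigned int rem;

rem = do_div(n, 4096);	/* n becomes the quotient, rem the remainder */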
+18
arch/ppc64/boot/prom.h
···
+ #ifndef _PPC_BOOT_PROM_H_
+ #define _PPC_BOOT_PROM_H_
+ 
+ extern int (*prom) (void *);
+ extern void *chosen_handle;
+ 
+ extern void *stdin;
+ extern void *stdout;
+ extern void *stderr;
+ 
+ extern int write(void *handle, void *ptr, int nb);
+ extern int read(void *handle, void *ptr, int nb);
+ extern void exit(void);
+ extern void pause(void);
+ extern void *finddevice(const char *);
+ extern void *claim(unsigned long virt, unsigned long size, unsigned long align);
+ extern int getprop(void *phandle, const char *name, void *buf, int buflen);
+ #endif /* _PPC_BOOT_PROM_H_ */
+16
arch/ppc64/boot/stdio.h
···
+ #ifndef _PPC_BOOT_STDIO_H_
+ #define _PPC_BOOT_STDIO_H_
+ 
+ extern int printf(const char *fmt, ...);
+ 
+ extern int sprintf(char *buf, const char *fmt, ...);
+ 
+ extern int vsprintf(char *buf, const char *fmt, va_list args);
+ 
+ extern int putc(int c, void *f);
+ extern int putchar(int c);
+ extern int getchar(void);
+ 
+ extern int fputs(char *str, void *f);
+ 
+ #endif /* _PPC_BOOT_STDIO_H_ */
+1 -1
arch/ppc64/boot/string.S
···
   * NOTE: this code runs in 32 bit mode and is packaged as ELF32.
   */
  
- #include <asm/ppc_asm.h>
+ #include "ppc_asm.h"
  
  	.text
  	.globl strcpy
+16
arch/ppc64/boot/string.h
···
+ #ifndef _PPC_BOOT_STRING_H_
+ #define _PPC_BOOT_STRING_H_
+ 
+ extern char *strcpy(char *dest, const char *src);
+ extern char *strncpy(char *dest, const char *src, size_t n);
+ extern char *strcat(char *dest, const char *src);
+ extern int strcmp(const char *s1, const char *s2);
+ extern size_t strlen(const char *s);
+ extern size_t strnlen(const char *s, size_t count);
+ 
+ extern void *memset(void *s, int c, size_t n);
+ extern void *memmove(void *dest, const void *src, unsigned long n);
+ extern void *memcpy(void *dest, const void *src, unsigned long n);
+ extern int memcmp(const void *s1, const void *s2, size_t n);
+ 
+ #endif /* _PPC_BOOT_STRING_H_ */
+1 -1
arch/ppc64/boot/zlib.c
···
  
  /* Diagnostic functions */
  #ifdef DEBUG_ZLIB
- #  include <stdio.h>
+ #  include "stdio.h"
  #  ifndef verbose
  #    define verbose 0
  #  endif
-1
arch/ppc64/configs/iSeries_defconfig
···
  # CONFIG_HZ_1000 is not set
  CONFIG_HZ=100
  CONFIG_GENERIC_HARDIRQS=y
- CONFIG_MSCHUNKS=y
  CONFIG_LPARCFG=y
  CONFIG_SECCOMP=y
  CONFIG_ISA_DMA_API=y
+11 -26
arch/ppc64/kernel/LparData.c
···
  		0xf4, 0x4b, 0xf6, 0xf4 },
  };
  
+ /*
+  * The NACA. The first dword of the naca is required by the iSeries
+  * hypervisor to point to itVpdAreas. The hypervisor finds the NACA
+  * through the pointer in hvReleaseData.
+  */
+ struct naca_struct naca = {
+ 	.xItVpdAreas = &itVpdAreas,
+ 	.xRamDisk = 0,
+ 	.xRamDiskSize = 0,
+ };
+ 
  extern void system_reset_iSeries(void);
  extern void machine_check_iSeries(void);
  extern void data_access_iSeries(void);
···
  		0,0
  	}
  };
- 
- struct msChunks msChunks;
- EXPORT_SYMBOL(msChunks);
- 
- /* Depending on whether this is called from iSeries or pSeries setup
-  * code, the location of the msChunks struct may or may not have
-  * to be reloc'd, so we force the caller to do that for us by passing
-  * in a pointer to the structure.
-  */
- unsigned long
- msChunks_alloc(unsigned long mem, unsigned long num_chunks, unsigned long chunk_size)
- {
- 	unsigned long offset = reloc_offset();
- 	struct msChunks *_msChunks = PTRRELOC(&msChunks);
- 
- 	_msChunks->num_chunks  = num_chunks;
- 	_msChunks->chunk_size  = chunk_size;
- 	_msChunks->chunk_shift = __ilog2(chunk_size);
- 	_msChunks->chunk_mask  = (1UL<<_msChunks->chunk_shift)-1;
- 
- 	mem = _ALIGN(mem, sizeof(msChunks_entry));
- 	_msChunks->abs = (msChunks_entry *)(mem + offset);
- 	mem += num_chunks * sizeof(msChunks_entry);
- 
- 	return mem;
- }
+5 -2
arch/ppc64/kernel/Makefile
···
  	       udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
  	       ptrace32.o signal32.o rtc.o init_task.o \
  	       lmb.o cputable.o cpu_setup_power4.o idle_power4.o \
- 	       iommu.o sysfs.o vdso.o pmc.o
+ 	       iommu.o sysfs.o vdso.o pmc.o firmware.o
  obj-y += vdso32/ vdso64/
  
  obj-$(CONFIG_PPC_OF) += of_device.o
···
  obj-$(CONFIG_HVC_CONSOLE)	+= hvconsole.o
  obj-$(CONFIG_BOOTX_TEXT)	+= btext.o
  obj-$(CONFIG_HVCS)		+= hvcserver.o
- obj-$(CONFIG_IBMVIO)		+= vio.o
+ 
+ vio-obj-$(CONFIG_PPC_PSERIES)	+= pSeries_vio.o
+ vio-obj-$(CONFIG_PPC_ISERIES)	+= iSeries_vio.o
+ obj-$(CONFIG_IBMVIO)		+= vio.o $(vio-obj-y)
  obj-$(CONFIG_XICS)		+= xics.o
  obj-$(CONFIG_MPIC)		+= mpic.o
+2 -1
arch/ppc64/kernel/asm-offsets.c
···
  	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
  	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
  #ifdef CONFIG_HUGETLB_PAGE
- 	DEFINE(PACAHTLBSEGS, offsetof(struct paca_struct, context.htlb_segs));
+ 	DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
+ 	DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
  #endif /* CONFIG_HUGETLB_PAGE */
  	DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
  	DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
+1 -39
arch/ppc64/kernel/cputable.c
···
   *
   * Modifications for ppc64:
   * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
-  * 
+  *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public License
   * as published by the Free Software Foundation; either version
···
  		.icache_bsize		= 128,
  		.dcache_bsize		= 128,
  		.cpu_setup		= __setup_cpu_power3,
- 		.firmware_features	= COMMON_PPC64_FW,
  	},
  	{	/* Power3+ */
  		.pvr_mask		= 0xffff0000,
···
  		.icache_bsize		= 128,
  		.dcache_bsize		= 128,
  		.cpu_setup		= __setup_cpu_power3,
- 		.firmware_features	= COMMON_PPC64_FW,
  	},
  	{	/* Northstar */
  		.pvr_mask		= 0xffff0000,
···
  		.icache_bsize		= 128,
  		.dcache_bsize		= 128,
  		.cpu_setup		= __setup_cpu_power3,
- 		.firmware_features	= COMMON_PPC64_FW,
  	},
  	{	/* Pulsar */
  		.pvr_mask		= 0xffff0000,
···
  		.icache_bsize		= 128,
  		.dcache_bsize		= 128,
  		.cpu_setup		= __setup_cpu_power3,
- 		.firmware_features	= COMMON_PPC64_FW,
  	},
  	{	/* I-star */
  		.pvr_mask		= 0xffff0000,
···
  		.icache_bsize		= 128,
  		.dcache_bsize		= 128,
  		.cpu_setup		= __setup_cpu_power3,
- 		.firmware_features	= COMMON_PPC64_FW,
  	},
  	{	/* S-star */
  		.pvr_mask		= 0xffff0000,
···
  		.icache_bsize		= 128,
  		.dcache_bsize		= 128,
  		.cpu_setup		= __setup_cpu_power3,
- 		.firmware_features	= COMMON_PPC64_FW,
  	},
  	{	/* Power4 */
  		.pvr_mask		= 0xffff0000,
···
  		.icache_bsize		= 128,
  		.dcache_bsize		= 128,
  		.cpu_setup		= __setup_cpu_power4,
- 		.firmware_features	= COMMON_PPC64_FW,
  	},
  	{	/* Power4+ */
  		.pvr_mask		= 0xffff0000,
···
  		.icache_bsize		= 128,
  		.dcache_bsize		= 128,
  		.cpu_setup		= __setup_cpu_power4,
- 		.firmware_features	= COMMON_PPC64_FW,
  	},
  	{	/* PPC970 */
  		.pvr_mask		= 0xffff0000,
···
  		.icache_bsize		= 128,
  		.dcache_bsize		= 128,
  		.cpu_setup		= __setup_cpu_ppc970,
- 		.firmware_features	= COMMON_PPC64_FW,
  	},
  	{	/* PPC970FX */
  		.pvr_mask		= 0xffff0000,
···
  		.icache_bsize		= 128,
  		.dcache_bsize		= 128,
  		.cpu_setup		= __setup_cpu_ppc970,
- 		.firmware_features	= COMMON_PPC64_FW,
  	},
  	{	/* PPC970MP */
  		.pvr_mask		= 0xffff0000,
···
  		.icache_bsize		= 128,
  		.dcache_bsize		= 128,
  		.cpu_setup		= __setup_cpu_ppc970,
- 		.firmware_features	= COMMON_PPC64_FW,
  	},
  	{	/* Power5 */
  		.pvr_mask		= 0xffff0000,
···
  		.icache_bsize		= 128,
  		.dcache_bsize		= 128,
  		.cpu_setup		= __setup_cpu_power4,
- 		.firmware_features	= COMMON_PPC64_FW,
  	},
  	{	/* Power5 */
  		.pvr_mask		= 0xffff0000,
···
  		.icache_bsize		= 128,
  		.dcache_bsize		= 128,
  		.cpu_setup		= __setup_cpu_power4,
- 		.firmware_features	= COMMON_PPC64_FW,
  	},
  	{	/* BE DD1.x */
  		.pvr_mask		= 0xffff0000,
···
  		.icache_bsize		= 128,
  		.dcache_bsize		= 128,
  		.cpu_setup		= __setup_cpu_be,
- 		.firmware_features	= COMMON_PPC64_FW,
  	},
  	{	/* default match */
  		.pvr_mask		= 0x00000000,
···
  		.icache_bsize		= 128,
  		.dcache_bsize		= 128,
  		.cpu_setup		= __setup_cpu_power4,
- 		.firmware_features	= COMMON_PPC64_FW,
  	}
- };
- 
- firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = {
- 	{FW_FEATURE_PFT,	"hcall-pft"},
- 	{FW_FEATURE_TCE,	"hcall-tce"},
- 	{FW_FEATURE_SPRG0,	"hcall-sprg0"},
- 	{FW_FEATURE_DABR,	"hcall-dabr"},
- 	{FW_FEATURE_COPY,	"hcall-copy"},
- 	{FW_FEATURE_ASR,	"hcall-asr"},
- 	{FW_FEATURE_DEBUG,	"hcall-debug"},
- 	{FW_FEATURE_PERF,	"hcall-perf"},
- 	{FW_FEATURE_DUMP,	"hcall-dump"},
- 	{FW_FEATURE_INTERRUPT,	"hcall-interrupt"},
- 	{FW_FEATURE_MIGRATE,	"hcall-migrate"},
- 	{FW_FEATURE_PERFMON,	"hcall-perfmon"},
- 	{FW_FEATURE_CRQ,	"hcall-crq"},
- 	{FW_FEATURE_VIO,	"hcall-vio"},
- 	{FW_FEATURE_RDMA,	"hcall-rdma"},
- 	{FW_FEATURE_LLAN,	"hcall-lLAN"},
- 	{FW_FEATURE_BULK,	"hcall-bulk"},
- 	{FW_FEATURE_XDABR,	"hcall-xdabr"},
- 	{FW_FEATURE_MULTITCE,	"hcall-multi-tce"},
- 	{FW_FEATURE_SPLPAR,	"hcall-splpar"},
  };
+47
arch/ppc64/kernel/firmware.c
···
+ /*
+  * arch/ppc64/kernel/firmware.c
+  *
+  * Extracted from cputable.c
+  *
+  * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+  *
+  * Modifications for ppc64:
+  *  Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
+  *  Copyright (C) 2005 Stephen Rothwell, IBM Corporation
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License
+  * as published by the Free Software Foundation; either version
+  * 2 of the License, or (at your option) any later version.
+  */
+ 
+ #include <linux/config.h>
+ 
+ #include <asm/firmware.h>
+ 
+ unsigned long ppc64_firmware_features;
+ 
+ #ifdef CONFIG_PPC_PSERIES
+ firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = {
+ 	{FW_FEATURE_PFT,	"hcall-pft"},
+ 	{FW_FEATURE_TCE,	"hcall-tce"},
+ 	{FW_FEATURE_SPRG0,	"hcall-sprg0"},
+ 	{FW_FEATURE_DABR,	"hcall-dabr"},
+ 	{FW_FEATURE_COPY,	"hcall-copy"},
+ 	{FW_FEATURE_ASR,	"hcall-asr"},
+ 	{FW_FEATURE_DEBUG,	"hcall-debug"},
+ 	{FW_FEATURE_PERF,	"hcall-perf"},
+ 	{FW_FEATURE_DUMP,	"hcall-dump"},
+ 	{FW_FEATURE_INTERRUPT,	"hcall-interrupt"},
+ 	{FW_FEATURE_MIGRATE,	"hcall-migrate"},
+ 	{FW_FEATURE_PERFMON,	"hcall-perfmon"},
+ 	{FW_FEATURE_CRQ,	"hcall-crq"},
+ 	{FW_FEATURE_VIO,	"hcall-vio"},
+ 	{FW_FEATURE_RDMA,	"hcall-rdma"},
+ 	{FW_FEATURE_LLAN,	"hcall-lLAN"},
+ 	{FW_FEATURE_BULK,	"hcall-bulk"},
+ 	{FW_FEATURE_XDABR,	"hcall-xdabr"},
+ 	{FW_FEATURE_MULTITCE,	"hcall-multi-tce"},
+ 	{FW_FEATURE_SPLPAR,	"hcall-splpar"},
+ };
+ #endif
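ppc64_firmware_features is a flat bitmask, set once by platform setup code (iSeries_setup.c below sets FW_FEATURE_ISERIES) and then tested with a mask everywhere else. A minimal sketch of such a test; the helper name is illustrative and not part of this patch:

#include <asm/firmware.h>

/* Hypothetical helper: test one feature bit in the flat feature word. */
static inline int fw_feature_enabled(unsigned long feature)
{
	return (ppc64_firmware_features & feature) != 0;
}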
+194 -319
arch/ppc64/kernel/head.S
··· 23 23 * 2 of the License, or (at your option) any later version. 24 24 */ 25 25 26 - #define SECONDARY_PROCESSORS 27 - 28 26 #include <linux/config.h> 29 27 #include <linux/threads.h> 30 28 #include <asm/processor.h> 31 29 #include <asm/page.h> 32 30 #include <asm/mmu.h> 33 - #include <asm/naca.h> 34 31 #include <asm/systemcfg.h> 35 32 #include <asm/ppc_asm.h> 36 33 #include <asm/offsets.h> ··· 42 45 #endif 43 46 44 47 /* 45 - * hcall interface to pSeries LPAR 46 - */ 47 - #define H_SET_ASR 0x30 48 - 49 - /* 50 48 * We layout physical memory as follows: 51 49 * 0x0000 - 0x00ff : Secondary processor spin code 52 50 * 0x0100 - 0x2fff : pSeries Interrupt prologs 53 - * 0x3000 - 0x3fff : Interrupt support 54 - * 0x4000 - 0x4fff : NACA 55 - * 0x6000 : iSeries and common interrupt prologs 56 - * 0x9000 - 0x9fff : Initial segment table 51 + * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs 52 + * 0x6000 - 0x6fff : Initial (CPU0) segment table 53 + * 0x7000 - 0x7fff : FWNMI data area 54 + * 0x8000 - : Early init and support code 57 55 */ 58 56 59 57 /* ··· 86 94 87 95 /* Catch branch to 0 in real mode */ 88 96 trap 97 + 89 98 #ifdef CONFIG_PPC_ISERIES 90 99 /* 91 100 * At offset 0x20, there is a pointer to iSeries LPAR data. ··· 96 103 .llong hvReleaseData-KERNELBASE 97 104 98 105 /* 99 - * At offset 0x28 and 0x30 are offsets to the msChunks 106 + * At offset 0x28 and 0x30 are offsets to the mschunks_map 100 107 * array (used by the iSeries LPAR debugger to do translation 101 108 * between physical addresses and absolute addresses) and 102 109 * to the pidhash table (also used by the debugger) 103 110 */ 104 - .llong msChunks-KERNELBASE 111 + .llong mschunks_map-KERNELBASE 105 112 .llong 0 /* pidhash-KERNELBASE SFRXXX */ 106 113 107 114 /* Offset 0x38 - Pointer to start of embedded System.map */ ··· 113 120 embedded_sysmap_end: 114 121 .llong 0 115 122 116 - #else /* CONFIG_PPC_ISERIES */ 123 + #endif /* CONFIG_PPC_ISERIES */ 117 124 118 125 /* Secondary processors spin on this value until it goes to 1. */ 119 126 .globl __secondary_hold_spinloop ··· 148 155 std r24,__secondary_hold_acknowledge@l(0) 149 156 sync 150 157 151 - /* All secondary cpu's wait here until told to start. */ 158 + /* All secondary cpus wait here until told to start. */ 152 159 100: ld r4,__secondary_hold_spinloop@l(0) 153 160 cmpdi 0,r4,1 154 161 bne 100b ··· 161 168 b .pSeries_secondary_smp_init 162 169 #else 163 170 BUG_OPCODE 164 - #endif 165 171 #endif 166 172 #endif 167 173 ··· 494 502 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) 495 503 STD_EXCEPTION_PSERIES(0x1700, altivec_assist) 496 504 497 - /* moved from 0xf00 */ 498 - STD_EXCEPTION_PSERIES(0x3000, performance_monitor) 505 + . = 0x3000 499 506 500 - . = 0x3100 507 + /*** pSeries interrupt support ***/ 508 + 509 + /* moved from 0xf00 */ 510 + STD_EXCEPTION_PSERIES(., performance_monitor) 511 + 512 + .align 7 501 513 _GLOBAL(do_stab_bolted_pSeries) 502 514 mtcrf 0x80,r12 503 515 mfspr r12,SPRG2 504 516 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) 505 517 506 - 507 - /* Space for the naca. Architected to be located at real address 508 - * NACA_PHYS_ADDR. Various tools rely on this location being fixed. 509 - * The first dword of the naca is required by iSeries LPAR to 510 - * point to itVpdAreas. On pSeries native, this value is not used. 511 - */ 512 - . = NACA_PHYS_ADDR 513 - .globl __end_interrupts 514 - __end_interrupts: 518 + /* 519 + * Vectors for the FWNMI option. Share common code. 
520 + */ 521 + .globl system_reset_fwnmi 522 + system_reset_fwnmi: 523 + HMT_MEDIUM 524 + mtspr SPRG1,r13 /* save r13 */ 525 + RUNLATCH_ON(r13) 526 + EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) 527 + 528 + .globl machine_check_fwnmi 529 + machine_check_fwnmi: 530 + HMT_MEDIUM 531 + mtspr SPRG1,r13 /* save r13 */ 532 + RUNLATCH_ON(r13) 533 + EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) 534 + 515 535 #ifdef CONFIG_PPC_ISERIES 516 - .globl naca 517 - naca: 518 - .llong itVpdAreas 519 - .llong 0 /* xRamDisk */ 520 - .llong 0 /* xRamDiskSize */ 521 - 522 - . = 0x6100 523 - 524 536 /*** ISeries-LPAR interrupt handlers ***/ 525 537 526 538 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC) ··· 622 626 623 627 cmpwi 0,r23,0 624 628 beq iSeries_secondary_smp_loop /* Loop until told to go */ 625 - #ifdef SECONDARY_PROCESSORS 626 629 bne .__secondary_start /* Loop until told to go */ 627 - #endif 628 630 iSeries_secondary_smp_loop: 629 631 /* Let the Hypervisor know we are alive */ 630 632 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ ··· 665 671 ld r13,PACA_EXGEN+EX_R13(r13) 666 672 rfid 667 673 b . /* prevent speculative execution */ 668 - #endif 669 - 670 - /* 671 - * Data area reserved for FWNMI option. 672 - */ 673 - .= 0x7000 674 - .globl fwnmi_data_area 675 - fwnmi_data_area: 676 - 677 - #ifdef CONFIG_PPC_ISERIES 678 - . = LPARMAP_PHYS 679 - #include "lparmap.s" 680 674 #endif /* CONFIG_PPC_ISERIES */ 681 - 682 - /* 683 - * Vectors for the FWNMI option. Share common code. 684 - */ 685 - . = 0x8000 686 - .globl system_reset_fwnmi 687 - system_reset_fwnmi: 688 - HMT_MEDIUM 689 - mtspr SPRG1,r13 /* save r13 */ 690 - RUNLATCH_ON(r13) 691 - EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) 692 - .globl machine_check_fwnmi 693 - machine_check_fwnmi: 694 - HMT_MEDIUM 695 - mtspr SPRG1,r13 /* save r13 */ 696 - RUNLATCH_ON(r13) 697 - EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) 698 - 699 - /* 700 - * Space for the initial segment table 701 - * For LPAR, the hypervisor must fill in at least one entry 702 - * before we get control (with relocate on) 703 - */ 704 - . = STAB0_PHYS_ADDR 705 - .globl __start_stab 706 - __start_stab: 707 - 708 - . = (STAB0_PHYS_ADDR + PAGE_SIZE) 709 - .globl __end_stab 710 - __end_stab: 711 - 712 675 713 676 /*** Common interrupt handlers ***/ 714 677 ··· 703 752 * R9 contains the saved CR, r13 points to the paca, 704 753 * r10 contains the (bad) kernel stack pointer, 705 754 * r11 and r12 contain the saved SRR0 and SRR1. 706 - * We switch to using the paca guard page as an emergency stack, 707 - * save the registers there, and call kernel_bad_stack(), which panics. 755 + * We switch to using an emergency stack, save the registers there, 756 + * and call kernel_bad_stack(), which panics. 708 757 */ 709 758 bad_stack: 710 759 ld r1,PACAEMERGSP(r13) ··· 857 906 bl .kernel_fp_unavailable_exception 858 907 BUG_OPCODE 859 908 909 + /* 910 + * load_up_fpu(unused, unused, tsk) 911 + * Disable FP for the task which had the FPU previously, 912 + * and save its floating-point registers in its thread_struct. 913 + * Enables the FPU for use in the kernel on return. 914 + * On SMP we know the fpu is free, since we give it up every 915 + * switch (ie, no lazy save of the FP registers). 
916 + * On entry: r13 == 'current' && last_task_used_math != 'current' 917 + */ 918 + _STATIC(load_up_fpu) 919 + mfmsr r5 /* grab the current MSR */ 920 + ori r5,r5,MSR_FP 921 + mtmsrd r5 /* enable use of fpu now */ 922 + isync 923 + /* 924 + * For SMP, we don't do lazy FPU switching because it just gets too 925 + * horrendously complex, especially when a task switches from one CPU 926 + * to another. Instead we call giveup_fpu in switch_to. 927 + * 928 + */ 929 + #ifndef CONFIG_SMP 930 + ld r3,last_task_used_math@got(r2) 931 + ld r4,0(r3) 932 + cmpdi 0,r4,0 933 + beq 1f 934 + /* Save FP state to last_task_used_math's THREAD struct */ 935 + addi r4,r4,THREAD 936 + SAVE_32FPRS(0, r4) 937 + mffs fr0 938 + stfd fr0,THREAD_FPSCR(r4) 939 + /* Disable FP for last_task_used_math */ 940 + ld r5,PT_REGS(r4) 941 + ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 942 + li r6,MSR_FP|MSR_FE0|MSR_FE1 943 + andc r4,r4,r6 944 + std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 945 + 1: 946 + #endif /* CONFIG_SMP */ 947 + /* enable use of FP after return */ 948 + ld r4,PACACURRENT(r13) 949 + addi r5,r4,THREAD /* Get THREAD */ 950 + ld r4,THREAD_FPEXC_MODE(r5) 951 + ori r12,r12,MSR_FP 952 + or r12,r12,r4 953 + std r12,_MSR(r1) 954 + lfd fr0,THREAD_FPSCR(r5) 955 + mtfsf 0xff,fr0 956 + REST_32FPRS(0, r5) 957 + #ifndef CONFIG_SMP 958 + /* Update last_task_used_math to 'current' */ 959 + subi r4,r5,THREAD /* Back to 'current' */ 960 + std r4,0(r3) 961 + #endif /* CONFIG_SMP */ 962 + /* restore registers and return */ 963 + b fast_exception_return 964 + 860 965 .align 7 861 966 .globl altivec_unavailable_common 862 967 altivec_unavailable_common: ··· 927 920 ENABLE_INTS 928 921 bl .altivec_unavailable_exception 929 922 b .ret_from_except 923 + 924 + #ifdef CONFIG_ALTIVEC 925 + /* 926 + * load_up_altivec(unused, unused, tsk) 927 + * Disable VMX for the task which had it previously, 928 + * and save its vector registers in its thread_struct. 929 + * Enables the VMX for use in the kernel on return. 930 + * On SMP we know the VMX is free, since we give it up every 931 + * switch (ie, no lazy save of the vector registers). 932 + * On entry: r13 == 'current' && last_task_used_altivec != 'current' 933 + */ 934 + _STATIC(load_up_altivec) 935 + mfmsr r5 /* grab the current MSR */ 936 + oris r5,r5,MSR_VEC@h 937 + mtmsrd r5 /* enable use of VMX now */ 938 + isync 939 + 940 + /* 941 + * For SMP, we don't do lazy VMX switching because it just gets too 942 + * horrendously complex, especially when a task switches from one CPU 943 + * to another. Instead we call giveup_altvec in switch_to. 944 + * VRSAVE isn't dealt with here, that is done in the normal context 945 + * switch code. Note that we could rely on vrsave value to eventually 946 + * avoid saving all of the VREGs here... 
947 + */ 948 + #ifndef CONFIG_SMP 949 + ld r3,last_task_used_altivec@got(r2) 950 + ld r4,0(r3) 951 + cmpdi 0,r4,0 952 + beq 1f 953 + /* Save VMX state to last_task_used_altivec's THREAD struct */ 954 + addi r4,r4,THREAD 955 + SAVE_32VRS(0,r5,r4) 956 + mfvscr vr0 957 + li r10,THREAD_VSCR 958 + stvx vr0,r10,r4 959 + /* Disable VMX for last_task_used_altivec */ 960 + ld r5,PT_REGS(r4) 961 + ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 962 + lis r6,MSR_VEC@h 963 + andc r4,r4,r6 964 + std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 965 + 1: 966 + #endif /* CONFIG_SMP */ 967 + /* Hack: if we get an altivec unavailable trap with VRSAVE 968 + * set to all zeros, we assume this is a broken application 969 + * that fails to set it properly, and thus we switch it to 970 + * all 1's 971 + */ 972 + mfspr r4,SPRN_VRSAVE 973 + cmpdi 0,r4,0 974 + bne+ 1f 975 + li r4,-1 976 + mtspr SPRN_VRSAVE,r4 977 + 1: 978 + /* enable use of VMX after return */ 979 + ld r4,PACACURRENT(r13) 980 + addi r5,r4,THREAD /* Get THREAD */ 981 + oris r12,r12,MSR_VEC@h 982 + std r12,_MSR(r1) 983 + li r4,1 984 + li r10,THREAD_VSCR 985 + stw r4,THREAD_USED_VR(r5) 986 + lvx vr0,r10,r5 987 + mtvscr vr0 988 + REST_32VRS(0,r4,r5) 989 + #ifndef CONFIG_SMP 990 + /* Update last_task_used_math to 'current' */ 991 + subi r4,r5,THREAD /* Back to 'current' */ 992 + std r4,0(r3) 993 + #endif /* CONFIG_SMP */ 994 + /* restore registers and return */ 995 + b fast_exception_return 996 + #endif /* CONFIG_ALTIVEC */ 930 997 931 998 /* 932 999 * Hash table stuff ··· 1248 1167 bl .unrecoverable_exception 1249 1168 b 1b 1250 1169 1170 + /* 1171 + * Space for CPU0's segment table. 1172 + * 1173 + * On iSeries, the hypervisor must fill in at least one entry before 1174 + * we get control (with relocate on). The address is give to the hv 1175 + * as a page number (see xLparMap in LparData.c), so this must be at a 1176 + * fixed address (the linker can't compute (u64)&initial_stab >> 1177 + * PAGE_SHIFT). 1178 + */ 1179 + . = STAB0_PHYS_ADDR /* 0x6000 */ 1180 + .globl initial_stab 1181 + initial_stab: 1182 + .space 4096 1183 + 1184 + /* 1185 + * Data area reserved for FWNMI option. 1186 + * This address (0x7000) is fixed by the RPA. 1187 + */ 1188 + .= 0x7000 1189 + .globl fwnmi_data_area 1190 + fwnmi_data_area: 1191 + .space PAGE_SIZE 1251 1192 1252 1193 /* 1253 1194 * On pSeries, secondary processors spin in the following code. ··· 1303 1200 b .kexec_wait /* next kernel might do better */ 1304 1201 1305 1202 2: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 1306 - /* From now on, r24 is expected to be logica cpuid */ 1203 + /* From now on, r24 is expected to be logical cpuid */ 1307 1204 mr r24,r5 1308 1205 3: HMT_LOW 1309 1206 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ ··· 1316 1213 1317 1214 cmpwi 0,r23,0 1318 1215 #ifdef CONFIG_SMP 1319 - #ifdef SECONDARY_PROCESSORS 1320 1216 bne .__secondary_start 1321 - #endif 1322 1217 #endif 1323 1218 b 3b /* Loop until told to go */ 1324 1219 ··· 1530 1429 1531 1430 .align 8 1532 1431 copy_to_here: 1533 - 1534 - /* 1535 - * load_up_fpu(unused, unused, tsk) 1536 - * Disable FP for the task which had the FPU previously, 1537 - * and save its floating-point registers in its thread_struct. 1538 - * Enables the FPU for use in the kernel on return. 1539 - * On SMP we know the fpu is free, since we give it up every 1540 - * switch (ie, no lazy save of the FP registers). 
1541 - * On entry: r13 == 'current' && last_task_used_math != 'current' 1542 - */ 1543 - _STATIC(load_up_fpu) 1544 - mfmsr r5 /* grab the current MSR */ 1545 - ori r5,r5,MSR_FP 1546 - mtmsrd r5 /* enable use of fpu now */ 1547 - isync 1548 - /* 1549 - * For SMP, we don't do lazy FPU switching because it just gets too 1550 - * horrendously complex, especially when a task switches from one CPU 1551 - * to another. Instead we call giveup_fpu in switch_to. 1552 - * 1553 - */ 1554 - #ifndef CONFIG_SMP 1555 - ld r3,last_task_used_math@got(r2) 1556 - ld r4,0(r3) 1557 - cmpdi 0,r4,0 1558 - beq 1f 1559 - /* Save FP state to last_task_used_math's THREAD struct */ 1560 - addi r4,r4,THREAD 1561 - SAVE_32FPRS(0, r4) 1562 - mffs fr0 1563 - stfd fr0,THREAD_FPSCR(r4) 1564 - /* Disable FP for last_task_used_math */ 1565 - ld r5,PT_REGS(r4) 1566 - ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1567 - li r6,MSR_FP|MSR_FE0|MSR_FE1 1568 - andc r4,r4,r6 1569 - std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1570 - 1: 1571 - #endif /* CONFIG_SMP */ 1572 - /* enable use of FP after return */ 1573 - ld r4,PACACURRENT(r13) 1574 - addi r5,r4,THREAD /* Get THREAD */ 1575 - ld r4,THREAD_FPEXC_MODE(r5) 1576 - ori r12,r12,MSR_FP 1577 - or r12,r12,r4 1578 - std r12,_MSR(r1) 1579 - lfd fr0,THREAD_FPSCR(r5) 1580 - mtfsf 0xff,fr0 1581 - REST_32FPRS(0, r5) 1582 - #ifndef CONFIG_SMP 1583 - /* Update last_task_used_math to 'current' */ 1584 - subi r4,r5,THREAD /* Back to 'current' */ 1585 - std r4,0(r3) 1586 - #endif /* CONFIG_SMP */ 1587 - /* restore registers and return */ 1588 - b fast_exception_return 1589 - 1590 - /* 1591 - * disable_kernel_fp() 1592 - * Disable the FPU. 1593 - */ 1594 - _GLOBAL(disable_kernel_fp) 1595 - mfmsr r3 1596 - rldicl r0,r3,(63-MSR_FP_LG),1 1597 - rldicl r3,r0,(MSR_FP_LG+1),0 1598 - mtmsrd r3 /* disable use of fpu now */ 1599 - isync 1600 - blr 1601 - 1602 - /* 1603 - * giveup_fpu(tsk) 1604 - * Disable FP for the task given as the argument, 1605 - * and save the floating-point registers in its thread_struct. 1606 - * Enables the FPU for use in the kernel on return. 1607 - */ 1608 - _GLOBAL(giveup_fpu) 1609 - mfmsr r5 1610 - ori r5,r5,MSR_FP 1611 - mtmsrd r5 /* enable use of fpu now */ 1612 - isync 1613 - cmpdi 0,r3,0 1614 - beqlr- /* if no previous owner, done */ 1615 - addi r3,r3,THREAD /* want THREAD of task */ 1616 - ld r5,PT_REGS(r3) 1617 - cmpdi 0,r5,0 1618 - SAVE_32FPRS(0, r3) 1619 - mffs fr0 1620 - stfd fr0,THREAD_FPSCR(r3) 1621 - beq 1f 1622 - ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1623 - li r3,MSR_FP|MSR_FE0|MSR_FE1 1624 - andc r4,r4,r3 /* disable FP for previous task */ 1625 - std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1626 - 1: 1627 - #ifndef CONFIG_SMP 1628 - li r5,0 1629 - ld r4,last_task_used_math@got(r2) 1630 - std r5,0(r4) 1631 - #endif /* CONFIG_SMP */ 1632 - blr 1633 - 1634 - 1635 - #ifdef CONFIG_ALTIVEC 1636 - 1637 - /* 1638 - * load_up_altivec(unused, unused, tsk) 1639 - * Disable VMX for the task which had it previously, 1640 - * and save its vector registers in its thread_struct. 1641 - * Enables the VMX for use in the kernel on return. 1642 - * On SMP we know the VMX is free, since we give it up every 1643 - * switch (ie, no lazy save of the vector registers). 
1644 - * On entry: r13 == 'current' && last_task_used_altivec != 'current' 1645 - */ 1646 - _STATIC(load_up_altivec) 1647 - mfmsr r5 /* grab the current MSR */ 1648 - oris r5,r5,MSR_VEC@h 1649 - mtmsrd r5 /* enable use of VMX now */ 1650 - isync 1651 - 1652 - /* 1653 - * For SMP, we don't do lazy VMX switching because it just gets too 1654 - * horrendously complex, especially when a task switches from one CPU 1655 - * to another. Instead we call giveup_altvec in switch_to. 1656 - * VRSAVE isn't dealt with here, that is done in the normal context 1657 - * switch code. Note that we could rely on vrsave value to eventually 1658 - * avoid saving all of the VREGs here... 1659 - */ 1660 - #ifndef CONFIG_SMP 1661 - ld r3,last_task_used_altivec@got(r2) 1662 - ld r4,0(r3) 1663 - cmpdi 0,r4,0 1664 - beq 1f 1665 - /* Save VMX state to last_task_used_altivec's THREAD struct */ 1666 - addi r4,r4,THREAD 1667 - SAVE_32VRS(0,r5,r4) 1668 - mfvscr vr0 1669 - li r10,THREAD_VSCR 1670 - stvx vr0,r10,r4 1671 - /* Disable VMX for last_task_used_altivec */ 1672 - ld r5,PT_REGS(r4) 1673 - ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1674 - lis r6,MSR_VEC@h 1675 - andc r4,r4,r6 1676 - std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1677 - 1: 1678 - #endif /* CONFIG_SMP */ 1679 - /* Hack: if we get an altivec unavailable trap with VRSAVE 1680 - * set to all zeros, we assume this is a broken application 1681 - * that fails to set it properly, and thus we switch it to 1682 - * all 1's 1683 - */ 1684 - mfspr r4,SPRN_VRSAVE 1685 - cmpdi 0,r4,0 1686 - bne+ 1f 1687 - li r4,-1 1688 - mtspr SPRN_VRSAVE,r4 1689 - 1: 1690 - /* enable use of VMX after return */ 1691 - ld r4,PACACURRENT(r13) 1692 - addi r5,r4,THREAD /* Get THREAD */ 1693 - oris r12,r12,MSR_VEC@h 1694 - std r12,_MSR(r1) 1695 - li r4,1 1696 - li r10,THREAD_VSCR 1697 - stw r4,THREAD_USED_VR(r5) 1698 - lvx vr0,r10,r5 1699 - mtvscr vr0 1700 - REST_32VRS(0,r4,r5) 1701 - #ifndef CONFIG_SMP 1702 - /* Update last_task_used_math to 'current' */ 1703 - subi r4,r5,THREAD /* Back to 'current' */ 1704 - std r4,0(r3) 1705 - #endif /* CONFIG_SMP */ 1706 - /* restore registers and return */ 1707 - b fast_exception_return 1708 - 1709 - /* 1710 - * disable_kernel_altivec() 1711 - * Disable the VMX. 1712 - */ 1713 - _GLOBAL(disable_kernel_altivec) 1714 - mfmsr r3 1715 - rldicl r0,r3,(63-MSR_VEC_LG),1 1716 - rldicl r3,r0,(MSR_VEC_LG+1),0 1717 - mtmsrd r3 /* disable use of VMX now */ 1718 - isync 1719 - blr 1720 - 1721 - /* 1722 - * giveup_altivec(tsk) 1723 - * Disable VMX for the task given as the argument, 1724 - * and save the vector registers in its thread_struct. 1725 - * Enables the VMX for use in the kernel on return. 
1726 - */ 1727 - _GLOBAL(giveup_altivec) 1728 - mfmsr r5 1729 - oris r5,r5,MSR_VEC@h 1730 - mtmsrd r5 /* enable use of VMX now */ 1731 - isync 1732 - cmpdi 0,r3,0 1733 - beqlr- /* if no previous owner, done */ 1734 - addi r3,r3,THREAD /* want THREAD of task */ 1735 - ld r5,PT_REGS(r3) 1736 - cmpdi 0,r5,0 1737 - SAVE_32VRS(0,r4,r3) 1738 - mfvscr vr0 1739 - li r4,THREAD_VSCR 1740 - stvx vr0,r4,r3 1741 - beq 1f 1742 - ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1743 - lis r3,MSR_VEC@h 1744 - andc r4,r4,r3 /* disable FP for previous task */ 1745 - std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1746 - 1: 1747 - #ifndef CONFIG_SMP 1748 - li r5,0 1749 - ld r4,last_task_used_altivec@got(r2) 1750 - std r5,0(r4) 1751 - #endif /* CONFIG_SMP */ 1752 - blr 1753 - 1754 - #endif /* CONFIG_ALTIVEC */ 1755 1432 1756 1433 #ifdef CONFIG_SMP 1757 1434 #ifdef CONFIG_PPC_PMAC ··· 1881 2002 1882 2003 bl .start_kernel 1883 2004 1884 - _GLOBAL(__setup_cpu_power3) 1885 - blr 1886 - 1887 2005 _GLOBAL(hmt_init) 1888 2006 #ifdef CONFIG_HMT 1889 2007 LOADADDR(r5, hmt_thread_data) ··· 1971 2095 1972 2096 /* 1973 2097 * We put a few things here that have to be page-aligned. 1974 - * This stuff goes at the beginning of the data segment, 1975 - * which is page-aligned. 2098 + * This stuff goes at the beginning of the bss, which is page-aligned. 1976 2099 */ 1977 - .data 2100 + .section ".bss" 2101 + 1978 2102 .align 12 1979 - .globl sdata 1980 - sdata: 2103 + 1981 2104 .globl empty_zero_page 1982 2105 empty_zero_page: 1983 - .space 4096 2106 + .space PAGE_SIZE 1984 2107 1985 2108 .globl swapper_pg_dir 1986 2109 swapper_pg_dir: 1987 - .space 4096 2110 + .space PAGE_SIZE 1988 2111 1989 2112 /* 1990 2113 * This space gets a copy of optional info passed to us by the bootstrap
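The lazy-switching policy this head.S code implements (and which this merge relocates out of head.S) is easier to see in C. Below is a minimal, compilable sketch of the !CONFIG_SMP hand-off; the task structure and register-copy helpers are illustrative stand-ins, not the kernel's real types:

    struct task_sketch {
            double fpr[32];                         /* saved FP state */
    };

    static struct task_sketch *last_task_used_math; /* UP-only lazy owner */
    static double live_fpr[32];                     /* models the CPU's FP registers */

    static void save_fprs(struct task_sketch *t)
    {
            int i;

            for (i = 0; i < 32; i++)
                    t->fpr[i] = live_fpr[i];        /* flush live state to thread */
    }

    static void restore_fprs(struct task_sketch *t)
    {
            int i;

            for (i = 0; i < 32; i++)
                    live_fpr[i] = t->fpr[i];        /* reload thread state */
    }

    /* Runs from the FP-unavailable exception, as load_up_fpu does above. */
    void fp_unavailable(struct task_sketch *current_task)
    {
            if (last_task_used_math && last_task_used_math != current_task)
                    save_fprs(last_task_used_math); /* evict the previous owner */
            restore_fprs(current_task);
            last_task_used_math = current_task;     /* take ownership */
            /* the real code also sets MSR_FP in the saved MSR before returning */
    }

On SMP the lazy owner pointer is dropped entirely and state is saved unconditionally via giveup_fpu/giveup_altivec in switch_to(), because tracking a lazy owner across CPU migrations is not worth the complexity, exactly as the comments above say.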
+4 -1
arch/ppc64/kernel/iSeries_htab.c
··· 41 41 unsigned long prpn, unsigned long vflags, 42 42 unsigned long rflags) 43 43 { 44 + unsigned long arpn; 44 45 long slot; 45 46 hpte_t lhpte; 46 47 int secondary = 0; ··· 71 70 slot &= 0x7fffffffffffffff; 72 71 } 73 72 73 + arpn = phys_to_abs(prpn << PAGE_SHIFT) >> PAGE_SHIFT; 74 + 74 75 lhpte.v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID; 75 - lhpte.r = (physRpn_to_absRpn(prpn) << HPTE_R_RPN_SHIFT) | rflags; 76 + lhpte.r = (arpn << HPTE_R_RPN_SHIFT) | rflags; 76 77 77 78 /* Now fill in the actual HPTE */ 78 79 HvCallHpt_addValidate(slot, secondary, &lhpte);
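The round-trip in the hunk above widens a real page number to a byte address, translates it, then narrows it back to a page number. A toy version with an identity phys_to_abs() stub (the real one consults the iSeries chunk map) and an assumed 4K PAGE_SHIFT:

    #include <stdio.h>

    #define PAGE_SHIFT 12UL                 /* 4K pages, assumed for illustration */

    /* stand-in: the real phys_to_abs() walks the iSeries chunk map */
    static unsigned long phys_to_abs(unsigned long pa)
    {
            return pa;
    }

    int main(void)
    {
            unsigned long prpn = 0x1234;    /* real page number from the caller */
            unsigned long arpn = phys_to_abs(prpn << PAGE_SHIFT) >> PAGE_SHIFT;

            printf("prpn %#lx -> arpn %#lx\n", prpn, arpn);
            return 0;
    }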
+26 -4
arch/ppc64/kernel/iSeries_setup.c
··· 39 39 #include <asm/cputable.h> 40 40 #include <asm/sections.h> 41 41 #include <asm/iommu.h> 42 + #include <asm/firmware.h> 42 43 43 44 #include <asm/time.h> 44 45 #include "iSeries_setup.h" ··· 315 314 316 315 DBG(" -> iSeries_init_early()\n"); 317 316 317 + ppc64_firmware_features = FW_FEATURE_ISERIES; 318 + 318 319 ppcdbg_initialize(); 319 320 320 321 #if defined(CONFIG_BLK_DEV_INITRD) ··· 415 412 DBG(" <- iSeries_init_early()\n"); 416 413 } 417 414 415 + struct mschunks_map mschunks_map = { 416 + /* XXX We don't use these, but Piranha might need them. */ 417 + .chunk_size = MSCHUNKS_CHUNK_SIZE, 418 + .chunk_shift = MSCHUNKS_CHUNK_SHIFT, 419 + .chunk_mask = MSCHUNKS_OFFSET_MASK, 420 + }; 421 + EXPORT_SYMBOL(mschunks_map); 422 + 423 + void mschunks_alloc(unsigned long num_chunks) 424 + { 425 + klimit = _ALIGN(klimit, sizeof(u32)); 426 + mschunks_map.mapping = (u32 *)klimit; 427 + klimit += num_chunks * sizeof(u32); 428 + mschunks_map.num_chunks = num_chunks; 429 + } 430 + 418 431 /* 419 432 * The iSeries may have very large memories ( > 128 GB ) and a partition 420 433 * may get memory in "chunks" that may be anywhere in the 2**52 real ··· 468 449 469 450 /* Chunk size on iSeries is 256K bytes */ 470 451 totalChunks = (u32)HvLpConfig_getMsChunks(); 471 - klimit = msChunks_alloc(klimit, totalChunks, 1UL << 18); 452 + mschunks_alloc(totalChunks); 472 453 473 454 /* 474 455 * Get absolute address of our load area ··· 505 486 printk("Load area size %dK\n", loadAreaSize * 256); 506 487 507 488 for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk) 508 - msChunks.abs[nextPhysChunk] = 489 + mschunks_map.mapping[nextPhysChunk] = 509 490 loadAreaFirstChunk + nextPhysChunk; 510 491 511 492 /* ··· 514 495 */ 515 496 hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress()); 516 497 hptSizePages = (u32)HvCallHpt_getHptPages(); 517 - hptSizeChunks = hptSizePages >> (msChunks.chunk_shift - PAGE_SHIFT); 498 + hptSizeChunks = hptSizePages >> (MSCHUNKS_CHUNK_SHIFT - PAGE_SHIFT); 518 499 hptLastChunk = hptFirstChunk + hptSizeChunks - 1; 519 500 520 501 printk("HPT absolute addr = %016lx, size = %dK\n", ··· 571 552 (absChunk > hptLastChunk)) && 572 553 ((absChunk < loadAreaFirstChunk) || 573 554 (absChunk > loadAreaLastChunk))) { 574 - msChunks.abs[nextPhysChunk] = absChunk; 555 + mschunks_map.mapping[nextPhysChunk] = 556 + absChunk; 575 557 ++nextPhysChunk; 576 558 } 577 559 } ··· 963 943 ppc_md.get_rtc_time = iSeries_get_rtc_time; 964 944 ppc_md.calibrate_decr = iSeries_calibrate_decr; 965 945 ppc_md.progress = iSeries_progress; 946 + 947 + /* XXX Implement enable_pmcs for iSeries */ 966 948 967 949 if (get_paca()->lppaca.shared_proc) { 968 950 ppc_md.idle_loop = iseries_shared_idle;
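A compilable sketch of the translation that mschunks_map supports: split the physical address at the 256K chunk boundary, look the chunk number up in the table, and keep the offset. The table contents below are invented for illustration:

    #include <stdio.h>

    #define CHUNK_SHIFT 18UL                        /* 256K chunks, as above */
    #define OFFSET_MASK ((1UL << CHUNK_SHIFT) - 1)

    /* invented contents: physical chunk n lives at absolute chunk mapping[n] */
    static unsigned long mapping[] = { 7, 8, 42, 43 };

    static unsigned long chunk_to_abs(unsigned long pa)
    {
            unsigned long chunk = pa >> CHUNK_SHIFT;

            return (mapping[chunk] << CHUNK_SHIFT) | (pa & OFFSET_MASK);
    }

    int main(void)
    {
            unsigned long pa = (2UL << CHUNK_SHIFT) + 0x10;

            printf("phys %#lx -> abs %#lx\n", pa, chunk_to_abs(pa));
            return 0;
    }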
+144
arch/ppc64/kernel/iSeries_vio.c
··· 1 + /* 2 + * IBM PowerPC iSeries Virtual I/O Infrastructure Support. 3 + * 4 + * Copyright (c) 2005 Stephen Rothwell, IBM Corp. 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License 8 + * as published by the Free Software Foundation; either version 9 + * 2 of the License, or (at your option) any later version. 10 + */ 11 + #include <linux/types.h> 12 + #include <linux/device.h> 13 + #include <linux/init.h> 14 + 15 + #include <asm/vio.h> 16 + #include <asm/iommu.h> 17 + #include <asm/abs_addr.h> 18 + #include <asm/page.h> 19 + #include <asm/iSeries/vio.h> 20 + #include <asm/iSeries/HvTypes.h> 21 + #include <asm/iSeries/HvLpConfig.h> 22 + #include <asm/iSeries/HvCallXm.h> 23 + 24 + struct device *iSeries_vio_dev = &vio_bus_device.dev; 25 + EXPORT_SYMBOL(iSeries_vio_dev); 26 + 27 + static struct iommu_table veth_iommu_table; 28 + static struct iommu_table vio_iommu_table; 29 + 30 + static void __init iommu_vio_init(void) 31 + { 32 + struct iommu_table *t; 33 + struct iommu_table_cb cb; 34 + unsigned long cbp; 35 + unsigned long itc_entries; 36 + 37 + cb.itc_busno = 255; /* Bus 255 is the virtual bus */ 38 + cb.itc_virtbus = 0xff; /* Ask for virtual bus */ 39 + 40 + cbp = virt_to_abs(&cb); 41 + HvCallXm_getTceTableParms(cbp); 42 + 43 + itc_entries = cb.itc_size * PAGE_SIZE / sizeof(union tce_entry); 44 + veth_iommu_table.it_size = itc_entries / 2; 45 + veth_iommu_table.it_busno = cb.itc_busno; 46 + veth_iommu_table.it_offset = cb.itc_offset; 47 + veth_iommu_table.it_index = cb.itc_index; 48 + veth_iommu_table.it_type = TCE_VB; 49 + veth_iommu_table.it_blocksize = 1; 50 + 51 + t = iommu_init_table(&veth_iommu_table); 52 + 53 + if (!t) 54 + printk("Virtual Bus VETH TCE table failed.\n"); 55 + 56 + vio_iommu_table.it_size = itc_entries - veth_iommu_table.it_size; 57 + vio_iommu_table.it_busno = cb.itc_busno; 58 + vio_iommu_table.it_offset = cb.itc_offset + 59 + veth_iommu_table.it_size; 60 + vio_iommu_table.it_index = cb.itc_index; 61 + vio_iommu_table.it_type = TCE_VB; 62 + vio_iommu_table.it_blocksize = 1; 63 + 64 + t = iommu_init_table(&vio_iommu_table); 65 + 66 + if (!t) 67 + printk("Virtual Bus VIO TCE table failed.\n"); 68 + } 69 + 70 + /** 71 + * vio_register_device_iseries: - Register a new iSeries vio device. 72 + * @type: device type string. @unit_num: unit number used to build the bus_id.
73 + */ 74 + static struct vio_dev *__init vio_register_device_iseries(char *type, 75 + uint32_t unit_num) 76 + { 77 + struct vio_dev *viodev; 78 + 79 + /* allocate a vio_dev for this node */ 80 + viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL); 81 + if (!viodev) 82 + return NULL; 83 + memset(viodev, 0, sizeof(struct vio_dev)); 84 + 85 + snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%s%d", type, unit_num); 86 + 87 + return vio_register_device_common(viodev, viodev->dev.bus_id, type, 88 + unit_num, &vio_iommu_table); 89 + } 90 + 91 + void __init probe_bus_iseries(void) 92 + { 93 + HvLpIndexMap vlan_map; 94 + struct vio_dev *viodev; 95 + int i; 96 + 97 + /* there is only one of each of these */ 98 + vio_register_device_iseries("viocons", 0); 99 + vio_register_device_iseries("vscsi", 0); 100 + 101 + vlan_map = HvLpConfig_getVirtualLanIndexMap(); 102 + for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) { 103 + if ((vlan_map & (0x8000 >> i)) == 0) 104 + continue; 105 + viodev = vio_register_device_iseries("vlan", i); 106 + /* veth is special and has its own iommu_table */ 107 + viodev->iommu_table = &veth_iommu_table; 108 + } 109 + for (i = 0; i < HVMAXARCHITECTEDVIRTUALDISKS; i++) 110 + vio_register_device_iseries("viodasd", i); 111 + for (i = 0; i < HVMAXARCHITECTEDVIRTUALCDROMS; i++) 112 + vio_register_device_iseries("viocd", i); 113 + for (i = 0; i < HVMAXARCHITECTEDVIRTUALTAPES; i++) 114 + vio_register_device_iseries("viotape", i); 115 + } 116 + 117 + /** 118 + * vio_match_device_iseries: - Tell if an iSeries VIO device matches a 119 + * vio_device_id 120 + */ 121 + static int vio_match_device_iseries(const struct vio_device_id *id, 122 + const struct vio_dev *dev) 123 + { 124 + return strncmp(dev->type, id->type, strlen(id->type)) == 0; 125 + } 126 + 127 + /** 128 + * vio_bus_init_iseries: - Initialize the iSeries virtual IO bus 129 + */ 130 + static int __init vio_bus_init_iseries(void) 131 + { 132 + int err; 133 + 134 + err = vio_bus_init(vio_match_device_iseries, NULL, NULL); 135 + if (err == 0) { 136 + iommu_vio_init(); 137 + vio_bus_device.iommu_table = &vio_iommu_table; 138 + iSeries_vio_dev = &vio_bus_device.dev; 139 + probe_bus_iseries(); 140 + } 141 + return err; 142 + } 143 + 144 + __initcall(vio_bus_init_iseries);
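The vlan probe above walks a 16-bit index map most-significant-bit first. A standalone sketch of that walk, with an invented map value:

    #include <stdio.h>

    int main(void)
    {
            unsigned int vlan_map = 0xa000; /* invented: vlans 0 and 2 present */
            int i;

            for (i = 0; i < 16; i++)
                    if (vlan_map & (0x8000 >> i))   /* test bit i from the MSB */
                            printf("would register vlan%d\n", i);
            return 0;
    }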
+38 -111
arch/ppc64/kernel/lmb.c
··· 28 28 { 29 29 #ifdef DEBUG 30 30 unsigned long i; 31 - struct lmb *_lmb = &lmb; 32 31 33 32 udbg_printf("lmb_dump_all:\n"); 34 33 udbg_printf(" memory.cnt = 0x%lx\n", 35 - _lmb->memory.cnt); 34 + lmb.memory.cnt); 36 35 udbg_printf(" memory.size = 0x%lx\n", 37 - _lmb->memory.size); 38 - for (i=0; i < _lmb->memory.cnt ;i++) { 36 + lmb.memory.size); 37 + for (i=0; i < lmb.memory.cnt ;i++) { 39 38 udbg_printf(" memory.region[0x%x].base = 0x%lx\n", 40 - i, _lmb->memory.region[i].base); 41 - udbg_printf(" .physbase = 0x%lx\n", 42 - _lmb->memory.region[i].physbase); 39 + i, lmb.memory.region[i].base); 43 40 udbg_printf(" .size = 0x%lx\n", 44 - _lmb->memory.region[i].size); 41 + lmb.memory.region[i].size); 45 42 } 46 43 47 44 udbg_printf("\n reserved.cnt = 0x%lx\n", 48 - _lmb->reserved.cnt); 45 + lmb.reserved.cnt); 49 46 udbg_printf(" reserved.size = 0x%lx\n", 50 - _lmb->reserved.size); 51 - for (i=0; i < _lmb->reserved.cnt ;i++) { 47 + lmb.reserved.size); 48 + for (i=0; i < lmb.reserved.cnt ;i++) { 52 49 udbg_printf(" reserved.region[0x%x].base = 0x%lx\n", 53 - i, _lmb->reserved.region[i].base); 54 - udbg_printf(" .physbase = 0x%lx\n", 55 - _lmb->reserved.region[i].physbase); 50 + i, lmb.reserved.region[i].base); 56 51 udbg_printf(" .size = 0x%lx\n", 57 - _lmb->reserved.region[i].size); 52 + lmb.reserved.region[i].size); 58 53 } 59 54 #endif /* DEBUG */ 60 55 } ··· 93 98 rgn->region[r1].size += rgn->region[r2].size; 94 99 for (i=r2; i < rgn->cnt-1; i++) { 95 100 rgn->region[i].base = rgn->region[i+1].base; 96 - rgn->region[i].physbase = rgn->region[i+1].physbase; 97 101 rgn->region[i].size = rgn->region[i+1].size; 98 102 } 99 103 rgn->cnt--; ··· 102 108 void __init 103 109 lmb_init(void) 104 110 { 105 - struct lmb *_lmb = &lmb; 106 - 107 111 /* Create a dummy zero size LMB which will get coalesced away later. 108 112 * This simplifies the lmb_add() code below... 109 113 */ 110 - _lmb->memory.region[0].base = 0; 111 - _lmb->memory.region[0].size = 0; 112 - _lmb->memory.cnt = 1; 114 + lmb.memory.region[0].base = 0; 115 + lmb.memory.region[0].size = 0; 116 + lmb.memory.cnt = 1; 113 117 114 118 /* Ditto. */ 115 - _lmb->reserved.region[0].base = 0; 116 - _lmb->reserved.region[0].size = 0; 117 - _lmb->reserved.cnt = 1; 119 + lmb.reserved.region[0].base = 0; 120 + lmb.reserved.region[0].size = 0; 121 + lmb.reserved.cnt = 1; 118 122 } 119 123 120 124 /* This routine called with relocation disabled. */ 121 125 void __init 122 126 lmb_analyze(void) 123 127 { 124 - unsigned long i; 125 - unsigned long mem_size = 0; 126 - unsigned long size_mask = 0; 127 - struct lmb *_lmb = &lmb; 128 - #ifdef CONFIG_MSCHUNKS 129 - unsigned long physbase = 0; 130 - #endif 128 + int i; 131 129 132 - for (i=0; i < _lmb->memory.cnt; i++) { 133 - unsigned long lmb_size; 130 + lmb.memory.size = 0; 134 131 135 - lmb_size = _lmb->memory.region[i].size; 136 - 137 - #ifdef CONFIG_MSCHUNKS 138 - _lmb->memory.region[i].physbase = physbase; 139 - physbase += lmb_size; 140 - #else 141 - _lmb->memory.region[i].physbase = _lmb->memory.region[i].base; 142 - #endif 143 - mem_size += lmb_size; 144 - size_mask |= lmb_size; 145 - } 146 - 147 - _lmb->memory.size = mem_size; 132 + for (i = 0; i < lmb.memory.cnt; i++) 133 + lmb.memory.size += lmb.memory.region[i].size; 148 134 } 149 135 150 136 /* This routine called with relocation disabled. 
*/ ··· 142 168 adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize); 143 169 if ( adjacent > 0 ) { 144 170 rgn->region[i].base -= size; 145 - rgn->region[i].physbase -= size; 146 171 rgn->region[i].size += size; 147 172 coalesced++; 148 173 break; ··· 168 195 for (i=rgn->cnt-1; i >= 0; i--) { 169 196 if (base < rgn->region[i].base) { 170 197 rgn->region[i+1].base = rgn->region[i].base; 171 - rgn->region[i+1].physbase = rgn->region[i].physbase; 172 198 rgn->region[i+1].size = rgn->region[i].size; 173 199 } else { 174 200 rgn->region[i+1].base = base; 175 - rgn->region[i+1].physbase = lmb_abs_to_phys(base); 176 201 rgn->region[i+1].size = size; 177 202 break; 178 203 } ··· 184 213 long __init 185 214 lmb_add(unsigned long base, unsigned long size) 186 215 { 187 - struct lmb *_lmb = &lmb; 188 - struct lmb_region *_rgn = &(_lmb->memory); 216 + struct lmb_region *_rgn = &(lmb.memory); 189 217 190 218 /* On pSeries LPAR systems, the first LMB is our RMO region. */ 191 219 if ( base == 0 ) 192 - _lmb->rmo_size = size; 220 + lmb.rmo_size = size; 193 221 194 222 return lmb_add_region(_rgn, base, size); 195 223 ··· 197 227 long __init 198 228 lmb_reserve(unsigned long base, unsigned long size) 199 229 { 200 - struct lmb *_lmb = &lmb; 201 - struct lmb_region *_rgn = &(_lmb->reserved); 230 + struct lmb_region *_rgn = &(lmb.reserved); 202 231 203 232 return lmb_add_region(_rgn, base, size); 204 233 } ··· 229 260 { 230 261 long i, j; 231 262 unsigned long base = 0; 232 - struct lmb *_lmb = &lmb; 233 - struct lmb_region *_mem = &(_lmb->memory); 234 - struct lmb_region *_rsv = &(_lmb->reserved); 235 263 236 - for (i=_mem->cnt-1; i >= 0; i--) { 237 - unsigned long lmbbase = _mem->region[i].base; 238 - unsigned long lmbsize = _mem->region[i].size; 264 + for (i=lmb.memory.cnt-1; i >= 0; i--) { 265 + unsigned long lmbbase = lmb.memory.region[i].base; 266 + unsigned long lmbsize = lmb.memory.region[i].size; 239 267 240 268 if ( max_addr == LMB_ALLOC_ANYWHERE ) 241 269 base = _ALIGN_DOWN(lmbbase+lmbsize-size, align); ··· 242 276 continue; 243 277 244 278 while ( (lmbbase <= base) && 245 - ((j = lmb_overlaps_region(_rsv,base,size)) >= 0) ) { 246 - base = _ALIGN_DOWN(_rsv->region[j].base-size, align); 279 + ((j = lmb_overlaps_region(&lmb.reserved,base,size)) >= 0) ) { 280 + base = _ALIGN_DOWN(lmb.reserved.region[j].base-size, align); 247 281 } 248 282 249 283 if ( (base != 0) && (lmbbase <= base) ) ··· 253 287 if ( i < 0 ) 254 288 return 0; 255 289 256 - lmb_add_region(_rsv, base, size); 290 + lmb_add_region(&lmb.reserved, base, size); 257 291 258 292 return base; 259 293 } 260 294 295 + /* You must call lmb_analyze() before this. 
*/ 261 296 unsigned long __init 262 297 lmb_phys_mem_size(void) 263 298 { 264 - struct lmb *_lmb = &lmb; 265 - #ifdef CONFIG_MSCHUNKS 266 - return _lmb->memory.size; 267 - #else 268 - struct lmb_region *_mem = &(_lmb->memory); 269 - unsigned long total = 0; 270 - int i; 271 - 272 - /* add all physical memory to the bootmem map */ 273 - for (i=0; i < _mem->cnt; i++) 274 - total += _mem->region[i].size; 275 - return total; 276 - #endif /* CONFIG_MSCHUNKS */ 299 + return lmb.memory.size; 277 300 } 278 301 279 302 unsigned long __init 280 303 lmb_end_of_DRAM(void) 281 304 { 282 - struct lmb *_lmb = &lmb; 283 - struct lmb_region *_mem = &(_lmb->memory); 284 - int idx = _mem->cnt - 1; 305 + int idx = lmb.memory.cnt - 1; 285 306 286 - #ifdef CONFIG_MSCHUNKS 287 - return (_mem->region[idx].physbase + _mem->region[idx].size); 288 - #else 289 - return (_mem->region[idx].base + _mem->region[idx].size); 290 - #endif /* CONFIG_MSCHUNKS */ 291 - 292 - return 0; 293 - } 294 - 295 - unsigned long __init 296 - lmb_abs_to_phys(unsigned long aa) 297 - { 298 - unsigned long i, pa = aa; 299 - struct lmb *_lmb = &lmb; 300 - struct lmb_region *_mem = &(_lmb->memory); 301 - 302 - for (i=0; i < _mem->cnt; i++) { 303 - unsigned long lmbbase = _mem->region[i].base; 304 - unsigned long lmbsize = _mem->region[i].size; 305 - if ( lmb_addrs_overlap(aa,1,lmbbase,lmbsize) ) { 306 - pa = _mem->region[i].physbase + (aa - lmbbase); 307 - break; 308 - } 309 - } 310 - 311 - return pa; 307 + return (lmb.memory.region[idx].base + lmb.memory.region[idx].size); 312 308 } 313 309 314 310 /* ··· 281 353 { 282 354 extern unsigned long memory_limit; 283 355 unsigned long i, limit; 284 - struct lmb_region *mem = &(lmb.memory); 285 356 286 357 if (! memory_limit) 287 358 return; 288 359 289 360 limit = memory_limit; 290 - for (i = 0; i < mem->cnt; i++) { 291 - if (limit > mem->region[i].size) { 292 - limit -= mem->region[i].size; 361 + for (i = 0; i < lmb.memory.cnt; i++) { 362 + if (limit > lmb.memory.region[i].size) { 363 + limit -= lmb.memory.region[i].size; 293 364 continue; 294 365 } 295 366 296 - mem->region[i].size = limit; 297 - mem->cnt = i + 1; 367 + lmb.memory.region[i].size = limit; 368 + lmb.memory.cnt = i + 1; 298 369 break; 299 370 } 300 371 }
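With the physbase field gone, the lmb bookkeeping above reduces to sums over {base, size} pairs. A self-contained model of what lmb_analyze(), lmb_phys_mem_size() and lmb_end_of_DRAM() now compute, over an invented two-region layout:

    #include <stdio.h>

    struct region_sketch { unsigned long base, size; };

    /* invented layout: two memory regions with a hole between them */
    static struct region_sketch memory[] = {
            { 0x00000000, 0x08000000 },
            { 0x10000000, 0x04000000 },
    };
    static const int memory_cnt = 2;

    int main(void)
    {
            unsigned long total = 0;
            int i;

            for (i = 0; i < memory_cnt; i++)        /* what lmb_analyze() does */
                    total += memory[i].size;

            printf("phys_mem_size = %#lx\n", total);
            printf("end_of_DRAM   = %#lx\n",        /* end of the last region */
                   memory[memory_cnt - 1].base + memory[memory_cnt - 1].size);
            return 0;
    }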
+3 -3
arch/ppc64/kernel/lparcfg.c
··· 29 29 #include <asm/iSeries/HvLpConfig.h> 30 30 #include <asm/lppaca.h> 31 31 #include <asm/hvcall.h> 32 - #include <asm/cputable.h> 32 + #include <asm/firmware.h> 33 33 #include <asm/rtas.h> 34 34 #include <asm/system.h> 35 35 #include <asm/time.h> ··· 377 377 378 378 partition_active_processors = lparcfg_count_active_processors(); 379 379 380 - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { 380 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 381 381 unsigned long h_entitled, h_unallocated; 382 382 unsigned long h_aggregation, h_resource; 383 383 unsigned long pool_idle_time, pool_procs; ··· 571 571 mode_t mode = S_IRUSR; 572 572 573 573 /* Allow writing if we have FW_FEATURE_SPLPAR */ 574 - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { 574 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 575 575 lparcfg_fops.write = lparcfg_write; 576 576 mode |= S_IWUSR; 577 577 }
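firmware_has_feature() replaces open-coded tests of cur_cpu_spec->firmware_features throughout this merge. The likely shape is a simple bitmask test against the new ppc64_firmware_features global; the bit values below are illustrative only:

    /* illustrative bit assignments, not the kernel's actual values */
    #define FW_FEATURE_ISERIES (1UL << 0)
    #define FW_FEATURE_SPLPAR  (1UL << 1)

    unsigned long ppc64_firmware_features;  /* filled in once during early boot */

    static inline int firmware_has_feature(unsigned long feature)
    {
            return (ppc64_firmware_features & feature) != 0;
    }

    /* usage mirrors the hunks above, e.g.:
     *     if (firmware_has_feature(FW_FEATURE_SPLPAR))
     *             vpa_init(cpu);
     */

Keeping the mask global rather than per-cpu-spec makes sense here: firmware features are a property of the platform, not of the processor model.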
+98
arch/ppc64/kernel/misc.S
··· 680 680 ld r30,-16(r1) 681 681 blr 682 682 683 + /* 684 + * disable_kernel_fp() 685 + * Disable the FPU. 686 + */ 687 + _GLOBAL(disable_kernel_fp) 688 + mfmsr r3 689 + rldicl r0,r3,(63-MSR_FP_LG),1 690 + rldicl r3,r0,(MSR_FP_LG+1),0 691 + mtmsrd r3 /* disable use of fpu now */ 692 + isync 693 + blr 694 + 695 + /* 696 + * giveup_fpu(tsk) 697 + * Disable FP for the task given as the argument, 698 + * and save the floating-point registers in its thread_struct. 699 + * Enables the FPU for use in the kernel on return. 700 + */ 701 + _GLOBAL(giveup_fpu) 702 + mfmsr r5 703 + ori r5,r5,MSR_FP 704 + mtmsrd r5 /* enable use of fpu now */ 705 + isync 706 + cmpdi 0,r3,0 707 + beqlr- /* if no previous owner, done */ 708 + addi r3,r3,THREAD /* want THREAD of task */ 709 + ld r5,PT_REGS(r3) 710 + cmpdi 0,r5,0 711 + SAVE_32FPRS(0, r3) 712 + mffs fr0 713 + stfd fr0,THREAD_FPSCR(r3) 714 + beq 1f 715 + ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 716 + li r3,MSR_FP|MSR_FE0|MSR_FE1 717 + andc r4,r4,r3 /* disable FP for previous task */ 718 + std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 719 + 1: 720 + #ifndef CONFIG_SMP 721 + li r5,0 722 + ld r4,last_task_used_math@got(r2) 723 + std r5,0(r4) 724 + #endif /* CONFIG_SMP */ 725 + blr 726 + 727 + #ifdef CONFIG_ALTIVEC 728 + 729 + #if 0 /* this has no callers for now */ 730 + /* 731 + * disable_kernel_altivec() 732 + * Disable the VMX. 733 + */ 734 + _GLOBAL(disable_kernel_altivec) 735 + mfmsr r3 736 + rldicl r0,r3,(63-MSR_VEC_LG),1 737 + rldicl r3,r0,(MSR_VEC_LG+1),0 738 + mtmsrd r3 /* disable use of VMX now */ 739 + isync 740 + blr 741 + #endif /* 0 */ 742 + 743 + /* 744 + * giveup_altivec(tsk) 745 + * Disable VMX for the task given as the argument, 746 + * and save the vector registers in its thread_struct. 747 + * Enables the VMX for use in the kernel on return. 748 + */ 749 + _GLOBAL(giveup_altivec) 750 + mfmsr r5 751 + oris r5,r5,MSR_VEC@h 752 + mtmsrd r5 /* enable use of VMX now */ 753 + isync 754 + cmpdi 0,r3,0 755 + beqlr- /* if no previous owner, done */ 756 + addi r3,r3,THREAD /* want THREAD of task */ 757 + ld r5,PT_REGS(r3) 758 + cmpdi 0,r5,0 759 + SAVE_32VRS(0,r4,r3) 760 + mfvscr vr0 761 + li r4,THREAD_VSCR 762 + stvx vr0,r4,r3 763 + beq 1f 764 + ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 765 + lis r3,MSR_VEC@h 766 + andc r4,r4,r3 /* disable VMX for previous task */ 767 + std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 768 + 1: 769 + #ifndef CONFIG_SMP 770 + li r5,0 771 + ld r4,last_task_used_altivec@got(r2) 772 + std r5,0(r4) 773 + #endif /* CONFIG_SMP */ 774 + blr 775 + 776 + #endif /* CONFIG_ALTIVEC */ 777 + 778 + _GLOBAL(__setup_cpu_power3) 779 + blr 780 + 683 781 /* kexec_wait(phys_cpu) 684 782 * 685 783 * wait for the flag to change, indicating this kernel is going away but
+2 -1
arch/ppc64/kernel/pSeries_iommu.c
··· 45 45 #include <asm/plpar_wrappers.h> 46 46 #include <asm/pSeries_reconfig.h> 47 47 #include <asm/systemcfg.h> 48 + #include <asm/firmware.h> 48 49 #include "pci.h" 49 50 50 51 #define DBG(fmt...) ··· 547 546 } 548 547 549 548 if (systemcfg->platform & PLATFORM_LPAR) { 550 - if (cur_cpu_spec->firmware_features & FW_FEATURE_MULTITCE) { 549 + if (firmware_has_feature(FW_FEATURE_MULTITCE)) { 551 550 ppc_md.tce_build = tce_buildmulti_pSeriesLP; 552 551 ppc_md.tce_free = tce_freemulti_pSeriesLP; 553 552 } else {
+1 -3
arch/ppc64/kernel/pSeries_lpar.c
··· 52 52 EXPORT_SYMBOL(plpar_hcall_norets); 53 53 EXPORT_SYMBOL(plpar_hcall_8arg_2ret); 54 54 55 - extern void fw_feature_init(void); 56 55 extern void pSeries_find_serial_port(void); 57 56 58 57 ··· 278 279 unsigned long va, unsigned long prpn, 279 280 unsigned long vflags, unsigned long rflags) 280 281 { 281 - unsigned long arpn = physRpn_to_absRpn(prpn); 282 282 unsigned long lpar_rc; 283 283 unsigned long flags; 284 284 unsigned long slot; ··· 288 290 if (vflags & HPTE_V_LARGE) 289 291 hpte_v &= ~(1UL << HPTE_V_AVPN_SHIFT); 290 292 291 - hpte_r = (arpn << HPTE_R_RPN_SHIFT) | rflags; 293 + hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags; 292 294 293 295 /* Now fill in the actual HPTE */ 294 296 /* Set CEC cookie to 0 */
+29 -10
arch/ppc64/kernel/pSeries_setup.c
··· 60 60 #include <asm/nvram.h> 61 61 #include <asm/plpar_wrappers.h> 62 62 #include <asm/xics.h> 63 - #include <asm/cputable.h> 63 + #include <asm/firmware.h> 64 + #include <asm/pmc.h> 64 65 65 66 #include "i8259.h" 66 67 #include "mpic.h" ··· 188 187 " MPIC "); 189 188 } 190 189 190 + static void pseries_lpar_enable_pmcs(void) 191 + { 192 + unsigned long set, reset; 193 + 194 + power4_enable_pmcs(); 195 + 196 + set = 1UL << 63; 197 + reset = 0; 198 + plpar_hcall_norets(H_PERFMON, set, reset); 199 + 200 + /* instruct hypervisor to maintain PMCs */ 201 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) 202 + get_paca()->lppaca.pmcregs_in_use = 1; 203 + } 204 + 191 205 static void __init pSeries_setup_arch(void) 192 206 { 193 207 /* Fixup ppc_md depending on the type of interrupt controller */ ··· 247 231 248 232 pSeries_nvram_init(); 249 233 250 - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) 251 - vpa_init(boot_cpuid); 252 - 253 234 /* Choose an idle loop */ 254 - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { 235 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 236 + vpa_init(boot_cpuid); 255 237 if (get_paca()->lppaca.shared_proc) { 256 238 printk(KERN_INFO "Using shared processor idle loop\n"); 257 239 ppc_md.idle_loop = pseries_shared_idle; ··· 261 247 printk(KERN_INFO "Using default idle loop\n"); 262 248 ppc_md.idle_loop = default_idle; 263 249 } 250 + 251 + if (systemcfg->platform & PLATFORM_LPAR) 252 + ppc_md.enable_pmcs = pseries_lpar_enable_pmcs; 253 + else 254 + ppc_md.enable_pmcs = power4_enable_pmcs; 264 255 } 265 256 266 257 static int __init pSeries_init_panel(void) ··· 279 260 arch_initcall(pSeries_init_panel); 280 261 281 262 282 - /* Build up the firmware_features bitmask field 263 + /* Build up the ppc64_firmware_features bitmask field 283 264 * using contents of device-tree/ibm,hypertas-functions. 284 265 * Ultimately this functionality may be moved into prom.c prom_init(). 285 266 */ 286 - void __init fw_feature_init(void) 267 + static void __init fw_feature_init(void) 287 268 { 288 269 struct device_node * dn; 289 270 char * hypertas; ··· 291 272 292 273 DBG(" -> fw_feature_init()\n"); 293 274 294 - cur_cpu_spec->firmware_features = 0; 275 + ppc64_firmware_features = 0; 295 276 dn = of_find_node_by_path("/rtas"); 296 277 if (dn == NULL) { 297 278 printk(KERN_ERR "WARNING ! Cannot find RTAS in device-tree !\n"); ··· 307 288 if ((firmware_features_table[i].name) && 308 289 (strcmp(firmware_features_table[i].name,hypertas))==0) { 309 290 /* we have a match */ 310 - cur_cpu_spec->firmware_features |= 291 + ppc64_firmware_features |= 311 292 (firmware_features_table[i].val); 312 293 break; 313 294 } ··· 321 302 of_node_put(dn); 322 303 no_rtas: 323 304 printk(KERN_INFO "firmware_features = 0x%lx\n", 324 - cur_cpu_spec->firmware_features); 305 + ppc64_firmware_features); 325 306 326 307 DBG(" <- fw_feature_init()\n"); 327 308 }
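Hanging PMC enablement off ppc_md means generic code no longer switches on the platform type. A reduced model of that indirection, with stubbed hooks standing in for power4_enable_pmcs() and the H_PERFMON path; all names here are invented for illustration:

    #include <stdio.h>

    struct machdep_calls_sketch {
            void (*enable_pmcs)(void);
    };

    static struct machdep_calls_sketch ppc_md_sketch;

    static void power4_pmcs_stub(void) { puts("HID0 sequence"); }   /* bare metal */
    static void lpar_pmcs_stub(void)   { puts("H_PERFMON hcall"); } /* LPAR */

    int main(void)
    {
            int is_lpar = 1;                        /* pretend we are an LPAR */

            /* what pSeries_setup_arch() does above: install the right hook */
            ppc_md_sketch.enable_pmcs = is_lpar ? lpar_pmcs_stub
                                                : power4_pmcs_stub;

            /* what the generic ppc64_enable_pmcs() caller reduces to */
            if (ppc_md_sketch.enable_pmcs)
                    ppc_md_sketch.enable_pmcs();
            return 0;
    }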
+2 -1
arch/ppc64/kernel/pSeries_smp.c
··· 41 41 #include <asm/machdep.h> 42 42 #include <asm/xics.h> 43 43 #include <asm/cputable.h> 44 + #include <asm/firmware.h> 44 45 #include <asm/system.h> 45 46 #include <asm/rtas.h> 46 47 #include <asm/plpar_wrappers.h> ··· 327 326 if (cpu != boot_cpuid) 328 327 xics_setup_cpu(); 329 328 330 - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) 329 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) 331 330 vpa_init(cpu); 332 331 333 332 cpu_clear(cpu, of_spin_map);
+266
arch/ppc64/kernel/pSeries_vio.c
··· 1 + /* 2 + * IBM PowerPC pSeries Virtual I/O Infrastructure Support. 3 + * 4 + * Copyright (c) 2003-2005 IBM Corp. 5 + * Dave Engebretsen engebret@us.ibm.com 6 + * Santiago Leon santil@us.ibm.com 7 + * Hollis Blanchard <hollisb@us.ibm.com> 8 + * Stephen Rothwell 9 + * 10 + * This program is free software; you can redistribute it and/or 11 + * modify it under the terms of the GNU General Public License 12 + * as published by the Free Software Foundation; either version 13 + * 2 of the License, or (at your option) any later version. 14 + */ 15 + 16 + #include <linux/init.h> 17 + #include <linux/module.h> 18 + #include <linux/mm.h> 19 + #include <linux/kobject.h> 20 + #include <asm/iommu.h> 21 + #include <asm/dma.h> 22 + #include <asm/vio.h> 23 + #include <asm/hvcall.h> 24 + 25 + extern struct subsystem devices_subsys; /* needed for vio_find_name() */ 26 + 27 + static void probe_bus_pseries(void) 28 + { 29 + struct device_node *node_vroot, *of_node; 30 + 31 + node_vroot = find_devices("vdevice"); 32 + if ((node_vroot == NULL) || (node_vroot->child == NULL)) 33 + /* this machine doesn't do virtual IO, and that's ok */ 34 + return; 35 + 36 + /* 37 + * Create struct vio_devices for each virtual device in the device tree. 38 + * Drivers will associate with them later. 39 + */ 40 + for (of_node = node_vroot->child; of_node != NULL; 41 + of_node = of_node->sibling) { 42 + printk(KERN_DEBUG "%s: processing %p\n", __FUNCTION__, of_node); 43 + vio_register_device_node(of_node); 44 + } 45 + } 46 + 47 + /** 48 + * vio_match_device_pseries: - Tell if a pSeries VIO device matches a 49 + * vio_device_id 50 + */ 51 + static int vio_match_device_pseries(const struct vio_device_id *id, 52 + const struct vio_dev *dev) 53 + { 54 + return (strncmp(dev->type, id->type, strlen(id->type)) == 0) && 55 + device_is_compatible(dev->dev.platform_data, id->compat); 56 + } 57 + 58 + static void vio_release_device_pseries(struct device *dev) 59 + { 60 + /* XXX free TCE table */ 61 + of_node_put(dev->platform_data); 62 + } 63 + 64 + static ssize_t viodev_show_devspec(struct device *dev, 65 + struct device_attribute *attr, char *buf) 66 + { 67 + struct device_node *of_node = dev->platform_data; 68 + 69 + return sprintf(buf, "%s\n", of_node->full_name); 70 + } 71 + DEVICE_ATTR(devspec, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_devspec, NULL); 72 + 73 + static void vio_unregister_device_pseries(struct vio_dev *viodev) 74 + { 75 + device_remove_file(&viodev->dev, &dev_attr_devspec); 76 + } 77 + 78 + /** 79 + * vio_bus_init_pseries: - Initialize the pSeries virtual IO bus 80 + */ 81 + static int __init vio_bus_init_pseries(void) 82 + { 83 + int err; 84 + 85 + err = vio_bus_init(vio_match_device_pseries, 86 + vio_unregister_device_pseries, 87 + vio_release_device_pseries); 88 + if (err == 0) 89 + probe_bus_pseries(); 90 + return err; 91 + } 92 + 93 + __initcall(vio_bus_init_pseries); 94 + 95 + /** 96 + * vio_build_iommu_table: - gets the dma information from OF and 97 + * builds the TCE tree. 98 + * @dev: the virtual device. 99 + * 100 + * Returns a pointer to the built tce tree, or NULL if it can't 101 + * find property. 
102 + */ 103 + static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) 104 + { 105 + unsigned int *dma_window; 106 + struct iommu_table *newTceTable; 107 + unsigned long offset; 108 + int dma_window_property_size; 109 + 110 + dma_window = (unsigned int *) get_property(dev->dev.platform_data, "ibm,my-dma-window", &dma_window_property_size); 111 + if(!dma_window) { 112 + return NULL; 113 + } 114 + 115 + newTceTable = (struct iommu_table *) kmalloc(sizeof(struct iommu_table), GFP_KERNEL); 116 + 117 + /* There should be some code to extract the phys-encoded offset 118 + using prom_n_addr_cells(). However, according to a comment 119 + on earlier versions, it's always zero, so we don't bother */ 120 + offset = dma_window[1] >> PAGE_SHIFT; 121 + 122 + /* TCE table size - measured in tce entries */ 123 + newTceTable->it_size = dma_window[4] >> PAGE_SHIFT; 124 + /* offset for VIO should always be 0 */ 125 + newTceTable->it_offset = offset; 126 + newTceTable->it_busno = 0; 127 + newTceTable->it_index = (unsigned long)dma_window[0]; 128 + newTceTable->it_type = TCE_VB; 129 + 130 + return iommu_init_table(newTceTable); 131 + } 132 + 133 + /** 134 + * vio_register_device_node: - Register a new vio device. 135 + * @of_node: The OF node for this device. 136 + * 137 + * Creates and initializes a vio_dev structure from the data in 138 + * of_node (dev.platform_data) and adds it to the list of virtual devices. 139 + * Returns a pointer to the created vio_dev or NULL if node has 140 + * NULL device_type or compatible fields. 141 + */ 142 + struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node) 143 + { 144 + struct vio_dev *viodev; 145 + unsigned int *unit_address; 146 + unsigned int *irq_p; 147 + 148 + /* we need the 'device_type' property, in order to match with drivers */ 149 + if ((NULL == of_node->type)) { 150 + printk(KERN_WARNING 151 + "%s: node %s missing 'device_type'\n", __FUNCTION__, 152 + of_node->name ? of_node->name : "<unknown>"); 153 + return NULL; 154 + } 155 + 156 + unit_address = (unsigned int *)get_property(of_node, "reg", NULL); 157 + if (!unit_address) { 158 + printk(KERN_WARNING "%s: node %s missing 'reg'\n", __FUNCTION__, 159 + of_node->name ? of_node->name : "<unknown>"); 160 + return NULL; 161 + } 162 + 163 + /* allocate a vio_dev for this node */ 164 + viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL); 165 + if (!viodev) { 166 + return NULL; 167 + } 168 + memset(viodev, 0, sizeof(struct vio_dev)); 169 + 170 + viodev->dev.platform_data = of_node_get(of_node); 171 + 172 + viodev->irq = NO_IRQ; 173 + irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL); 174 + if (irq_p) { 175 + int virq = virt_irq_create_mapping(*irq_p); 176 + if (virq == NO_IRQ) { 177 + printk(KERN_ERR "Unable to allocate interrupt " 178 + "number for %s\n", of_node->full_name); 179 + } else 180 + viodev->irq = irq_offset_up(virq); 181 + } 182 + 183 + snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address); 184 + 185 + /* register with generic device framework */ 186 + if (vio_register_device_common(viodev, of_node->name, of_node->type, 187 + *unit_address, vio_build_iommu_table(viodev)) 188 + == NULL) { 189 + /* XXX free TCE table */ 190 + kfree(viodev); 191 + return NULL; 192 + } 193 + device_create_file(&viodev->dev, &dev_attr_devspec); 194 + 195 + return viodev; 196 + } 197 + EXPORT_SYMBOL(vio_register_device_node); 198 + 199 + /** 200 + * vio_get_attribute: - get attribute for virtual device 201 + * @vdev: The vio device to get property. 
202 + * @which: The property/attribute to be extracted. 203 + * @length: Pointer to length of returned data size (unused if NULL). 204 + * 205 + * Calls prom.c's get_property() to return the value of the 206 + * attribute specified by the preprocessor constant @which 207 + */ 208 + const void * vio_get_attribute(struct vio_dev *vdev, void* which, int* length) 209 + { 210 + return get_property(vdev->dev.platform_data, (char*)which, length); 211 + } 212 + EXPORT_SYMBOL(vio_get_attribute); 213 + 214 + /* vio_find_name() - internal because only vio.c knows how we formatted the 215 + * kobject name 216 + * XXX once vio_bus_type.devices is actually used as a kset in 217 + * drivers/base/bus.c, this function should be removed in favor of 218 + * "device_find(kobj_name, &vio_bus_type)" 219 + */ 220 + static struct vio_dev *vio_find_name(const char *kobj_name) 221 + { 222 + struct kobject *found; 223 + 224 + found = kset_find_obj(&devices_subsys.kset, kobj_name); 225 + if (!found) 226 + return NULL; 227 + 228 + return to_vio_dev(container_of(found, struct device, kobj)); 229 + } 230 + 231 + /** 232 + * vio_find_node - find an already-registered vio_dev 233 + * @vnode: device_node of the virtual device we're looking for 234 + */ 235 + struct vio_dev *vio_find_node(struct device_node *vnode) 236 + { 237 + uint32_t *unit_address; 238 + char kobj_name[BUS_ID_SIZE]; 239 + 240 + /* construct the kobject name from the device node */ 241 + unit_address = (uint32_t *)get_property(vnode, "reg", NULL); 242 + if (!unit_address) 243 + return NULL; 244 + snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address); 245 + 246 + return vio_find_name(kobj_name); 247 + } 248 + EXPORT_SYMBOL(vio_find_node); 249 + 250 + int vio_enable_interrupts(struct vio_dev *dev) 251 + { 252 + int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE); 253 + if (rc != H_Success) 254 + printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc); 255 + return rc; 256 + } 257 + EXPORT_SYMBOL(vio_enable_interrupts); 258 + 259 + int vio_disable_interrupts(struct vio_dev *dev) 260 + { 261 + int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE); 262 + if (rc != H_Success) 263 + printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc); 264 + return rc; 265 + } 266 + EXPORT_SYMBOL(vio_disable_interrupts);
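The "ibm,my-dma-window" decode in vio_build_iommu_table() above reads three cells out of the property. A standalone sketch of that decode; the property value is invented, and the cell layout simply mirrors the indices the code uses rather than a verified statement of the OF binding:

    #include <stdio.h>

    #define PAGE_SHIFT 12UL                 /* assumed 4K pages */

    int main(void)
    {
            /* invented property: word 0 = index, word 1 = offset, word 4 = size */
            unsigned int dma_window[5] = { 0x3, 0x0, 0x0, 0x0, 0x10000000 };

            unsigned long it_index  = dma_window[0];
            unsigned long it_offset = dma_window[1] >> PAGE_SHIFT;
            unsigned long it_size   = dma_window[4] >> PAGE_SHIFT; /* in TCEs */

            printf("index=%#lx offset=%#lx entries=%#lx\n",
                   it_index, it_offset, it_size);
            return 0;
    }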
+2 -2
arch/ppc64/kernel/pacaData.c
··· 78 78 79 79 #define BOOTCPU_PACA_INIT(number) \ 80 80 { \ 81 - PACA_INIT_COMMON(number, 1, 0, STAB0_VIRT_ADDR) \ 81 + PACA_INIT_COMMON(number, 1, 0, (u64)&initial_stab) \ 82 82 PACA_INIT_ISERIES(number) \ 83 83 } 84 84 ··· 90 90 91 91 #define BOOTCPU_PACA_INIT(number) \ 92 92 { \ 93 - PACA_INIT_COMMON(number, 1, STAB0_PHYS_ADDR, STAB0_VIRT_ADDR) \ 93 + PACA_INIT_COMMON(number, 1, STAB0_PHYS_ADDR, (u64)&initial_stab) \ 94 94 } 95 95 #endif 96 96
+2
arch/ppc64/kernel/pmac_setup.c
··· 71 71 #include <asm/of_device.h> 72 72 #include <asm/lmb.h> 73 73 #include <asm/smu.h> 74 + #include <asm/pmc.h> 74 75 75 76 #include "pmac.h" 76 77 #include "mpic.h" ··· 512 511 .progress = pmac_progress, 513 512 .check_legacy_ioport = pmac_check_legacy_ioport, 514 513 .idle_loop = native_idle, 514 + .enable_pmcs = power4_enable_pmcs, 515 515 };
+21
arch/ppc64/kernel/pmc.c
··· 65 65 spin_unlock(&pmc_owner_lock); 66 66 } 67 67 EXPORT_SYMBOL_GPL(release_pmc_hardware); 68 + 69 + void power4_enable_pmcs(void) 70 + { 71 + unsigned long hid0; 72 + 73 + hid0 = mfspr(HID0); 74 + hid0 |= 1UL << (63 - 20); 75 + 76 + /* POWER4 requires the following sequence */ 77 + asm volatile( 78 + "sync\n" 79 + "mtspr %1, %0\n" 80 + "mfspr %0, %1\n" 81 + "mfspr %0, %1\n" 82 + "mfspr %0, %1\n" 83 + "mfspr %0, %1\n" 84 + "mfspr %0, %1\n" 85 + "mfspr %0, %1\n" 86 + "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0): 87 + "memory"); 88 + }
+5 -7
arch/ppc64/kernel/process.c
··· 50 50 #include <asm/machdep.h> 51 51 #include <asm/iSeries/HvCallHpt.h> 52 52 #include <asm/cputable.h> 53 + #include <asm/firmware.h> 53 54 #include <asm/sections.h> 54 55 #include <asm/tlbflush.h> 55 56 #include <asm/time.h> ··· 203 202 new_thread = &new->thread; 204 203 old_thread = &current->thread; 205 204 206 - /* Collect purr utilization data per process and per processor wise */ 207 - /* purr is nothing but processor time base */ 208 - 209 - #if defined(CONFIG_PPC_PSERIES) 210 - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { 205 + /* Collect purr utilization data per process and per processor. 206 + * The purr is nothing but the processor time base. 207 + */ 208 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 211 209 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 212 210 long unsigned start_tb, current_tb; 213 211 start_tb = old_thread->start_tb; ··· 214 214 old_thread->accum_tb += (current_tb - start_tb); 215 215 new_thread->start_tb = current_tb; 216 216 } 217 - #endif 218 - 219 217 220 218 local_irq_save(flags); 221 219 last = _switch(old_thread, new_thread);
+152 -32
arch/ppc64/kernel/prom.c
··· 625 625 626 626 static inline char *find_flat_dt_string(u32 offset) 627 627 { 628 - return ((char *)initial_boot_params) + initial_boot_params->off_dt_strings 629 - + offset; 628 + return ((char *)initial_boot_params) + 629 + initial_boot_params->off_dt_strings + offset; 630 630 } 631 631 632 632 /** ··· 635 635 * unflatten the tree 636 636 */ 637 637 static int __init scan_flat_dt(int (*it)(unsigned long node, 638 - const char *full_path, void *data), 638 + const char *uname, int depth, 639 + void *data), 639 640 void *data) 640 641 { 641 642 unsigned long p = ((unsigned long)initial_boot_params) + 642 643 initial_boot_params->off_dt_struct; 643 644 int rc = 0; 645 + int depth = -1; 644 646 645 647 do { 646 648 u32 tag = *((u32 *)p); 647 649 char *pathp; 648 650 649 651 p += 4; 650 - if (tag == OF_DT_END_NODE) 652 + if (tag == OF_DT_END_NODE) { 653 + depth --; 654 + continue; 655 + } 656 + if (tag == OF_DT_NOP) 651 657 continue; 652 658 if (tag == OF_DT_END) 653 659 break; 654 660 if (tag == OF_DT_PROP) { 655 661 u32 sz = *((u32 *)p); 656 662 p += 8; 657 - p = _ALIGN(p, sz >= 8 ? 8 : 4); 663 + if (initial_boot_params->version < 0x10) 664 + p = _ALIGN(p, sz >= 8 ? 8 : 4); 658 665 p += sz; 659 666 p = _ALIGN(p, 4); 660 667 continue; ··· 671 664 " device tree !\n", tag); 672 665 return -EINVAL; 673 666 } 667 + depth++; 674 668 pathp = (char *)p; 675 669 p = _ALIGN(p + strlen(pathp) + 1, 4); 676 - rc = it(p, pathp, data); 670 + if ((*pathp) == '/') { 671 + char *lp, *np; 672 + for (lp = NULL, np = pathp; *np; np++) 673 + if ((*np) == '/') 674 + lp = np+1; 675 + if (lp != NULL) 676 + pathp = lp; 677 + } 678 + rc = it(p, pathp, depth, data); 677 679 if (rc != 0) 678 680 break; 679 681 } while(1); ··· 705 689 const char *nstr; 706 690 707 691 p += 4; 692 + if (tag == OF_DT_NOP) 693 + continue; 708 694 if (tag != OF_DT_PROP) 709 695 return NULL; 710 696 711 697 sz = *((u32 *)p); 712 698 noff = *((u32 *)(p + 4)); 713 699 p += 8; 714 - p = _ALIGN(p, sz >= 8 ? 8 : 4); 700 + if (initial_boot_params->version < 0x10) 701 + p = _ALIGN(p, sz >= 8 ? 8 : 4); 715 702 716 703 nstr = find_flat_dt_string(noff); 717 704 if (nstr == NULL) { 718 - printk(KERN_WARNING "Can't find property index name !\n"); 705 + printk(KERN_WARNING "Can't find property index" 706 + " name !\n"); 719 707 return NULL; 720 708 } 721 709 if (strcmp(name, nstr) == 0) { ··· 733 713 } 734 714 735 715 static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, 736 - unsigned long align) 716 + unsigned long align) 737 717 { 738 718 void *res; 739 719 ··· 747 727 static unsigned long __init unflatten_dt_node(unsigned long mem, 748 728 unsigned long *p, 749 729 struct device_node *dad, 750 - struct device_node ***allnextpp) 730 + struct device_node ***allnextpp, 731 + unsigned long fpsize) 751 732 { 752 733 struct device_node *np; 753 734 struct property *pp, **prev_pp = NULL; 754 735 char *pathp; 755 736 u32 tag; 756 - unsigned int l; 737 + unsigned int l, allocl; 738 + int has_name = 0; 739 + int new_format = 0; 757 740 758 741 tag = *((u32 *)(*p)); 759 742 if (tag != OF_DT_BEGIN_NODE) { ··· 765 742 } 766 743 *p += 4; 767 744 pathp = (char *)*p; 768 - l = strlen(pathp) + 1; 745 + l = allocl = strlen(pathp) + 1; 769 746 *p = _ALIGN(*p + l, 4); 770 747 771 - np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + l, 748 + /* version 0x10 has a more compact unit name here instead of the full 749 + * path. we accumulate the full path size using "fpsize", we'll rebuild 750 + * it later. 
We detect this because the first character of the name is 751 + * not '/'. 752 + */ 753 + if ((*pathp) != '/') { 754 + new_format = 1; 755 + if (fpsize == 0) { 756 + /* root node: special case. fpsize accounts for path 757 + * plus terminating zero. root node only has '/', so 758 + * fpsize should be 2, but we want to avoid the first 759 + * level nodes to have two '/' so we use fpsize 1 here 760 + */ 761 + fpsize = 1; 762 + allocl = 2; 763 + } else { 764 + /* account for '/' and path size minus terminal 0 765 + * already in 'l' 766 + */ 767 + fpsize += l; 768 + allocl = fpsize; 769 + } 770 + } 771 + 772 + 773 + np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl, 772 774 __alignof__(struct device_node)); 773 775 if (allnextpp) { 774 776 memset(np, 0, sizeof(*np)); 775 777 np->full_name = ((char*)np) + sizeof(struct device_node); 776 - memcpy(np->full_name, pathp, l); 778 + if (new_format) { 779 + char *p = np->full_name; 780 + /* rebuild full path for new format */ 781 + if (dad && dad->parent) { 782 + strcpy(p, dad->full_name); 783 + #ifdef DEBUG 784 + if ((strlen(p) + l + 1) != allocl) { 785 + DBG("%s: p: %d, l: %d, a: %d\n", 786 + pathp, strlen(p), l, allocl); 787 + } 788 + #endif 789 + p += strlen(p); 790 + } 791 + *(p++) = '/'; 792 + memcpy(p, pathp, l); 793 + } else 794 + memcpy(np->full_name, pathp, l); 777 795 prev_pp = &np->properties; 778 796 **allnextpp = np; 779 797 *allnextpp = &np->allnext; 780 798 if (dad != NULL) { 781 799 np->parent = dad; 782 - /* we temporarily use the `next' field as `last_child'. */ 800 + /* we temporarily use the next field as `last_child'*/ 783 801 if (dad->next == 0) 784 802 dad->child = np; 785 803 else ··· 834 770 char *pname; 835 771 836 772 tag = *((u32 *)(*p)); 773 + if (tag == OF_DT_NOP) { 774 + *p += 4; 775 + continue; 776 + } 837 777 if (tag != OF_DT_PROP) 838 778 break; 839 779 *p += 4; 840 780 sz = *((u32 *)(*p)); 841 781 noff = *((u32 *)((*p) + 4)); 842 - *p = _ALIGN((*p) + 8, sz >= 8 ? 8 : 4); 782 + *p += 8; 783 + if (initial_boot_params->version < 0x10) 784 + *p = _ALIGN(*p, sz >= 8 ? 
8 : 4); 843 785 844 786 pname = find_flat_dt_string(noff); 845 787 if (pname == NULL) { 846 788 printk("Can't find property name in list !\n"); 847 789 break; 848 790 } 791 + if (strcmp(pname, "name") == 0) 792 + has_name = 1; 849 793 l = strlen(pname) + 1; 850 794 pp = unflatten_dt_alloc(&mem, sizeof(struct property), 851 795 __alignof__(struct property)); ··· 873 801 } 874 802 *p = _ALIGN((*p) + sz, 4); 875 803 } 804 + /* with version 0x10 we may not have the name property, recreate 805 + * it here from the unit name if absent 806 + */ 807 + if (!has_name) { 808 + char *p = pathp, *ps = pathp, *pa = NULL; 809 + int sz; 810 + 811 + while (*p) { 812 + if ((*p) == '@') 813 + pa = p; 814 + if ((*p) == '/') 815 + ps = p + 1; 816 + p++; 817 + } 818 + if (pa < ps) 819 + pa = p; 820 + sz = (pa - ps) + 1; 821 + pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz, 822 + __alignof__(struct property)); 823 + if (allnextpp) { 824 + pp->name = "name"; 825 + pp->length = sz; 826 + pp->value = (unsigned char *)(pp + 1); 827 + *prev_pp = pp; 828 + prev_pp = &pp->next; 829 + memcpy(pp->value, ps, sz - 1); 830 + ((char *)pp->value)[sz - 1] = 0; 831 + DBG("fixed up name for %s -> %s\n", pathp, pp->value); 832 + } 833 + } 876 834 if (allnextpp) { 877 835 *prev_pp = NULL; 878 836 np->name = get_property(np, "name", NULL); ··· 914 812 np->type = "<NULL>"; 915 813 } 916 814 while (tag == OF_DT_BEGIN_NODE) { 917 - mem = unflatten_dt_node(mem, p, np, allnextpp); 815 + mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize); 918 816 tag = *((u32 *)(*p)); 919 817 } 920 818 if (tag != OF_DT_END_NODE) { 921 - printk("Weird tag at start of node: %x\n", tag); 819 + printk("Weird tag at end of node: %x\n", tag); 922 820 return mem; 923 821 } 924 822 *p += 4; ··· 944 842 /* First pass, scan for size */ 945 843 start = ((unsigned long)initial_boot_params) + 946 844 initial_boot_params->off_dt_struct; 947 - size = unflatten_dt_node(0, &start, NULL, NULL); 845 + size = unflatten_dt_node(0, &start, NULL, NULL, 0); 846 + size = (size | 3) + 1; 948 847 949 848 DBG(" size is %lx, allocating...\n", size); 950 849 951 850 /* Allocate memory for the expanded device tree */ 952 - mem = (unsigned long)abs_to_virt(lmb_alloc(size, 953 - __alignof__(struct device_node))); 851 + mem = lmb_alloc(size + 4, __alignof__(struct device_node)); 852 + if (!mem) { 853 + DBG("Couldn't allocate memory with lmb_alloc()!\n"); 854 + panic("Couldn't allocate memory with lmb_alloc()!\n"); 855 + } 856 + mem = (unsigned long)abs_to_virt(mem); 857 + 858 + ((u32 *)mem)[size / 4] = 0xdeadbeef; 859 + 954 860 DBG(" unflattening...\n", mem); 955 861 956 862 /* Second pass, do actual unflattening */ 957 863 start = ((unsigned long)initial_boot_params) + 958 864 initial_boot_params->off_dt_struct; 959 - unflatten_dt_node(mem, &start, NULL, &allnextp); 865 + unflatten_dt_node(mem, &start, NULL, &allnextp, 0); 960 866 if (*((u32 *)start) != OF_DT_END) 961 - printk(KERN_WARNING "Weird tag at end of tree: %x\n", *((u32 *)start)); 867 + printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start)); 868 + if (((u32 *)mem)[size / 4] != 0xdeadbeef) 869 + printk(KERN_WARNING "End of tree marker overwritten: %08x\n", 870 + ((u32 *)mem)[size / 4] ); 962 871 *allnextp = NULL; 963 872 964 873 /* Get pointer to OF "/chosen" node for use everywhere */ ··· 993 880 994 881 995 882 static int __init early_init_dt_scan_cpus(unsigned long node, 996 - const char *full_path, void *data) 883 + const char *uname, int depth, void *data) 997 884 { 998 885 char *type = 
get_flat_dt_prop(node, "device_type", NULL); 999 886 u32 *prop; ··· 1060 947 } 1061 948 1062 949 static int __init early_init_dt_scan_chosen(unsigned long node, 1063 - const char *full_path, void *data) 950 + const char *uname, int depth, void *data) 1064 951 { 1065 952 u32 *prop; 1066 953 u64 *prop64; 1067 954 extern unsigned long memory_limit, tce_alloc_start, tce_alloc_end; 1068 955 1069 - if (strcmp(full_path, "/chosen") != 0) 956 + DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname); 957 + 958 + if (depth != 1 || strcmp(uname, "chosen") != 0) 1070 959 return 0; 1071 960 1072 961 /* get platform type */ ··· 1118 1003 } 1119 1004 1120 1005 static int __init early_init_dt_scan_root(unsigned long node, 1121 - const char *full_path, void *data) 1006 + const char *uname, int depth, void *data) 1122 1007 { 1123 1008 u32 *prop; 1124 1009 1125 - if (strcmp(full_path, "/") != 0) 1010 + if (depth != 0) 1126 1011 return 0; 1127 1012 1128 1013 prop = (u32 *)get_flat_dt_prop(node, "#size-cells", NULL); 1129 1014 dt_root_size_cells = (prop == NULL) ? 1 : *prop; 1130 - 1015 + DBG("dt_root_size_cells = %x\n", dt_root_size_cells); 1016 + 1131 1017 prop = (u32 *)get_flat_dt_prop(node, "#address-cells", NULL); 1132 1018 dt_root_addr_cells = (prop == NULL) ? 2 : *prop; 1019 + DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells); 1133 1020 1134 1021 /* break now */ 1135 1022 return 1; ··· 1159 1042 1160 1043 1161 1044 static int __init early_init_dt_scan_memory(unsigned long node, 1162 - const char *full_path, void *data) 1045 + const char *uname, int depth, void *data) 1163 1046 { 1164 1047 char *type = get_flat_dt_prop(node, "device_type", NULL); 1165 1048 cell_t *reg, *endp; ··· 1175 1058 1176 1059 endp = reg + (l / sizeof(cell_t)); 1177 1060 1178 - DBG("memory scan node %s ...\n", full_path); 1061 + DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n", 1062 + uname, l, reg[0], reg[1], reg[2], reg[3]); 1063 + 1179 1064 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { 1180 1065 unsigned long base, size; 1181 1066 ··· 1588 1469 struct device_node *np = allnodes; 1589 1470 1590 1471 read_lock(&devtree_lock); 1591 - for (; np != 0; np = np->allnext) 1472 + for (; np != 0; np = np->allnext) { 1592 1473 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0 1593 1474 && of_node_get(np)) 1594 1475 break; 1476 + } 1595 1477 read_unlock(&devtree_lock); 1596 1478 return np; 1597 1479 }
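The reworked scanners above match on (depth, unit name) instead of a full path, with depth starting at -1 so the root lands at depth 0 and /chosen at depth 1. A toy token walk showing that bookkeeping over an invented stream:

    #include <stdio.h>

    enum { DT_BEGIN_NODE = 1, DT_END_NODE = 2, DT_END = 9 };

    int main(void)
    {
            /* invented stream: root { chosen { } memory { } } */
            int tokens[] = { DT_BEGIN_NODE, DT_BEGIN_NODE, DT_END_NODE,
                             DT_BEGIN_NODE, DT_END_NODE, DT_END_NODE, DT_END };
            int depth = -1, i;

            for (i = 0; tokens[i] != DT_END; i++) {
                    if (tokens[i] == DT_BEGIN_NODE)
                            printf("node at depth %d\n", ++depth);
                    else
                            --depth;        /* DT_END_NODE pops a level */
            }
            return 0;
    }

Run, this prints depth 0 for the root and depth 1 for its two children, which is why early_init_dt_scan_root() now checks depth == 0 and early_init_dt_scan_chosen() checks depth == 1.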
+55 -33
arch/ppc64/kernel/prom_init.c
··· 1534 1534 */ 1535 1535 #define MAX_PROPERTY_NAME 64 1536 1536 1537 - static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start, 1537 + static void __init scan_dt_build_strings(phandle node, 1538 + unsigned long *mem_start, 1538 1539 unsigned long *mem_end) 1539 1540 { 1540 1541 unsigned long offset = reloc_offset(); ··· 1548 1547 /* get and store all property names */ 1549 1548 prev_name = RELOC(""); 1550 1549 for (;;) { 1551 - int rc; 1552 - 1553 1550 /* 64 is max len of name including nul. */ 1554 1551 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1); 1555 - rc = call_prom("nextprop", 3, 1, node, prev_name, namep); 1556 - if (rc != 1) { 1552 + if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) { 1557 1553 /* No more nodes: unwind alloc */ 1558 1554 *mem_start = (unsigned long)namep; 1559 1555 break; 1560 1556 } 1557 + 1558 + /* skip "name" */ 1559 + if (strcmp(namep, RELOC("name")) == 0) { 1560 + *mem_start = (unsigned long)namep; 1561 + prev_name = RELOC("name"); 1562 + continue; 1563 + } 1564 + /* get/create string entry */ 1561 1565 soff = dt_find_string(namep); 1562 1566 if (soff != 0) { 1563 1567 *mem_start = (unsigned long)namep; ··· 1577 1571 1578 1572 /* do all our children */ 1579 1573 child = call_prom("child", 1, 1, node); 1580 - while (child != (phandle)0) { 1574 + while (child != 0) { 1581 1575 scan_dt_build_strings(child, mem_start, mem_end); 1582 1576 child = call_prom("peer", 1, 1, child); 1583 1577 } ··· 1586 1580 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, 1587 1581 unsigned long *mem_end) 1588 1582 { 1589 - int l, align; 1590 1583 phandle child; 1591 - char *namep, *prev_name, *sstart, *p, *ep; 1584 + char *namep, *prev_name, *sstart, *p, *ep, *lp, *path; 1592 1585 unsigned long soff; 1593 1586 unsigned char *valp; 1594 1587 unsigned long offset = reloc_offset(); 1595 - char pname[MAX_PROPERTY_NAME]; 1596 - char *path; 1597 - 1598 - path = RELOC(prom_scratch); 1588 + static char pname[MAX_PROPERTY_NAME]; 1589 + int l; 1599 1590 1600 1591 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end); 1601 1592 ··· 1602 1599 namep, *mem_end - *mem_start); 1603 1600 if (l >= 0) { 1604 1601 /* Didn't fit? Get more room. 
*/ 1605 - if (l+1 > *mem_end - *mem_start) { 1602 + if ((l+1) > (*mem_end - *mem_start)) { 1606 1603 namep = make_room(mem_start, mem_end, l+1, 1); 1607 1604 call_prom("package-to-path", 3, 1, node, namep, l); 1608 1605 } 1609 1606 namep[l] = '\0'; 1607 + 1610 1608 /* Fixup an Apple bug where they have bogus \0 chars in the 1611 1609 * middle of the path in some properties 1612 1610 */ 1613 1611 for (p = namep, ep = namep + l; p < ep; p++) 1614 1612 if (*p == '\0') { 1615 1613 memmove(p, p+1, ep - p); 1616 - ep--; l--; 1614 + ep--; l--; p--; 1617 1615 } 1618 - *mem_start = _ALIGN(((unsigned long) namep) + strlen(namep) + 1, 4); 1616 + 1617 + /* now try to extract the unit name in that mess */ 1618 + for (p = namep, lp = NULL; *p; p++) 1619 + if (*p == '/') 1620 + lp = p + 1; 1621 + if (lp != NULL) 1622 + memmove(namep, lp, strlen(lp) + 1); 1623 + *mem_start = _ALIGN(((unsigned long) namep) + 1624 + strlen(namep) + 1, 4); 1619 1625 } 1620 1626 1621 1627 /* get it again for debugging */ 1628 + path = RELOC(prom_scratch); 1622 1629 memset(path, 0, PROM_SCRATCH_SIZE); 1623 1630 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1); 1624 1631 ··· 1636 1623 prev_name = RELOC(""); 1637 1624 sstart = (char *)RELOC(dt_string_start); 1638 1625 for (;;) { 1639 - int rc; 1640 - 1641 - rc = call_prom("nextprop", 3, 1, node, prev_name, pname); 1642 - if (rc != 1) 1626 + if (call_prom("nextprop", 3, 1, node, prev_name, 1627 + RELOC(pname)) != 1) 1643 1628 break; 1644 1629 1630 + /* skip "name" */ 1631 + if (strcmp(RELOC(pname), RELOC("name")) == 0) { 1632 + prev_name = RELOC("name"); 1633 + continue; 1634 + } 1635 + 1645 1636 /* find string offset */ 1646 - soff = dt_find_string(pname); 1637 + soff = dt_find_string(RELOC(pname)); 1647 1638 if (soff == 0) { 1648 - prom_printf("WARNING: Can't find string index for <%s>, node %s\n", 1649 - pname, path); 1639 + prom_printf("WARNING: Can't find string index for" 1640 + " <%s>, node %s\n", RELOC(pname), path); 1650 1641 break; 1651 1642 } 1652 1643 prev_name = sstart + soff; 1653 1644 1654 1645 /* get length */ 1655 - l = call_prom("getproplen", 2, 1, node, pname); 1646 + l = call_prom("getproplen", 2, 1, node, RELOC(pname)); 1656 1647 1657 1648 /* sanity checks */ 1658 1649 if (l == PROM_ERROR) ··· 1665 1648 prom_printf("WARNING: ignoring large property "); 1666 1649 /* It seems OF doesn't null-terminate the path :-( */ 1667 1650 prom_printf("[%s] ", path); 1668 - prom_printf("%s length 0x%x\n", pname, l); 1651 + prom_printf("%s length 0x%x\n", RELOC(pname), l); 1669 1652 continue; 1670 1653 } 1671 1654 ··· 1675 1658 dt_push_token(soff, mem_start, mem_end); 1676 1659 1677 1660 /* push property content */ 1678 - align = (l >= 8) ? 8 : 4; 1679 - valp = make_room(mem_start, mem_end, l, align); 1680 - call_prom("getprop", 4, 1, node, pname, valp, l); 1661 + valp = make_room(mem_start, mem_end, l, 4); 1662 + call_prom("getprop", 4, 1, node, RELOC(pname), valp, l); 1681 1663 *mem_start = _ALIGN(*mem_start, 4); 1682 1664 } 1683 1665 1684 1666 /* Add a "linux,phandle" property. 
*/ 1685 1667 soff = dt_find_string(RELOC("linux,phandle")); 1686 1668 if (soff == 0) 1687 - prom_printf("WARNING: Can't find string index for <linux-phandle>" 1688 - " node %s\n", path); 1669 + prom_printf("WARNING: Can't find string index for" 1670 + " <linux-phandle> node %s\n", path); 1689 1671 else { 1690 1672 dt_push_token(OF_DT_PROP, mem_start, mem_end); 1691 1673 dt_push_token(4, mem_start, mem_end); ··· 1695 1679 1696 1680 /* do all our children */ 1697 1681 child = call_prom("child", 1, 1, node); 1698 - while (child != (phandle)0) { 1682 + while (child != 0) { 1699 1683 scan_dt_build_struct(child, mem_start, mem_end); 1700 1684 child = call_prom("peer", 1, 1, child); 1701 1685 } ··· 1734 1718 1735 1719 /* Build header and make room for mem rsv map */ 1736 1720 mem_start = _ALIGN(mem_start, 4); 1737 - hdr = make_room(&mem_start, &mem_end, sizeof(struct boot_param_header), 4); 1721 + hdr = make_room(&mem_start, &mem_end, 1722 + sizeof(struct boot_param_header), 4); 1738 1723 RELOC(dt_header_start) = (unsigned long)hdr; 1739 1724 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8); 1740 1725 ··· 1748 1731 namep = make_room(&mem_start, &mem_end, 16, 1); 1749 1732 strcpy(namep, RELOC("linux,phandle")); 1750 1733 mem_start = (unsigned long)namep + strlen(namep) + 1; 1751 - RELOC(dt_string_end) = mem_start; 1752 1734 1753 1735 /* Build string array */ 1754 1736 prom_printf("Building dt strings...\n"); 1755 1737 scan_dt_build_strings(root, &mem_start, &mem_end); 1738 + RELOC(dt_string_end) = mem_start; 1756 1739 1757 1740 /* Build structure */ 1758 1741 mem_start = PAGE_ALIGN(mem_start); ··· 1767 1750 hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start); 1768 1751 hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start); 1769 1752 hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start); 1753 + hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start); 1770 1754 hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start); 1771 1755 hdr->version = OF_DT_VERSION; 1772 - hdr->last_comp_version = 1; 1756 + /* Version 16 is not backward compatible */ 1757 + hdr->last_comp_version = 0x10; 1773 1758 1774 1759 /* Reserve the whole thing and copy the reserve map in, we 1775 1760 * also bump mem_reserve_cnt to cause further reservations to ··· 1827 1808 /* does it need fixup ? */ 1828 1809 if (prom_getproplen(i2c, "interrupts") > 0) 1829 1810 return; 1811 + 1812 + prom_printf("fixing up bogus interrupts for u3 i2c...\n"); 1813 + 1830 1814 /* interrupt on this revision of u3 is number 0 and level */ 1831 1815 interrupts[0] = 0; 1832 1816 interrupts[1] = 1;
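scan_dt_build_strings() above interns each property name once and hands out offsets into a single string block, with offset 0 reserved to mean "not found". A small model of that dedup; intern() is a stand-in for the dt_find_string()/make_room() pair:

    #include <stdio.h>
    #include <string.h>

    static char strblock[256];
    static unsigned long strblock_end = 1;  /* offset 0 reserved for "absent" */

    /* return the offset of name in the block, appending it if new */
    static unsigned long intern(const char *name)
    {
            unsigned long off;

            for (off = 1; off < strblock_end; off += strlen(strblock + off) + 1)
                    if (strcmp(strblock + off, name) == 0)
                            return off;     /* reuse the existing copy */

            strcpy(strblock + strblock_end, name);
            off = strblock_end;
            strblock_end += strlen(name) + 1;
            return off;
    }

    int main(void)
    {
            unsigned long a = intern("reg");
            unsigned long b = intern("name");
            unsigned long c = intern("reg");

            printf("%lu %lu %lu\n", a, b, c);       /* prints 1 5 1 */
            return 0;
    }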
+17 -2
arch/ppc64/kernel/rtas_pci.c
··· 58 58 return 0; 59 59 } 60 60 61 + static int of_device_available(struct device_node * dn) 62 + { 63 + char * status; 64 + 65 + status = get_property(dn, "status", NULL); 66 + 67 + if (!status) 68 + return 1; 69 + 70 + if (!strcmp(status, "okay")) 71 + return 1; 72 + 73 + return 0; 74 + } 75 + 61 76 static int rtas_read_config(struct device_node *dn, int where, int size, u32 *val) 62 77 { 63 78 int returnval = -1; ··· 118 103 119 104 /* Search only direct children of the bus */ 120 105 for (dn = busdn->child; dn; dn = dn->sibling) 121 - if (dn->devfn == devfn) 106 + if (dn->devfn == devfn && of_device_available(dn)) 122 107 return rtas_read_config(dn, where, size, val); 123 108 return PCIBIOS_DEVICE_NOT_FOUND; 124 109 } ··· 161 146 162 147 /* Search only direct children of the bus */ 163 148 for (dn = busdn->child; dn; dn = dn->sibling) 164 - if (dn->devfn == devfn) 149 + if (dn->devfn == devfn && of_device_available(dn)) 165 150 return rtas_write_config(dn, where, size, val); 166 151 return PCIBIOS_DEVICE_NOT_FOUND; 167 152 }
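
of_device_available() encodes the Open Firmware convention for the "status" property: a node with no such property, or with the value "okay", is usable, while other values such as "disabled" or "fail" mean config-space accesses should skip it. A standalone mock of the bus-child walk above; struct node and find_child() are reduced stand-ins for the kernel's device_node and sibling loop:

#include <stdio.h>
#include <string.h>

/* Hypothetical reduced node: status is NULL when the property is absent. */
struct node {
	const char *status;
	int devfn;
	struct node *sibling;
};

static int node_available(const struct node *dn)
{
	if (!dn->status)		/* no "status" property: usable */
		return 1;
	return strcmp(dn->status, "okay") == 0;
}

static struct node *find_child(struct node *first, int devfn)
{
	struct node *dn;

	for (dn = first; dn; dn = dn->sibling)
		if (dn->devfn == devfn && node_available(dn))
			return dn;
	return NULL;
}

int main(void)
{
	struct node good = { NULL, 8, NULL };
	struct node bad = { "disabled", 8, &good };	/* same devfn, disabled */

	printf("%s\n", find_child(&bad, 8) == &good ?
	       "disabled node skipped" : "bug");
	return 0;
}
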
+17 -11
arch/ppc64/kernel/setup.c
··· 536 536 537 537 DBG(" -> check_for_initrd()\n"); 538 538 539 - prop = (u64 *)get_property(of_chosen, "linux,initrd-start", NULL); 540 - if (prop != NULL) { 541 - initrd_start = (unsigned long)__va(*prop); 542 - prop = (u64 *)get_property(of_chosen, "linux,initrd-end", NULL); 539 + if (of_chosen) { 540 + prop = (u64 *)get_property(of_chosen, 541 + "linux,initrd-start", NULL); 543 542 if (prop != NULL) { 544 - initrd_end = (unsigned long)__va(*prop); 545 - initrd_below_start_ok = 1; 546 - } else 547 - initrd_start = 0; 543 + initrd_start = (unsigned long)__va(*prop); 544 + prop = (u64 *)get_property(of_chosen, 545 + "linux,initrd-end", NULL); 546 + if (prop != NULL) { 547 + initrd_end = (unsigned long)__va(*prop); 548 + initrd_below_start_ok = 1; 549 + } else 550 + initrd_start = 0; 551 + } 548 552 } 549 553 550 554 /* If we were passed an initrd, set the ROOT_DEV properly if the values ··· 631 627 * Initialize xmon 632 628 */ 633 629 #ifdef CONFIG_XMON_DEFAULT 634 - xmon_init(); 630 + xmon_init(1); 635 631 #endif 636 632 /* 637 633 * Register early console ··· 1347 1343 /* ensure xmon is enabled */ 1348 1344 if (p) { 1349 1345 if (strncmp(p, "on", 2) == 0) 1350 - xmon_init(); 1346 + xmon_init(1); 1347 + if (strncmp(p, "off", 3) == 0) 1348 + xmon_init(0); 1351 1349 if (strncmp(p, "early", 5) != 0) 1352 1350 return 0; 1353 1351 } 1354 - xmon_init(); 1352 + xmon_init(1); 1355 1353 debugger(NULL); 1356 1354 1357 1355 return 0;
+5 -52
arch/ppc64/kernel/sysfs.c
··· 13 13 #include <asm/current.h> 14 14 #include <asm/processor.h> 15 15 #include <asm/cputable.h> 16 + #include <asm/firmware.h> 16 17 #include <asm/hvcall.h> 17 18 #include <asm/prom.h> 18 19 #include <asm/systemcfg.h> ··· 101 100 } 102 101 __setup("smt-snooze-delay=", setup_smt_snooze_delay); 103 102 103 + #endif /* CONFIG_PPC_MULTIPLATFORM */ 104 + 104 105 /* 105 106 * Enabling PMCs will slow partition context switch times so we only do 106 107 * it the first time we write to the PMCs. ··· 112 109 113 110 void ppc64_enable_pmcs(void) 114 111 { 115 - unsigned long hid0; 116 - #ifdef CONFIG_PPC_PSERIES 117 - unsigned long set, reset; 118 - #endif /* CONFIG_PPC_PSERIES */ 119 - 120 112 /* Only need to enable them once */ 121 113 if (__get_cpu_var(pmcs_enabled)) 122 114 return; 123 115 124 116 __get_cpu_var(pmcs_enabled) = 1; 125 117 126 - switch (systemcfg->platform) { 127 - case PLATFORM_PSERIES: 128 - case PLATFORM_POWERMAC: 129 - hid0 = mfspr(HID0); 130 - hid0 |= 1UL << (63 - 20); 131 - 132 - /* POWER4 requires the following sequence */ 133 - asm volatile( 134 - "sync\n" 135 - "mtspr %1, %0\n" 136 - "mfspr %0, %1\n" 137 - "mfspr %0, %1\n" 138 - "mfspr %0, %1\n" 139 - "mfspr %0, %1\n" 140 - "mfspr %0, %1\n" 141 - "mfspr %0, %1\n" 142 - "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0): 143 - "memory"); 144 - break; 145 - 146 - #ifdef CONFIG_PPC_PSERIES 147 - case PLATFORM_PSERIES_LPAR: 148 - set = 1UL << 63; 149 - reset = 0; 150 - plpar_hcall_norets(H_PERFMON, set, reset); 151 - break; 152 - #endif /* CONFIG_PPC_PSERIES */ 153 - 154 - default: 155 - break; 156 - } 157 - 158 - #ifdef CONFIG_PPC_PSERIES 159 - /* instruct hypervisor to maintain PMCs */ 160 - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) 161 - get_paca()->lppaca.pmcregs_in_use = 1; 162 - #endif /* CONFIG_PPC_PSERIES */ 118 + if (ppc_md.enable_pmcs) 119 + ppc_md.enable_pmcs(); 163 120 } 164 - 165 - #else 166 - 167 - /* PMC stuff */ 168 - void ppc64_enable_pmcs(void) 169 - { 170 - /* XXX Implement for iseries */ 171 - } 172 - #endif /* CONFIG_PPC_MULTIPLATFORM */ 173 - 174 121 EXPORT_SYMBOL(ppc64_enable_pmcs); 175 122 176 123 /* XXX convert to rusty's on_one_cpu */
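
The sysfs.c rewrite replaces the platform switch in ppc64_enable_pmcs() with a callback: the generic code keeps only the once-only bookkeeping and each platform installs its own enable routine at boot (the HID0 sequence or the H_PERFMON hcall that the removed code did inline). A standalone mock of that ppc_md-style indirection, with invented scaffolding:

#include <stdio.h>

struct machdep_calls {
	void (*enable_pmcs)(void);
};

static struct machdep_calls ppc_md;
static int pmcs_enabled;		/* the kernel keeps this per-cpu */

static void lpar_enable_pmcs(void)
{
	puts("plpar_hcall_norets(H_PERFMON, ...)");
}

static void enable_pmcs(void)
{
	if (pmcs_enabled)		/* only need to enable them once */
		return;
	pmcs_enabled = 1;
	if (ppc_md.enable_pmcs)		/* platform may not provide one */
		ppc_md.enable_pmcs();
}

int main(void)
{
	ppc_md.enable_pmcs = lpar_enable_pmcs;	/* done in platform setup */
	enable_pmcs();
	enable_pmcs();				/* second call is a no-op */
	return 0;
}
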
+3 -4
arch/ppc64/kernel/time.c
··· 67 67 #include <asm/prom.h> 68 68 #include <asm/sections.h> 69 69 #include <asm/systemcfg.h> 70 + #include <asm/firmware.h> 70 71 71 72 u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; 72 73 ··· 371 370 process_hvlpevents(regs); 372 371 #endif 373 372 374 - /* collect purr register values often, for accurate calculations */ 375 - #if defined(CONFIG_PPC_PSERIES) 376 - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { 373 + /* collect purr register values often, for accurate calculations */ 374 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 377 375 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 378 376 cu->current_tb = mfspr(SPRN_PURR); 379 377 } 380 - #endif 381 378 382 379 irq_exit(); 383 380
+26 -381
arch/ppc64/kernel/vio.c
··· 1 1 /* 2 2 * IBM PowerPC Virtual I/O Infrastructure Support. 3 3 * 4 - * Copyright (c) 2003 IBM Corp. 4 + * Copyright (c) 2003-2005 IBM Corp. 5 5 * Dave Engebretsen engebret@us.ibm.com 6 6 * Santiago Leon santil@us.ibm.com 7 7 * Hollis Blanchard <hollisb@us.ibm.com> 8 + * Stephen Rothwell 8 9 * 9 10 * This program is free software; you can redistribute it and/or 10 11 * modify it under the terms of the GNU General Public License ··· 15 14 16 15 #include <linux/init.h> 17 16 #include <linux/console.h> 18 - #include <linux/version.h> 19 17 #include <linux/module.h> 20 - #include <linux/kobject.h> 21 18 #include <linux/mm.h> 22 19 #include <linux/dma-mapping.h> 23 - #include <asm/rtas.h> 24 20 #include <asm/iommu.h> 25 21 #include <asm/dma.h> 26 - #include <asm/ppcdebug.h> 27 22 #include <asm/vio.h> 28 - #include <asm/hvcall.h> 29 - #include <asm/iSeries/vio.h> 30 - #include <asm/iSeries/HvTypes.h> 31 - #include <asm/iSeries/HvCallXm.h> 32 - #include <asm/iSeries/HvLpConfig.h> 33 - 34 - #define DBGENTER() pr_debug("%s entered\n", __FUNCTION__) 35 - 36 - extern struct subsystem devices_subsys; /* needed for vio_find_name() */ 37 23 38 24 static const struct vio_device_id *vio_match_device( 39 25 const struct vio_device_id *, const struct vio_dev *); 40 26 41 - #ifdef CONFIG_PPC_PSERIES 42 - static struct iommu_table *vio_build_iommu_table(struct vio_dev *); 43 - static int vio_num_address_cells; 44 - #endif 45 - #ifdef CONFIG_PPC_ISERIES 46 - static struct iommu_table veth_iommu_table; 47 - static struct iommu_table vio_iommu_table; 48 - #endif 49 - static struct vio_dev vio_bus_device = { /* fake "parent" device */ 27 + struct vio_dev vio_bus_device = { /* fake "parent" device */ 50 28 .name = vio_bus_device.dev.bus_id, 51 29 .type = "", 52 - #ifdef CONFIG_PPC_ISERIES 53 - .iommu_table = &vio_iommu_table, 54 - #endif 55 30 .dev.bus_id = "vio", 56 31 .dev.bus = &vio_bus_type, 57 32 }; 58 33 59 - #ifdef CONFIG_PPC_ISERIES 60 - static struct vio_dev *__init vio_register_device_iseries(char *type, 61 - uint32_t unit_num); 62 - 63 - struct device *iSeries_vio_dev = &vio_bus_device.dev; 64 - EXPORT_SYMBOL(iSeries_vio_dev); 65 - 66 - #define device_is_compatible(a, b) 1 67 - 68 - #endif 34 + static int (*is_match)(const struct vio_device_id *id, 35 + const struct vio_dev *dev); 36 + static void (*unregister_device_callback)(struct vio_dev *dev); 37 + static void (*release_device_callback)(struct device *dev); 69 38 70 39 /* convert from struct device to struct vio_dev and pass to driver. 
71 40 * dev->driver has already been set by generic code because vio_bus_match ··· 46 75 struct vio_driver *viodrv = to_vio_driver(dev->driver); 47 76 const struct vio_device_id *id; 48 77 int error = -ENODEV; 49 - 50 - DBGENTER(); 51 78 52 79 if (!viodrv->probe) 53 80 return error; ··· 63 94 { 64 95 struct vio_dev *viodev = to_vio_dev(dev); 65 96 struct vio_driver *viodrv = to_vio_driver(dev->driver); 66 - 67 - DBGENTER(); 68 97 69 98 if (viodrv->remove) { 70 99 return viodrv->remove(viodev); ··· 113 146 static const struct vio_device_id * vio_match_device(const struct vio_device_id *ids, 114 147 const struct vio_dev *dev) 115 148 { 116 - DBGENTER(); 117 - 118 149 while (ids->type) { 119 - if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) && 120 - device_is_compatible(dev->dev.platform_data, ids->compat)) 150 + if (is_match(ids, dev)) 121 151 return ids; 122 152 ids++; 123 153 } 124 154 return NULL; 125 155 } 126 156 127 - #ifdef CONFIG_PPC_ISERIES 128 - void __init iommu_vio_init(void) 129 - { 130 - struct iommu_table *t; 131 - struct iommu_table_cb cb; 132 - unsigned long cbp; 133 - unsigned long itc_entries; 134 - 135 - cb.itc_busno = 255; /* Bus 255 is the virtual bus */ 136 - cb.itc_virtbus = 0xff; /* Ask for virtual bus */ 137 - 138 - cbp = virt_to_abs(&cb); 139 - HvCallXm_getTceTableParms(cbp); 140 - 141 - itc_entries = cb.itc_size * PAGE_SIZE / sizeof(union tce_entry); 142 - veth_iommu_table.it_size = itc_entries / 2; 143 - veth_iommu_table.it_busno = cb.itc_busno; 144 - veth_iommu_table.it_offset = cb.itc_offset; 145 - veth_iommu_table.it_index = cb.itc_index; 146 - veth_iommu_table.it_type = TCE_VB; 147 - veth_iommu_table.it_blocksize = 1; 148 - 149 - t = iommu_init_table(&veth_iommu_table); 150 - 151 - if (!t) 152 - printk("Virtual Bus VETH TCE table failed.\n"); 153 - 154 - vio_iommu_table.it_size = itc_entries - veth_iommu_table.it_size; 155 - vio_iommu_table.it_busno = cb.itc_busno; 156 - vio_iommu_table.it_offset = cb.itc_offset + 157 - veth_iommu_table.it_size; 158 - vio_iommu_table.it_index = cb.itc_index; 159 - vio_iommu_table.it_type = TCE_VB; 160 - vio_iommu_table.it_blocksize = 1; 161 - 162 - t = iommu_init_table(&vio_iommu_table); 163 - 164 - if (!t) 165 - printk("Virtual Bus VIO TCE table failed.\n"); 166 - } 167 - #endif 168 - 169 - #ifdef CONFIG_PPC_PSERIES 170 - static void probe_bus_pseries(void) 171 - { 172 - struct device_node *node_vroot, *of_node; 173 - 174 - node_vroot = find_devices("vdevice"); 175 - if ((node_vroot == NULL) || (node_vroot->child == NULL)) 176 - /* this machine doesn't do virtual IO, and that's ok */ 177 - return; 178 - 179 - vio_num_address_cells = prom_n_addr_cells(node_vroot->child); 180 - 181 - /* 182 - * Create struct vio_devices for each virtual device in the device tree. 183 - * Drivers will associate with them later. 
184 - */ 185 - for (of_node = node_vroot->child; of_node != NULL; 186 - of_node = of_node->sibling) { 187 - printk(KERN_DEBUG "%s: processing %p\n", __FUNCTION__, of_node); 188 - vio_register_device_node(of_node); 189 - } 190 - } 191 - #endif 192 - 193 - #ifdef CONFIG_PPC_ISERIES 194 - static void probe_bus_iseries(void) 195 - { 196 - HvLpIndexMap vlan_map = HvLpConfig_getVirtualLanIndexMap(); 197 - struct vio_dev *viodev; 198 - int i; 199 - 200 - /* there is only one of each of these */ 201 - vio_register_device_iseries("viocons", 0); 202 - vio_register_device_iseries("vscsi", 0); 203 - 204 - vlan_map = HvLpConfig_getVirtualLanIndexMap(); 205 - for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) { 206 - if ((vlan_map & (0x8000 >> i)) == 0) 207 - continue; 208 - viodev = vio_register_device_iseries("vlan", i); 209 - /* veth is special and has it own iommu_table */ 210 - viodev->iommu_table = &veth_iommu_table; 211 - } 212 - for (i = 0; i < HVMAXARCHITECTEDVIRTUALDISKS; i++) 213 - vio_register_device_iseries("viodasd", i); 214 - for (i = 0; i < HVMAXARCHITECTEDVIRTUALCDROMS; i++) 215 - vio_register_device_iseries("viocd", i); 216 - for (i = 0; i < HVMAXARCHITECTEDVIRTUALTAPES; i++) 217 - vio_register_device_iseries("viotape", i); 218 - } 219 - #endif 220 - 221 157 /** 222 158 * vio_bus_init: - Initialize the virtual IO bus 223 159 */ 224 - static int __init vio_bus_init(void) 160 + int __init vio_bus_init(int (*match_func)(const struct vio_device_id *id, 161 + const struct vio_dev *dev), 162 + void (*unregister_dev)(struct vio_dev *), 163 + void (*release_dev)(struct device *)) 225 164 { 226 165 int err; 166 + 167 + is_match = match_func; 168 + unregister_device_callback = unregister_dev; 169 + release_device_callback = release_dev; 227 170 228 171 err = bus_register(&vio_bus_type); 229 172 if (err) { ··· 141 264 return err; 142 265 } 143 266 144 - /* the fake parent of all vio devices, just to give us a nice directory */ 267 + /* the fake parent of all vio devices, just to give us 268 + * a nice directory 269 + */ 145 270 err = device_register(&vio_bus_device.dev); 146 271 if (err) { 147 - printk(KERN_WARNING "%s: device_register returned %i\n", __FUNCTION__, 148 - err); 272 + printk(KERN_WARNING "%s: device_register returned %i\n", 273 + __FUNCTION__, err); 149 274 return err; 150 275 } 151 - 152 - #ifdef CONFIG_PPC_PSERIES 153 - probe_bus_pseries(); 154 - #endif 155 - #ifdef CONFIG_PPC_ISERIES 156 - probe_bus_iseries(); 157 - #endif 158 276 159 277 return 0; 160 278 } 161 279 162 - __initcall(vio_bus_init); 163 - 164 280 /* vio_dev refcount hit 0 */ 165 281 static void __devinit vio_dev_release(struct device *dev) 166 282 { 167 - DBGENTER(); 168 - 169 - #ifdef CONFIG_PPC_PSERIES 170 - /* XXX free TCE table */ 171 - of_node_put(dev->platform_data); 172 - #endif 283 + if (release_device_callback) 284 + release_device_callback(dev); 173 285 kfree(to_vio_dev(dev)); 174 286 } 175 - 176 - #ifdef CONFIG_PPC_PSERIES 177 - static ssize_t viodev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf) 178 - { 179 - struct device_node *of_node = dev->platform_data; 180 - 181 - return sprintf(buf, "%s\n", of_node->full_name); 182 - } 183 - DEVICE_ATTR(devspec, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_devspec, NULL); 184 - #endif 185 287 186 288 static ssize_t viodev_show_name(struct device *dev, struct device_attribute *attr, char *buf) 187 289 { ··· 168 312 } 169 313 DEVICE_ATTR(name, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_name, NULL); 170 314 171 - static struct vio_dev * 
__devinit vio_register_device_common( 315 + struct vio_dev * __devinit vio_register_device_common( 172 316 struct vio_dev *viodev, char *name, char *type, 173 317 uint32_t unit_address, struct iommu_table *iommu_table) 174 318 { 175 - DBGENTER(); 176 - 177 319 viodev->name = name; 178 320 viodev->type = type; 179 321 viodev->unit_address = unit_address; ··· 192 338 return viodev; 193 339 } 194 340 195 - #ifdef CONFIG_PPC_PSERIES 196 - /** 197 - * vio_register_device_node: - Register a new vio device. 198 - * @of_node: The OF node for this device. 199 - * 200 - * Creates and initializes a vio_dev structure from the data in 201 - * of_node (dev.platform_data) and adds it to the list of virtual devices. 202 - * Returns a pointer to the created vio_dev or NULL if node has 203 - * NULL device_type or compatible fields. 204 - */ 205 - struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node) 206 - { 207 - struct vio_dev *viodev; 208 - unsigned int *unit_address; 209 - unsigned int *irq_p; 210 - 211 - DBGENTER(); 212 - 213 - /* we need the 'device_type' property, in order to match with drivers */ 214 - if ((NULL == of_node->type)) { 215 - printk(KERN_WARNING 216 - "%s: node %s missing 'device_type'\n", __FUNCTION__, 217 - of_node->name ? of_node->name : "<unknown>"); 218 - return NULL; 219 - } 220 - 221 - unit_address = (unsigned int *)get_property(of_node, "reg", NULL); 222 - if (!unit_address) { 223 - printk(KERN_WARNING "%s: node %s missing 'reg'\n", __FUNCTION__, 224 - of_node->name ? of_node->name : "<unknown>"); 225 - return NULL; 226 - } 227 - 228 - /* allocate a vio_dev for this node */ 229 - viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL); 230 - if (!viodev) { 231 - return NULL; 232 - } 233 - memset(viodev, 0, sizeof(struct vio_dev)); 234 - 235 - viodev->dev.platform_data = of_node_get(of_node); 236 - 237 - viodev->irq = NO_IRQ; 238 - irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL); 239 - if (irq_p) { 240 - int virq = virt_irq_create_mapping(*irq_p); 241 - if (virq == NO_IRQ) { 242 - printk(KERN_ERR "Unable to allocate interrupt " 243 - "number for %s\n", of_node->full_name); 244 - } else 245 - viodev->irq = irq_offset_up(virq); 246 - } 247 - 248 - snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address); 249 - 250 - /* register with generic device framework */ 251 - if (vio_register_device_common(viodev, of_node->name, of_node->type, 252 - *unit_address, vio_build_iommu_table(viodev)) 253 - == NULL) { 254 - /* XXX free TCE table */ 255 - kfree(viodev); 256 - return NULL; 257 - } 258 - device_create_file(&viodev->dev, &dev_attr_devspec); 259 - 260 - return viodev; 261 - } 262 - EXPORT_SYMBOL(vio_register_device_node); 263 - #endif 264 - 265 - #ifdef CONFIG_PPC_ISERIES 266 - /** 267 - * vio_register_device: - Register a new vio device. 268 - * @voidev: The device to register. 
269 - */ 270 - static struct vio_dev *__init vio_register_device_iseries(char *type, 271 - uint32_t unit_num) 272 - { 273 - struct vio_dev *viodev; 274 - 275 - DBGENTER(); 276 - 277 - /* allocate a vio_dev for this node */ 278 - viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL); 279 - if (!viodev) 280 - return NULL; 281 - memset(viodev, 0, sizeof(struct vio_dev)); 282 - 283 - snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%s%d", type, unit_num); 284 - 285 - return vio_register_device_common(viodev, viodev->dev.bus_id, type, 286 - unit_num, &vio_iommu_table); 287 - } 288 - #endif 289 - 290 341 void __devinit vio_unregister_device(struct vio_dev *viodev) 291 342 { 292 - DBGENTER(); 293 - #ifdef CONFIG_PPC_PSERIES 294 - device_remove_file(&viodev->dev, &dev_attr_devspec); 295 - #endif 343 + if (unregister_device_callback) 344 + unregister_device_callback(viodev); 296 345 device_remove_file(&viodev->dev, &dev_attr_name); 297 346 device_unregister(&viodev->dev); 298 347 } 299 348 EXPORT_SYMBOL(vio_unregister_device); 300 - 301 - #ifdef CONFIG_PPC_PSERIES 302 - /** 303 - * vio_get_attribute: - get attribute for virtual device 304 - * @vdev: The vio device to get property. 305 - * @which: The property/attribute to be extracted. 306 - * @length: Pointer to length of returned data size (unused if NULL). 307 - * 308 - * Calls prom.c's get_property() to return the value of the 309 - * attribute specified by the preprocessor constant @which 310 - */ 311 - const void * vio_get_attribute(struct vio_dev *vdev, void* which, int* length) 312 - { 313 - return get_property(vdev->dev.platform_data, (char*)which, length); 314 - } 315 - EXPORT_SYMBOL(vio_get_attribute); 316 - 317 - /* vio_find_name() - internal because only vio.c knows how we formatted the 318 - * kobject name 319 - * XXX once vio_bus_type.devices is actually used as a kset in 320 - * drivers/base/bus.c, this function should be removed in favor of 321 - * "device_find(kobj_name, &vio_bus_type)" 322 - */ 323 - static struct vio_dev *vio_find_name(const char *kobj_name) 324 - { 325 - struct kobject *found; 326 - 327 - found = kset_find_obj(&devices_subsys.kset, kobj_name); 328 - if (!found) 329 - return NULL; 330 - 331 - return to_vio_dev(container_of(found, struct device, kobj)); 332 - } 333 - 334 - /** 335 - * vio_find_node - find an already-registered vio_dev 336 - * @vnode: device_node of the virtual device we're looking for 337 - */ 338 - struct vio_dev *vio_find_node(struct device_node *vnode) 339 - { 340 - uint32_t *unit_address; 341 - char kobj_name[BUS_ID_SIZE]; 342 - 343 - /* construct the kobject name from the device node */ 344 - unit_address = (uint32_t *)get_property(vnode, "reg", NULL); 345 - if (!unit_address) 346 - return NULL; 347 - snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address); 348 - 349 - return vio_find_name(kobj_name); 350 - } 351 - EXPORT_SYMBOL(vio_find_node); 352 - 353 - /** 354 - * vio_build_iommu_table: - gets the dma information from OF and builds the TCE tree. 355 - * @dev: the virtual device. 356 - * 357 - * Returns a pointer to the built tce tree, or NULL if it can't 358 - * find property. 
359 - */ 360 - static struct iommu_table * vio_build_iommu_table(struct vio_dev *dev) 361 - { 362 - unsigned int *dma_window; 363 - struct iommu_table *newTceTable; 364 - unsigned long offset; 365 - int dma_window_property_size; 366 - 367 - dma_window = (unsigned int *) get_property(dev->dev.platform_data, "ibm,my-dma-window", &dma_window_property_size); 368 - if(!dma_window) { 369 - return NULL; 370 - } 371 - 372 - newTceTable = (struct iommu_table *) kmalloc(sizeof(struct iommu_table), GFP_KERNEL); 373 - 374 - /* There should be some code to extract the phys-encoded offset 375 - using prom_n_addr_cells(). However, according to a comment 376 - on earlier versions, it's always zero, so we don't bother */ 377 - offset = dma_window[1] >> PAGE_SHIFT; 378 - 379 - /* TCE table size - measured in tce entries */ 380 - newTceTable->it_size = dma_window[4] >> PAGE_SHIFT; 381 - /* offset for VIO should always be 0 */ 382 - newTceTable->it_offset = offset; 383 - newTceTable->it_busno = 0; 384 - newTceTable->it_index = (unsigned long)dma_window[0]; 385 - newTceTable->it_type = TCE_VB; 386 - 387 - return iommu_init_table(newTceTable); 388 - } 389 - 390 - int vio_enable_interrupts(struct vio_dev *dev) 391 - { 392 - int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE); 393 - if (rc != H_Success) { 394 - printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc); 395 - } 396 - return rc; 397 - } 398 - EXPORT_SYMBOL(vio_enable_interrupts); 399 - 400 - int vio_disable_interrupts(struct vio_dev *dev) 401 - { 402 - int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE); 403 - if (rc != H_Success) { 404 - printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc); 405 - } 406 - return rc; 407 - } 408 - EXPORT_SYMBOL(vio_disable_interrupts); 409 - #endif 410 349 411 350 static dma_addr_t vio_map_single(struct device *dev, void *vaddr, 412 351 size_t size, enum dma_data_direction direction) ··· 263 616 struct vio_driver *vio_drv = to_vio_driver(drv); 264 617 const struct vio_device_id *ids = vio_drv->id_table; 265 618 const struct vio_device_id *found_id; 266 - 267 - DBGENTER(); 268 619 269 620 if (!ids) 270 621 return 0;
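
vio.c loses all of its pSeries/iSeries #ifdef blocks: the platform now passes its own match, unregister, and release hooks into vio_bus_init() and the common bus code only calls through the stored pointers. A standalone sketch of the injected-matcher half, using hypothetical reduced types; the iSeries matcher shown is the type-prefix comparison visible in the code removed above:

#include <stdio.h>
#include <string.h>

struct vio_dev { const char *type; };
struct vio_device_id { const char *type; };

static int (*is_match)(const struct vio_device_id *, const struct vio_dev *);

/* Common code: walk an id table using whichever matcher was injected. */
static const struct vio_device_id *match_device(
	const struct vio_device_id *ids, const struct vio_dev *dev)
{
	while (ids->type) {
		if (is_match(ids, dev))
			return ids;
		ids++;
	}
	return NULL;
}

/* One platform's matcher: iSeries compared only the type prefix. */
static int iseries_match(const struct vio_device_id *id,
			 const struct vio_dev *dev)
{
	return strncmp(dev->type, id->type, strlen(id->type)) == 0;
}

int main(void)
{
	static const struct vio_device_id table[] = { { "vlan" }, { NULL } };
	struct vio_dev dev = { "vlan" };

	is_match = iseries_match;	/* what vio_bus_init() would record */
	printf("%s\n", match_device(table, &dev) ? "matched" : "no match");
	return 0;
}
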
+1 -2
arch/ppc64/mm/hash_native.c
··· 51 51 unsigned long prpn, unsigned long vflags, 52 52 unsigned long rflags) 53 53 { 54 - unsigned long arpn = physRpn_to_absRpn(prpn); 55 54 hpte_t *hptep = htab_address + hpte_group; 56 55 unsigned long hpte_v, hpte_r; 57 56 int i; ··· 73 74 hpte_v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID; 74 75 if (vflags & HPTE_V_LARGE) 75 76 va &= ~(1UL << HPTE_V_AVPN_SHIFT); 76 - hpte_r = (arpn << HPTE_R_RPN_SHIFT) | rflags; 77 + hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags; 77 78 78 79 hptep->r = hpte_r; 79 80 /* Guarantee the second dword is visible before the valid bit */
+2 -2
arch/ppc64/mm/hash_utils.c
··· 210 210 211 211 /* create bolted the linear mapping in the hash table */ 212 212 for (i=0; i < lmb.memory.cnt; i++) { 213 - base = lmb.memory.region[i].physbase + KERNELBASE; 213 + base = lmb.memory.region[i].base + KERNELBASE; 214 214 size = lmb.memory.region[i].size; 215 215 216 216 DBG("creating mapping for region: %lx : %lx\n", base, size); ··· 302 302 int local = 0; 303 303 cpumask_t tmp; 304 304 305 - if ((ea & ~REGION_MASK) > EADDR_MASK) 305 + if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) 306 306 return 1; 307 307 308 308 switch (REGION_ID(ea)) {
+222 -186
arch/ppc64/mm/hugetlbpage.c
··· 27 27 28 28 #include <linux/sysctl.h> 29 29 30 - #define HUGEPGDIR_SHIFT (HPAGE_SHIFT + PAGE_SHIFT - 3) 31 - #define HUGEPGDIR_SIZE (1UL << HUGEPGDIR_SHIFT) 32 - #define HUGEPGDIR_MASK (~(HUGEPGDIR_SIZE-1)) 30 + #define NUM_LOW_AREAS (0x100000000UL >> SID_SHIFT) 31 + #define NUM_HIGH_AREAS (PGTABLE_RANGE >> HTLB_AREA_SHIFT) 33 32 34 - #define HUGEPTE_INDEX_SIZE 9 35 - #define HUGEPGD_INDEX_SIZE 10 36 - 37 - #define PTRS_PER_HUGEPTE (1 << HUGEPTE_INDEX_SIZE) 38 - #define PTRS_PER_HUGEPGD (1 << HUGEPGD_INDEX_SIZE) 39 - 40 - static inline int hugepgd_index(unsigned long addr) 33 + /* Modelled after find_linux_pte() */ 34 + pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 41 35 { 42 - return (addr & ~REGION_MASK) >> HUGEPGDIR_SHIFT; 43 - } 36 + pgd_t *pg; 37 + pud_t *pu; 38 + pmd_t *pm; 39 + pte_t *pt; 44 40 45 - static pud_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr) 46 - { 47 - int index; 48 - 49 - if (! mm->context.huge_pgdir) 50 - return NULL; 51 - 52 - 53 - index = hugepgd_index(addr); 54 - BUG_ON(index >= PTRS_PER_HUGEPGD); 55 - return (pud_t *)(mm->context.huge_pgdir + index); 56 - } 57 - 58 - static inline pte_t *hugepte_offset(pud_t *dir, unsigned long addr) 59 - { 60 - int index; 61 - 62 - if (pud_none(*dir)) 63 - return NULL; 64 - 65 - index = (addr >> HPAGE_SHIFT) % PTRS_PER_HUGEPTE; 66 - return (pte_t *)pud_page(*dir) + index; 67 - } 68 - 69 - static pud_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr) 70 - { 71 41 BUG_ON(! in_hugepage_area(mm->context, addr)); 72 42 73 - if (! mm->context.huge_pgdir) { 74 - pgd_t *new; 75 - spin_unlock(&mm->page_table_lock); 76 - /* Don't use pgd_alloc(), because we want __GFP_REPEAT */ 77 - new = kmem_cache_alloc(zero_cache, GFP_KERNEL | __GFP_REPEAT); 78 - BUG_ON(memcmp(new, empty_zero_page, PAGE_SIZE)); 79 - spin_lock(&mm->page_table_lock); 43 + addr &= HPAGE_MASK; 80 44 81 - /* 82 - * Because we dropped the lock, we should re-check the 83 - * entry, as somebody else could have populated it.. 84 - */ 85 - if (mm->context.huge_pgdir) 86 - pgd_free(new); 87 - else 88 - mm->context.huge_pgdir = new; 89 - } 90 - return hugepgd_offset(mm, addr); 91 - } 92 - 93 - static pte_t *hugepte_alloc(struct mm_struct *mm, pud_t *dir, unsigned long addr) 94 - { 95 - if (! pud_present(*dir)) { 96 - pte_t *new; 97 - 98 - spin_unlock(&mm->page_table_lock); 99 - new = kmem_cache_alloc(zero_cache, GFP_KERNEL | __GFP_REPEAT); 100 - BUG_ON(memcmp(new, empty_zero_page, PAGE_SIZE)); 101 - spin_lock(&mm->page_table_lock); 102 - /* 103 - * Because we dropped the lock, we should re-check the 104 - * entry, as somebody else could have populated it.. 105 - */ 106 - if (pud_present(*dir)) { 107 - if (new) 108 - kmem_cache_free(zero_cache, new); 109 - } else { 110 - struct page *ptepage; 111 - 112 - if (! new) 113 - return NULL; 114 - ptepage = virt_to_page(new); 115 - ptepage->mapping = (void *) mm; 116 - ptepage->index = addr & HUGEPGDIR_MASK; 117 - pud_populate(mm, dir, new); 45 + pg = pgd_offset(mm, addr); 46 + if (!pgd_none(*pg)) { 47 + pu = pud_offset(pg, addr); 48 + if (!pud_none(*pu)) { 49 + pm = pmd_offset(pu, addr); 50 + pt = (pte_t *)pm; 51 + BUG_ON(!pmd_none(*pm) 52 + && !(pte_present(*pt) && pte_huge(*pt))); 53 + return pt; 118 54 } 119 55 } 120 56 121 - return hugepte_offset(dir, addr); 122 - } 123 - 124 - pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 125 - { 126 - pud_t *pud; 127 - 128 - BUG_ON(! in_hugepage_area(mm->context, addr)); 129 - 130 - pud = hugepgd_offset(mm, addr); 131 - if (! 
pud) 132 - return NULL; 133 - 134 - return hugepte_offset(pud, addr); 57 + return NULL; 135 58 } 136 59 137 60 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) 138 61 { 139 - pud_t *pud; 62 + pgd_t *pg; 63 + pud_t *pu; 64 + pmd_t *pm; 65 + pte_t *pt; 140 66 141 67 BUG_ON(! in_hugepage_area(mm->context, addr)); 142 68 143 - pud = hugepgd_alloc(mm, addr); 144 - if (! pud) 145 - return NULL; 69 + addr &= HPAGE_MASK; 146 70 147 - return hugepte_alloc(mm, pud, addr); 71 + pg = pgd_offset(mm, addr); 72 + pu = pud_alloc(mm, pg, addr); 73 + 74 + if (pu) { 75 + pm = pmd_alloc(mm, pu, addr); 76 + if (pm) { 77 + pt = (pte_t *)pm; 78 + BUG_ON(!pmd_none(*pm) 79 + && !(pte_present(*pt) && pte_huge(*pt))); 80 + return pt; 81 + } 82 + } 83 + 84 + return NULL; 85 + } 86 + 87 + #define HUGEPTE_BATCH_SIZE (HPAGE_SIZE / PMD_SIZE) 88 + 89 + void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 90 + pte_t *ptep, pte_t pte) 91 + { 92 + int i; 93 + 94 + if (pte_present(*ptep)) { 95 + pte_clear(mm, addr, ptep); 96 + flush_tlb_pending(); 97 + } 98 + 99 + for (i = 0; i < HUGEPTE_BATCH_SIZE; i++) { 100 + *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); 101 + ptep++; 102 + } 103 + } 104 + 105 + pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, 106 + pte_t *ptep) 107 + { 108 + unsigned long old = pte_update(ptep, ~0UL); 109 + int i; 110 + 111 + if (old & _PAGE_HASHPTE) 112 + hpte_update(mm, addr, old, 0); 113 + 114 + for (i = 1; i < HUGEPTE_BATCH_SIZE; i++) 115 + ptep[i] = __pte(0); 116 + 117 + return __pte(old); 148 118 } 149 119 150 120 /* ··· 132 162 return 0; 133 163 } 134 164 135 - static void flush_segments(void *parm) 165 + static void flush_low_segments(void *parm) 136 166 { 137 - u16 segs = (unsigned long) parm; 167 + u16 areas = (unsigned long) parm; 138 168 unsigned long i; 139 169 140 170 asm volatile("isync" : : : "memory"); 141 171 142 - for (i = 0; i < 16; i++) { 143 - if (! (segs & (1U << i))) 172 + BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS); 173 + 174 + for (i = 0; i < NUM_LOW_AREAS; i++) { 175 + if (! (areas & (1U << i))) 144 176 continue; 145 177 asm volatile("slbie %0" : : "r" (i << SID_SHIFT)); 146 178 } ··· 150 178 asm volatile("isync" : : : "memory"); 151 179 } 152 180 153 - static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg) 181 + static void flush_high_segments(void *parm) 154 182 { 155 - unsigned long start = seg << SID_SHIFT; 156 - unsigned long end = (seg+1) << SID_SHIFT; 183 + u16 areas = (unsigned long) parm; 184 + unsigned long i, j; 185 + 186 + asm volatile("isync" : : : "memory"); 187 + 188 + BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS); 189 + 190 + for (i = 0; i < NUM_HIGH_AREAS; i++) { 191 + if (! 
(areas & (1U << i))) 192 + continue; 193 + for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++) 194 + asm volatile("slbie %0" 195 + :: "r" ((i << HTLB_AREA_SHIFT) + (j << SID_SHIFT))); 196 + } 197 + 198 + asm volatile("isync" : : : "memory"); 199 + } 200 + 201 + static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area) 202 + { 203 + unsigned long start = area << SID_SHIFT; 204 + unsigned long end = (area+1) << SID_SHIFT; 157 205 struct vm_area_struct *vma; 158 206 159 - BUG_ON(seg >= 16); 207 + BUG_ON(area >= NUM_LOW_AREAS); 160 208 161 209 /* Check no VMAs are in the region */ 162 210 vma = find_vma(mm, start); ··· 186 194 return 0; 187 195 } 188 196 189 - static int open_low_hpage_segs(struct mm_struct *mm, u16 newsegs) 197 + static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area) 198 + { 199 + unsigned long start = area << HTLB_AREA_SHIFT; 200 + unsigned long end = (area+1) << HTLB_AREA_SHIFT; 201 + struct vm_area_struct *vma; 202 + 203 + BUG_ON(area >= NUM_HIGH_AREAS); 204 + 205 + /* Check no VMAs are in the region */ 206 + vma = find_vma(mm, start); 207 + if (vma && (vma->vm_start < end)) 208 + return -EBUSY; 209 + 210 + return 0; 211 + } 212 + 213 + static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas) 190 214 { 191 215 unsigned long i; 192 216 193 - newsegs &= ~(mm->context.htlb_segs); 194 - if (! newsegs) 217 + BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS); 218 + BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS); 219 + 220 + newareas &= ~(mm->context.low_htlb_areas); 221 + if (! newareas) 195 222 return 0; /* The segments we want are already open */ 196 223 197 - for (i = 0; i < 16; i++) 198 - if ((1 << i) & newsegs) 199 - if (prepare_low_seg_for_htlb(mm, i) != 0) 224 + for (i = 0; i < NUM_LOW_AREAS; i++) 225 + if ((1 << i) & newareas) 226 + if (prepare_low_area_for_htlb(mm, i) != 0) 200 227 return -EBUSY; 201 228 202 - mm->context.htlb_segs |= newsegs; 229 + mm->context.low_htlb_areas |= newareas; 203 230 204 231 /* update the paca copy of the context struct */ 205 232 get_paca()->context = mm->context; ··· 226 215 /* the context change must make it to memory before the flush, 227 216 * so that further SLB misses do the right thing. */ 228 217 mb(); 229 - on_each_cpu(flush_segments, (void *)(unsigned long)newsegs, 0, 1); 218 + on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1); 219 + 220 + return 0; 221 + } 222 + 223 + static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas) 224 + { 225 + unsigned long i; 226 + 227 + BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS); 228 + BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8) 229 + != NUM_HIGH_AREAS); 230 + 231 + newareas &= ~(mm->context.high_htlb_areas); 232 + if (! newareas) 233 + return 0; /* The areas we want are already open */ 234 + 235 + for (i = 0; i < NUM_HIGH_AREAS; i++) 236 + if ((1 << i) & newareas) 237 + if (prepare_high_area_for_htlb(mm, i) != 0) 238 + return -EBUSY; 239 + 240 + mm->context.high_htlb_areas |= newareas; 241 + 242 + /* update the paca copy of the context struct */ 243 + get_paca()->context = mm->context; 244 + 245 + /* the context change must make it to memory before the flush, 246 + * so that further SLB misses do the right thing. 
*/ 247 + mb(); 248 + on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1); 230 249 231 250 return 0; 232 251 } 233 252 234 253 int prepare_hugepage_range(unsigned long addr, unsigned long len) 235 254 { 236 - if (within_hugepage_high_range(addr, len)) 237 - return 0; 238 - else if ((addr < 0x100000000UL) && ((addr+len) < 0x100000000UL)) { 239 - int err; 240 - /* Yes, we need both tests, in case addr+len overflows 241 - * 64-bit arithmetic */ 242 - err = open_low_hpage_segs(current->mm, 255 + int err; 256 + 257 + if ( (addr+len) < addr ) 258 + return -EINVAL; 259 + 260 + if ((addr + len) < 0x100000000UL) 261 + err = open_low_hpage_areas(current->mm, 243 262 LOW_ESID_MASK(addr, len)); 244 - if (err) 245 - printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)" 246 - " failed (segs: 0x%04hx)\n", addr, len, 247 - LOW_ESID_MASK(addr, len)); 263 + else 264 + err = open_high_hpage_areas(current->mm, 265 + HTLB_AREA_MASK(addr, len)); 266 + if (err) { 267 + printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)" 268 + " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n", 269 + addr, len, 270 + LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len)); 248 271 return err; 249 272 } 250 273 251 - return -EINVAL; 274 + return 0; 252 275 } 253 276 254 277 struct page * ··· 354 309 vma = find_vma(mm, addr); 355 310 continue; 356 311 } 357 - if (touches_hugepage_high_range(addr, len)) { 358 - addr = TASK_HPAGE_END; 312 + if (touches_hugepage_high_range(mm, addr, len)) { 313 + addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT); 359 314 vma = find_vma(mm, addr); 360 315 continue; 361 316 } ··· 434 389 if (touches_hugepage_low_range(mm, addr, len)) { 435 390 addr = (addr & ((~0) << SID_SHIFT)) - len; 436 391 goto hugepage_recheck; 437 - } else if (touches_hugepage_high_range(addr, len)) { 438 - addr = TASK_HPAGE_BASE - len; 392 + } else if (touches_hugepage_high_range(mm, addr, len)) { 393 + addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len; 394 + goto hugepage_recheck; 439 395 } 440 396 441 397 /* ··· 527 481 return -ENOMEM; 528 482 } 529 483 530 - static unsigned long htlb_get_high_area(unsigned long len) 484 + static unsigned long htlb_get_high_area(unsigned long len, u16 areamask) 531 485 { 532 - unsigned long addr = TASK_HPAGE_BASE; 486 + unsigned long addr = 0x100000000UL; 533 487 struct vm_area_struct *vma; 534 488 535 489 vma = find_vma(current->mm, addr); 536 - for (vma = find_vma(current->mm, addr); 537 - addr + len <= TASK_HPAGE_END; 538 - vma = vma->vm_next) { 490 + while (addr + len <= TASK_SIZE_USER64) { 539 491 BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */ 540 - BUG_ON(! within_hugepage_high_range(addr, len)); 492 + 493 + if (! 
__within_hugepage_high_range(addr, len, areamask)) { 494 + addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT); 495 + vma = find_vma(current->mm, addr); 496 + continue; 497 + } 541 498 542 499 if (!vma || (addr + len) <= vma->vm_start) 543 500 return addr; 544 501 addr = ALIGN(vma->vm_end, HPAGE_SIZE); 545 - /* Because we're in a hugepage region, this alignment 546 - * should not skip us over any VMAs */ 502 + /* Depending on segmask this might not be a confirmed 503 + * hugepage region, so the ALIGN could have skipped 504 + * some VMAs */ 505 + vma = find_vma(current->mm, addr); 547 506 } 548 507 549 508 return -ENOMEM; ··· 558 507 unsigned long len, unsigned long pgoff, 559 508 unsigned long flags) 560 509 { 510 + int lastshift; 511 + u16 areamask, curareas; 512 + 561 513 if (len & ~HPAGE_MASK) 562 514 return -EINVAL; 563 515 ··· 568 514 return -EINVAL; 569 515 570 516 if (test_thread_flag(TIF_32BIT)) { 571 - int lastshift = 0; 572 - u16 segmask, cursegs = current->mm->context.htlb_segs; 517 + curareas = current->mm->context.low_htlb_areas; 573 518 574 519 /* First see if we can do the mapping in the existing 575 - * low hpage segments */ 576 - addr = htlb_get_low_area(len, cursegs); 520 + * low areas */ 521 + addr = htlb_get_low_area(len, curareas); 577 522 if (addr != -ENOMEM) 578 523 return addr; 579 524 580 - for (segmask = LOW_ESID_MASK(0x100000000UL-len, len); 581 - ! lastshift; segmask >>=1) { 582 - if (segmask & 1) 525 + lastshift = 0; 526 + for (areamask = LOW_ESID_MASK(0x100000000UL-len, len); 527 + ! lastshift; areamask >>=1) { 528 + if (areamask & 1) 583 529 lastshift = 1; 584 530 585 - addr = htlb_get_low_area(len, cursegs | segmask); 531 + addr = htlb_get_low_area(len, curareas | areamask); 586 532 if ((addr != -ENOMEM) 587 - && open_low_hpage_segs(current->mm, segmask) == 0) 533 + && open_low_hpage_areas(current->mm, areamask) == 0) 588 534 return addr; 589 535 } 590 - printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open" 591 - " enough segments\n"); 592 - return -ENOMEM; 593 536 } else { 594 - return htlb_get_high_area(len); 595 - } 596 - } 537 + curareas = current->mm->context.high_htlb_areas; 597 538 598 - void hugetlb_mm_free_pgd(struct mm_struct *mm) 599 - { 600 - int i; 601 - pgd_t *pgdir; 539 + /* First see if we can do the mapping in the existing 540 + * high areas */ 541 + addr = htlb_get_high_area(len, curareas); 542 + if (addr != -ENOMEM) 543 + return addr; 602 544 603 - spin_lock(&mm->page_table_lock); 545 + lastshift = 0; 546 + for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len); 547 + ! lastshift; areamask >>=1) { 548 + if (areamask & 1) 549 + lastshift = 1; 604 550 605 - pgdir = mm->context.huge_pgdir; 606 - if (! pgdir) 607 - goto out; 608 - 609 - mm->context.huge_pgdir = NULL; 610 - 611 - /* cleanup any hugepte pages leftover */ 612 - for (i = 0; i < PTRS_PER_HUGEPGD; i++) { 613 - pud_t *pud = (pud_t *)(pgdir + i); 614 - 615 - if (! 
pud_none(*pud)) { 616 - pte_t *pte = (pte_t *)pud_page(*pud); 617 - struct page *ptepage = virt_to_page(pte); 618 - 619 - ptepage->mapping = NULL; 620 - 621 - BUG_ON(memcmp(pte, empty_zero_page, PAGE_SIZE)); 622 - kmem_cache_free(zero_cache, pte); 551 + addr = htlb_get_high_area(len, curareas | areamask); 552 + if ((addr != -ENOMEM) 553 + && open_high_hpage_areas(current->mm, areamask) == 0) 554 + return addr; 623 555 } 624 - pud_clear(pud); 625 556 } 626 - 627 - BUG_ON(memcmp(pgdir, empty_zero_page, PAGE_SIZE)); 628 - kmem_cache_free(zero_cache, pgdir); 629 - 630 - out: 631 - spin_unlock(&mm->page_table_lock); 557 + printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open" 558 + " enough areas\n"); 559 + return -ENOMEM; 632 560 } 633 561 634 562 int hash_huge_page(struct mm_struct *mm, unsigned long access,
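
The hugetlbpage.c rework drops the dedicated huge-page directory in favour of the normal pgd/pud/pmd walk, and generalizes the 256MB low segments into "low" and "high" areas, each tracked as a bitmask in the mm context. A worked sketch of the bitmask bookkeeping; area_mask() is a hypothetical stand-in for the LOW_ESID_MASK/HTLB_AREA_MASK macros:

#include <stdio.h>

#define SID_SHIFT 28			/* 256MB segments, as on ppc64 */

/* One bit per fixed-size area that [addr, addr+len) overlaps. */
static unsigned short area_mask(unsigned long addr, unsigned long len)
{
	unsigned int first = addr >> SID_SHIFT;
	unsigned int last = (addr + len - 1) >> SID_SHIFT;
	unsigned short mask = 0;
	unsigned int i;

	for (i = first; i <= last; i++)
		mask |= 1U << i;
	return mask;
}

int main(void)
{
	unsigned short low_htlb_areas = 0;	/* per-mm state */
	unsigned short want = area_mask(0x30000000UL, 0x20000000UL);

	printf("range touches areas 0x%04hx\n", want);	/* 0x0018: segs 3,4 */

	want &= ~low_htlb_areas;	/* only areas not already open */
	/* the kernel then checks each new area holds no normal VMAs ... */
	low_htlb_areas |= want;
	printf("open areas now 0x%04hx\n", low_htlb_areas);
	return 0;
}
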
+1 -1
arch/ppc64/mm/imalloc.c
··· 31 31 break; 32 32 if ((unsigned long)tmp->addr >= ioremap_bot) 33 33 addr = tmp->size + (unsigned long) tmp->addr; 34 - if (addr > IMALLOC_END-size) 34 + if (addr >= IMALLOC_END-size) 35 35 return 1; 36 36 } 37 37 *im_addr = addr;
+54 -42
arch/ppc64/mm/init.c
··· 42 42 43 43 #include <asm/pgalloc.h> 44 44 #include <asm/page.h> 45 - #include <asm/abs_addr.h> 46 45 #include <asm/prom.h> 47 46 #include <asm/lmb.h> 48 47 #include <asm/rtas.h> ··· 64 65 #include <asm/abs_addr.h> 65 66 #include <asm/vdso.h> 66 67 #include <asm/imalloc.h> 68 + 69 + #if PGTABLE_RANGE > USER_VSID_RANGE 70 + #warning Limited user VSID range means pagetable space is wasted 71 + #endif 72 + 73 + #if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE) 74 + #warning TASK_SIZE is smaller than it needs to be. 75 + #endif 67 76 68 77 int mem_init_done; 69 78 unsigned long ioremap_bot = IMALLOC_BASE; ··· 166 159 ptep = pte_alloc_kernel(&init_mm, pmdp, ea); 167 160 if (!ptep) 168 161 return -ENOMEM; 169 - pa = abs_to_phys(pa); 170 162 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, 171 163 __pgprot(flags))); 172 164 spin_unlock(&init_mm.page_table_lock); ··· 232 226 * Before that, we map using addresses going 233 227 * up from ioremap_bot. imalloc will use 234 228 * the addresses from ioremap_bot through 235 - * IMALLOC_END (0xE000001fffffffff) 229 + * IMALLOC_END 236 230 * 237 231 */ 238 232 pa = addr & PAGE_MASK; ··· 423 417 int index; 424 418 int err; 425 419 426 - #ifdef CONFIG_HUGETLB_PAGE 427 - /* We leave htlb_segs as it was, but for a fork, we need to 428 - * clear the huge_pgdir. */ 429 - mm->context.huge_pgdir = NULL; 430 - #endif 431 - 432 420 again: 433 421 if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL)) 434 422 return -ENOMEM; ··· 453 453 spin_unlock(&mmu_context_lock); 454 454 455 455 mm->context.id = NO_CONTEXT; 456 - 457 - hugetlb_mm_free_pgd(mm); 458 456 } 459 457 460 458 /* ··· 482 484 for (i = 1; i < lmb.memory.cnt; i++) { 483 485 unsigned long base, prevbase, prevsize; 484 486 485 - prevbase = lmb.memory.region[i-1].physbase; 487 + prevbase = lmb.memory.region[i-1].base; 486 488 prevsize = lmb.memory.region[i-1].size; 487 - base = lmb.memory.region[i].physbase; 489 + base = lmb.memory.region[i].base; 488 490 if (base > (prevbase + prevsize)) { 489 491 io_hole_start = prevbase + prevsize; 490 492 io_hole_size = base - (prevbase + prevsize); ··· 511 513 for (i=0; i < lmb.memory.cnt; i++) { 512 514 unsigned long base; 513 515 514 - #ifdef CONFIG_MSCHUNKS 515 - base = lmb.memory.region[i].physbase; 516 - #else 517 516 base = lmb.memory.region[i].base; 518 - #endif 517 + 519 518 if ((paddr >= base) && 520 519 (paddr < (base + lmb.memory.region[i].size))) { 521 520 return 1; ··· 542 547 */ 543 548 bootmap_pages = bootmem_bootmap_pages(total_pages); 544 549 545 - start = abs_to_phys(lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE)); 550 + start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE); 546 551 BUG_ON(!start); 547 552 548 553 boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages); ··· 553 558 * present. 
554 559 */ 555 560 for (i=0; i < lmb.memory.cnt; i++) { 556 - unsigned long physbase, size; 561 + unsigned long base, size; 557 562 unsigned long start_pfn, end_pfn; 558 563 559 - physbase = lmb.memory.region[i].physbase; 564 + base = lmb.memory.region[i].base; 560 565 size = lmb.memory.region[i].size; 561 566 562 - start_pfn = physbase >> PAGE_SHIFT; 567 + start_pfn = base >> PAGE_SHIFT; 563 568 end_pfn = start_pfn + (size >> PAGE_SHIFT); 564 569 memory_present(0, start_pfn, end_pfn); 565 570 566 - free_bootmem(physbase, size); 571 + free_bootmem(base, size); 567 572 } 568 573 569 574 /* reserve the sections we're already using */ 570 575 for (i=0; i < lmb.reserved.cnt; i++) { 571 - unsigned long physbase = lmb.reserved.region[i].physbase; 576 + unsigned long base = lmb.reserved.region[i].base; 572 577 unsigned long size = lmb.reserved.region[i].size; 573 578 574 - reserve_bootmem(physbase, size); 579 + reserve_bootmem(base, size); 575 580 } 576 581 } 577 582 ··· 610 615 int i; 611 616 612 617 for (i=0; i < lmb.memory.cnt; i++) { 613 - unsigned long physbase, size; 618 + unsigned long base, size; 614 619 struct kcore_list *kcore_mem; 615 620 616 - physbase = lmb.memory.region[i].physbase; 621 + base = lmb.memory.region[i].base; 617 622 size = lmb.memory.region[i].size; 618 623 619 624 /* GFP_ATOMIC to avoid might_sleep warnings during boot */ ··· 621 626 if (!kcore_mem) 622 627 panic("mem_init: kmalloc failed\n"); 623 628 624 - kclist_add(kcore_mem, __va(physbase), size); 629 + kclist_add(kcore_mem, __va(base), size); 625 630 } 626 631 627 632 kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START); ··· 681 686 682 687 mem_init_done = 1; 683 688 684 - #ifdef CONFIG_PPC_ISERIES 685 - iommu_vio_init(); 686 - #endif 687 689 /* Initialize the vDSO */ 688 690 vdso_init(); 689 691 } ··· 825 833 return virt_addr; 826 834 } 827 835 828 - kmem_cache_t *zero_cache; 829 - 830 - static void zero_ctor(void *pte, kmem_cache_t *cache, unsigned long flags) 836 + static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) 831 837 { 832 - memset(pte, 0, PAGE_SIZE); 838 + memset(addr, 0, kmem_cache_size(cache)); 833 839 } 840 + 841 + static const int pgtable_cache_size[2] = { 842 + PTE_TABLE_SIZE, PMD_TABLE_SIZE 843 + }; 844 + static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = { 845 + "pgd_pte_cache", "pud_pmd_cache", 846 + }; 847 + 848 + kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)]; 834 849 835 850 void pgtable_cache_init(void) 836 851 { 837 - zero_cache = kmem_cache_create("zero", 838 - PAGE_SIZE, 839 - 0, 840 - SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, 841 - zero_ctor, 842 - NULL); 843 - if (!zero_cache) 844 - panic("pgtable_cache_init(): could not create zero_cache!\n"); 852 + int i; 853 + 854 + BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]); 855 + BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]); 856 + BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]); 857 + BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]); 858 + 859 + for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) { 860 + int size = pgtable_cache_size[i]; 861 + const char *name = pgtable_cache_name[i]; 862 + 863 + pgtable_cache[i] = kmem_cache_create(name, 864 + size, size, 865 + SLAB_HWCACHE_ALIGN 866 + | SLAB_MUST_HWCACHE_ALIGN, 867 + zero_ctor, 868 + NULL); 869 + if (! 
pgtable_cache[i]) 870 + panic("pgtable_cache_init(): could not create %s!\n", 871 + name); 872 + } 845 873 } 846 874 847 875 pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
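
pgtable_cache_init() above now builds one kmem cache per distinct table size, with zero_ctor() zeroing kmem_cache_size(cache) bytes instead of assuming PAGE_SIZE, and BUILD_BUG_ON() pinning the size table to the real structure sizes. A standalone model of that compile-time guard, with an invented size:

#include <stdio.h>

/* A true condition produces a negative array size and breaks the build. */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))

#define PMD_TABLE_SIZE	4096
enum { PMD_CACHE_SIZE = 4096 };		/* stand-in for the table entry */

int main(void)
{
	BUILD_BUG_ON(PMD_TABLE_SIZE != PMD_CACHE_SIZE);	/* false: compiles */
	/* BUILD_BUG_ON(PMD_TABLE_SIZE != 4096 + 1); would fail to compile,
	 * so a size table drifting out of sync can never boot wrong. */
	puts("cache sizes consistent");
	return 0;
}
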
+1 -1
arch/ppc64/mm/numa.c
··· 671 671 * Mark reserved regions on this node 672 672 */ 673 673 for (i = 0; i < lmb.reserved.cnt; i++) { 674 - unsigned long physbase = lmb.reserved.region[i].physbase; 674 + unsigned long physbase = lmb.reserved.region[i].base; 675 675 unsigned long size = lmb.reserved.region[i].size; 676 676 677 677 if (pa_to_nid(physbase) != nid &&
+14 -13
arch/ppc64/mm/slb_low.S
··· 89 89 b 9f 90 90 91 91 0: /* user address: proto-VSID = context<<15 | ESID */ 92 - li r11,SLB_VSID_USER 93 - 94 - srdi. r9,r3,13 92 + srdi. r9,r3,USER_ESID_BITS 95 93 bne- 8f /* invalid ea bits set */ 96 94 97 95 #ifdef CONFIG_HUGETLB_PAGE 98 96 BEGIN_FTR_SECTION 99 - /* check against the hugepage ranges */ 100 - cmpldi r3,(TASK_HPAGE_END>>SID_SHIFT) 101 - bge 6f /* >= TASK_HPAGE_END */ 102 - cmpldi r3,(TASK_HPAGE_BASE>>SID_SHIFT) 103 - bge 5f /* TASK_HPAGE_BASE..TASK_HPAGE_END */ 104 - cmpldi r3,16 105 - bge 6f /* 4GB..TASK_HPAGE_BASE */ 97 + lhz r9,PACAHIGHHTLBAREAS(r13) 98 + srdi r11,r3,(HTLB_AREA_SHIFT-SID_SHIFT) 99 + srd r9,r9,r11 100 + andi. r9,r9,1 101 + bne 5f 106 102 107 - lhz r9,PACAHTLBSEGS(r13) 103 + li r11,SLB_VSID_USER 104 + 105 + cmpldi r3,16 106 + bge 6f 107 + 108 + lhz r9,PACALOWHTLBAREAS(r13) 108 109 srd r9,r9,r3 109 110 andi. r9,r9,1 111 + 110 112 beq 6f 111 113 112 - 5: /* this is a hugepage user address */ 113 - li r11,(SLB_VSID_USER|SLB_VSID_L) 114 + 5: li r11,SLB_VSID_USER|SLB_VSID_L 114 115 END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE) 115 116 #endif /* CONFIG_HUGETLB_PAGE */ 116 117
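
The slb_low.S hunk replaces the fixed TASK_HPAGE_BASE/END range checks with bit tests against the two per-task area masks cached in the paca. A C rendering of the new test for readability; SID_SHIFT is 28 (256MB segments), but the HTLB_AREA_SHIFT value here is only an assumption for the sketch, and the real path also selects SLB_VSID_L for the huge case:

#include <stdio.h>

#define SID_SHIFT	28
#define HTLB_AREA_SHIFT	32	/* assumed: 4GB high areas, for the sketch */

static int esid_is_hugepage(unsigned long esid,
			    unsigned short low_areas, unsigned short high_areas)
{
	/* high mask is indexed by ea >> HTLB_AREA_SHIFT */
	if ((high_areas >> (esid >> (HTLB_AREA_SHIFT - SID_SHIFT))) & 1)
		return 1;
	if (esid >= 16)			/* at or above 4GB and not huge */
		return 0;
	return (low_areas >> esid) & 1;	/* one bit per 256MB segment */
}

int main(void)
{
	unsigned long esid_5g = 0x140000000UL >> SID_SHIFT;

	/* high mask: area 1 (4..8GB) open; low mask: segment 3 open */
	printf("%d %d %d\n",
	       esid_is_hugepage(esid_5g, 0x0008, 0x0002),
	       esid_is_hugepage(3, 0x0008, 0x0002),
	       esid_is_hugepage(2, 0x0008, 0x0002));	/* prints 1 1 0 */
	return 0;
}
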
+55 -40
arch/ppc64/mm/tlb.c
··· 41 41 DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur); 42 42 unsigned long pte_freelist_forced_free; 43 43 44 - void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage) 44 + struct pte_freelist_batch 45 + { 46 + struct rcu_head rcu; 47 + unsigned int index; 48 + pgtable_free_t tables[0]; 49 + }; 50 + 51 + DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur); 52 + unsigned long pte_freelist_forced_free; 53 + 54 + #define PTE_FREELIST_SIZE \ 55 + ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \ 56 + / sizeof(pgtable_free_t)) 57 + 58 + #ifdef CONFIG_SMP 59 + static void pte_free_smp_sync(void *arg) 60 + { 61 + /* Do nothing, just ensure we sync with all CPUs */ 62 + } 63 + #endif 64 + 65 + /* This is only called when we are critically out of memory 66 + * (and fail to get a page in pte_free_tlb). 67 + */ 68 + static void pgtable_free_now(pgtable_free_t pgf) 69 + { 70 + pte_freelist_forced_free++; 71 + 72 + smp_call_function(pte_free_smp_sync, NULL, 0, 1); 73 + 74 + pgtable_free(pgf); 75 + } 76 + 77 + static void pte_free_rcu_callback(struct rcu_head *head) 78 + { 79 + struct pte_freelist_batch *batch = 80 + container_of(head, struct pte_freelist_batch, rcu); 81 + unsigned int i; 82 + 83 + for (i = 0; i < batch->index; i++) 84 + pgtable_free(batch->tables[i]); 85 + 86 + free_page((unsigned long)batch); 87 + } 88 + 89 + static void pte_free_submit(struct pte_freelist_batch *batch) 90 + { 91 + INIT_RCU_HEAD(&batch->rcu); 92 + call_rcu(&batch->rcu, pte_free_rcu_callback); 93 + } 94 + 95 + void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf) 45 96 { 46 97 /* This is safe as we are holding page_table_lock */ 47 98 cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id()); ··· 100 49 101 50 if (atomic_read(&tlb->mm->mm_users) < 2 || 102 51 cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) { 103 - pte_free(ptepage); 52 + pgtable_free(pgf); 104 53 return; 105 54 } 106 55 107 56 if (*batchp == NULL) { 108 57 *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC); 109 58 if (*batchp == NULL) { 110 - pte_free_now(ptepage); 59 + pgtable_free_now(pgf); 111 60 return; 112 61 } 113 62 (*batchp)->index = 0; 114 63 } 115 - (*batchp)->pages[(*batchp)->index++] = ptepage; 64 + (*batchp)->tables[(*batchp)->index++] = pgf; 116 65 if ((*batchp)->index == PTE_FREELIST_SIZE) { 117 66 pte_free_submit(*batchp); 118 67 *batchp = NULL; ··· 181 130 flush_hash_range(batch->context, i, local); 182 131 batch->index = 0; 183 132 put_cpu(); 184 - } 185 - 186 - #ifdef CONFIG_SMP 187 - static void pte_free_smp_sync(void *arg) 188 - { 189 - /* Do nothing, just ensure we sync with all CPUs */ 190 - } 191 - #endif 192 - 193 - /* This is only called when we are critically out of memory 194 - * (and fail to get a page in pte_free_tlb). 195 - */ 196 - void pte_free_now(struct page *ptepage) 197 - { 198 - pte_freelist_forced_free++; 199 - 200 - smp_call_function(pte_free_smp_sync, NULL, 0, 1); 201 - 202 - pte_free(ptepage); 203 - } 204 - 205 - static void pte_free_rcu_callback(struct rcu_head *head) 206 - { 207 - struct pte_freelist_batch *batch = 208 - container_of(head, struct pte_freelist_batch, rcu); 209 - unsigned int i; 210 - 211 - for (i = 0; i < batch->index; i++) 212 - pte_free(batch->pages[i]); 213 - free_page((unsigned long)batch); 214 - } 215 - 216 - void pte_free_submit(struct pte_freelist_batch *batch) 217 - { 218 - INIT_RCU_HEAD(&batch->rcu); 219 - call_rcu(&batch->rcu, pte_free_rcu_callback); 220 133 } 221 134 222 135 void pte_free_finish(void)
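
tlb.c generalizes the RCU-deferred freeing from struct page pointers to pgtable_free_t handles so the new multi-size page tables can be queued too; batches embed an rcu_head and are recovered with container_of() in the callback, with a synchronous smp_call_function() fallback when a batch page cannot be allocated. A userspace mock of the batch-and-defer shape, with call_rcu() faked by an immediate callback:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head {
	void (*func)(struct rcu_head *);
};

/* Batch with the callback head embedded, as in the hunk above. */
struct batch {
	struct rcu_head rcu;
	unsigned int index;
	void *tables[4];
};

/* The real call_rcu() defers func past a grace period, once every CPU has
 * stopped looking at the old page tables; faked here as an immediate call. */
static void fake_call_rcu(struct rcu_head *head,
			  void (*func)(struct rcu_head *))
{
	head->func = func;
	head->func(head);
}

static void free_batch(struct rcu_head *head)
{
	struct batch *b = container_of(head, struct batch, rcu);
	unsigned int i;

	for (i = 0; i < b->index; i++)
		free(b->tables[i]);
	printf("freed %u queued tables\n", b->index);
	free(b);
}

int main(void)
{
	struct batch *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->tables[b->index++] = malloc(64);
	b->tables[b->index++] = malloc(64);
	fake_call_rcu(&b->rcu, free_batch);
	return 0;
}
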
+1 -1
arch/ppc64/xmon/start.c
··· 27 27 struct tty_struct *tty) 28 28 { 29 29 /* ensure xmon is enabled */ 30 - xmon_init(); 30 + xmon_init(1); 31 31 debugger(pt_regs); 32 32 } 33 33
+18 -8
arch/ppc64/xmon/xmon.c
··· 2496 2496 } 2497 2497 } 2498 2498 2499 - void xmon_init(void) 2499 + void xmon_init(int enable) 2500 2500 { 2501 - __debugger = xmon; 2502 - __debugger_ipi = xmon_ipi; 2503 - __debugger_bpt = xmon_bpt; 2504 - __debugger_sstep = xmon_sstep; 2505 - __debugger_iabr_match = xmon_iabr_match; 2506 - __debugger_dabr_match = xmon_dabr_match; 2507 - __debugger_fault_handler = xmon_fault_handler; 2501 + if (enable) { 2502 + __debugger = xmon; 2503 + __debugger_ipi = xmon_ipi; 2504 + __debugger_bpt = xmon_bpt; 2505 + __debugger_sstep = xmon_sstep; 2506 + __debugger_iabr_match = xmon_iabr_match; 2507 + __debugger_dabr_match = xmon_dabr_match; 2508 + __debugger_fault_handler = xmon_fault_handler; 2509 + } else { 2510 + __debugger = NULL; 2511 + __debugger_ipi = NULL; 2512 + __debugger_bpt = NULL; 2513 + __debugger_sstep = NULL; 2514 + __debugger_iabr_match = NULL; 2515 + __debugger_dabr_match = NULL; 2516 + __debugger_fault_handler = NULL; 2517 + } 2508 2518 } 2509 2519 2510 2520 void dump_segments(void)
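
xmon_init() becomes symmetric: the debugger hook pointers can now be cleared as well as installed, which is what lets the new "xmon=off" handling in setup.c turn off a CONFIG_XMON_DEFAULT kernel. A condensed mock of the resulting boot-argument behaviour; the flow is simplified, and the real code also treats "xmon=early" specially by entering the debugger immediately:

#include <stdio.h>
#include <string.h>

static int xmon_hooked;

static void xmon_init(int enable)
{
	xmon_hooked = enable;	/* the kernel sets or clears __debugger_* */
}

static void parse_xmon(const char *p)
{
	if (p && strncmp(p, "off", 3) == 0)
		xmon_init(0);
	else
		xmon_init(1);	/* CONFIG_XMON_DEFAULT behaviour */
}

int main(void)
{
	parse_xmon("off");
	printf("xmon hooks %s\n", xmon_hooked ? "installed" : "cleared");
	return 0;
}
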
+25 -67
include/asm-ppc64/abs_addr.h
··· 16 16 #include <asm/page.h> 17 17 #include <asm/prom.h> 18 18 #include <asm/lmb.h> 19 + #include <asm/firmware.h> 19 20 20 - typedef u32 msChunks_entry; 21 - struct msChunks { 21 + struct mschunks_map { 22 22 unsigned long num_chunks; 23 23 unsigned long chunk_size; 24 24 unsigned long chunk_shift; 25 25 unsigned long chunk_mask; 26 - msChunks_entry *abs; 26 + u32 *mapping; 27 27 }; 28 28 29 - extern struct msChunks msChunks; 29 + extern struct mschunks_map mschunks_map; 30 30 31 - extern unsigned long msChunks_alloc(unsigned long, unsigned long, unsigned long); 32 - extern unsigned long reloc_offset(void); 31 + /* Chunks are 256 KB */ 32 + #define MSCHUNKS_CHUNK_SHIFT (18) 33 + #define MSCHUNKS_CHUNK_SIZE (1UL << MSCHUNKS_CHUNK_SHIFT) 34 + #define MSCHUNKS_OFFSET_MASK (MSCHUNKS_CHUNK_SIZE - 1) 33 35 34 - #ifdef CONFIG_MSCHUNKS 35 - 36 - static inline unsigned long 37 - chunk_to_addr(unsigned long chunk) 36 + static inline unsigned long chunk_to_addr(unsigned long chunk) 38 37 { 39 - unsigned long offset = reloc_offset(); 40 - struct msChunks *_msChunks = PTRRELOC(&msChunks); 41 - 42 - return chunk << _msChunks->chunk_shift; 38 + return chunk << MSCHUNKS_CHUNK_SHIFT; 43 39 } 44 40 45 - static inline unsigned long 46 - addr_to_chunk(unsigned long addr) 41 + static inline unsigned long addr_to_chunk(unsigned long addr) 47 42 { 48 - unsigned long offset = reloc_offset(); 49 - struct msChunks *_msChunks = PTRRELOC(&msChunks); 50 - 51 - return addr >> _msChunks->chunk_shift; 43 + return addr >> MSCHUNKS_CHUNK_SHIFT; 52 44 } 53 45 54 - static inline unsigned long 55 - chunk_offset(unsigned long addr) 46 + static inline unsigned long phys_to_abs(unsigned long pa) 56 47 { 57 - unsigned long offset = reloc_offset(); 58 - struct msChunks *_msChunks = PTRRELOC(&msChunks); 48 + unsigned long chunk; 59 49 60 - return addr & _msChunks->chunk_mask; 50 + /* This is a no-op on non-iSeries */ 51 + if (!firmware_has_feature(FW_FEATURE_ISERIES)) 52 + return pa; 53 + 54 + chunk = addr_to_chunk(pa); 55 + 56 + if (chunk < mschunks_map.num_chunks) 57 + chunk = mschunks_map.mapping[chunk]; 58 + 59 + return chunk_to_addr(chunk) + (pa & MSCHUNKS_OFFSET_MASK); 61 60 } 62 - 63 - static inline unsigned long 64 - abs_chunk(unsigned long pchunk) 65 - { 66 - unsigned long offset = reloc_offset(); 67 - struct msChunks *_msChunks = PTRRELOC(&msChunks); 68 - if ( pchunk >= _msChunks->num_chunks ) { 69 - return pchunk; 70 - } 71 - return PTRRELOC(_msChunks->abs)[pchunk]; 72 - } 73 - 74 - /* A macro so it can take pointers or unsigned long. */ 75 - #define phys_to_abs(pa) \ 76 - ({ unsigned long _pa = (unsigned long)(pa); \ 77 - chunk_to_addr(abs_chunk(addr_to_chunk(_pa))) + chunk_offset(_pa); \ 78 - }) 79 - 80 - static inline unsigned long 81 - physRpn_to_absRpn(unsigned long rpn) 82 - { 83 - unsigned long pa = rpn << PAGE_SHIFT; 84 - unsigned long aa = phys_to_abs(pa); 85 - return (aa >> PAGE_SHIFT); 86 - } 87 - 88 - /* A macro so it can take pointers or unsigned long. 
*/ 89 - #define abs_to_phys(aa) lmb_abs_to_phys((unsigned long)(aa)) 90 - 91 - #else /* !CONFIG_MSCHUNKS */ 92 - 93 - #define chunk_to_addr(chunk) ((unsigned long)(chunk)) 94 - #define addr_to_chunk(addr) (addr) 95 - #define chunk_offset(addr) (0) 96 - #define abs_chunk(pchunk) (pchunk) 97 - 98 - #define phys_to_abs(pa) (pa) 99 - #define physRpn_to_absRpn(rpn) (rpn) 100 - #define abs_to_phys(aa) (aa) 101 - 102 - #endif /* !CONFIG_MSCHUNKS */ 103 61 104 62 /* Convenience macros */ 105 63 #define virt_to_abs(va) phys_to_abs(__pa(va)) 106 - #define abs_to_virt(aa) __va(abs_to_phys(aa)) 64 + #define abs_to_virt(aa) __va(aa) 107 65 108 66 #endif /* _ABS_ADDR_H */
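
abs_addr.h drops the CONFIG_MSCHUNKS macro maze: phys_to_abs() is now an inline that is a no-op unless firmware_has_feature(FW_FEATURE_ISERIES), chunks are fixed at 256KB, and abs_to_phys() disappears entirely (abs_to_virt() becomes plain __va()). A worked sketch of the chunk translation, with an invented mapping table:

#include <stdio.h>

#define CHUNK_SHIFT	18			/* 256KB chunks */
#define CHUNK_SIZE	(1UL << CHUNK_SHIFT)
#define OFFSET_MASK	(CHUNK_SIZE - 1)

/* Invented table: physical chunk number to absolute chunk number. */
static unsigned int mapping[] = { 7, 3, 12 };
#define NUM_CHUNKS	(sizeof(mapping) / sizeof(mapping[0]))

static unsigned long phys_to_abs(unsigned long pa)
{
	unsigned long chunk = pa >> CHUNK_SHIFT;

	if (chunk < NUM_CHUNKS)		/* chunks past the table pass through */
		chunk = mapping[chunk];
	return (chunk << CHUNK_SHIFT) + (pa & OFFSET_MASK);
}

int main(void)
{
	/* phys 0x40010 is chunk 1, offset 0x10; chunk 1 maps to abs chunk 3 */
	printf("0x%lx\n", phys_to_abs(0x40010UL));	/* prints 0xc0010 */
	return 0;
}
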
+3 -44
include/asm-ppc64/cputable.h
··· 56 56 * BHT, SPD, etc... from head.S before branching to identify_machine 57 57 */ 58 58 cpu_setup_t cpu_setup; 59 - 60 - /* This is used to identify firmware features which are available 61 - * to the kernel. 62 - */ 63 - unsigned long firmware_features; 64 59 }; 65 60 66 61 extern struct cpu_spec cpu_specs[]; ··· 65 70 { 66 71 return cur_cpu_spec->cpu_features & feature; 67 72 } 68 - 69 - 70 - /* firmware feature bitmask values */ 71 - #define FIRMWARE_MAX_FEATURES 63 72 - 73 - #define FW_FEATURE_PFT (1UL<<0) 74 - #define FW_FEATURE_TCE (1UL<<1) 75 - #define FW_FEATURE_SPRG0 (1UL<<2) 76 - #define FW_FEATURE_DABR (1UL<<3) 77 - #define FW_FEATURE_COPY (1UL<<4) 78 - #define FW_FEATURE_ASR (1UL<<5) 79 - #define FW_FEATURE_DEBUG (1UL<<6) 80 - #define FW_FEATURE_TERM (1UL<<7) 81 - #define FW_FEATURE_PERF (1UL<<8) 82 - #define FW_FEATURE_DUMP (1UL<<9) 83 - #define FW_FEATURE_INTERRUPT (1UL<<10) 84 - #define FW_FEATURE_MIGRATE (1UL<<11) 85 - #define FW_FEATURE_PERFMON (1UL<<12) 86 - #define FW_FEATURE_CRQ (1UL<<13) 87 - #define FW_FEATURE_VIO (1UL<<14) 88 - #define FW_FEATURE_RDMA (1UL<<15) 89 - #define FW_FEATURE_LLAN (1UL<<16) 90 - #define FW_FEATURE_BULK (1UL<<17) 91 - #define FW_FEATURE_XDABR (1UL<<18) 92 - #define FW_FEATURE_MULTITCE (1UL<<19) 93 - #define FW_FEATURE_SPLPAR (1UL<<20) 94 - 95 - typedef struct { 96 - unsigned long val; 97 - char * name; 98 - } firmware_feature_t; 99 - 100 - extern firmware_feature_t firmware_features_table[]; 101 73 102 74 #endif /* __ASSEMBLY__ */ 103 75 ··· 102 140 #define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0000080000000000) 103 141 #define CPU_FTR_CTRL ASM_CONST(0x0000100000000000) 104 142 105 - /* Platform firmware features */ 106 - #define FW_FTR_ ASM_CONST(0x0000000000000001) 107 - 108 143 #ifndef __ASSEMBLY__ 144 + 109 145 #define COMMON_USER_PPC64 (PPC_FEATURE_32 | PPC_FEATURE_64 | \ 110 146 PPC_FEATURE_HAS_FPU | PPC_FEATURE_HAS_MMU) 111 147 ··· 116 156 #define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE) 117 157 #else 118 158 #define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE) 119 - #endif 159 + #endif /* CONFIG_PPC_ISERIES */ 120 160 121 - #define COMMON_PPC64_FW (0) 122 - #endif 161 + #endif /* __ASSEMBLY */ 123 162 124 163 #ifdef __ASSEMBLY__ 125 164
+101
include/asm-ppc64/firmware.h
···
+/*
+ * include/asm-ppc64/firmware.h
+ *
+ * Extracted from include/asm-ppc64/cputable.h
+ *
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * Modifications for ppc64:
+ * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef __ASM_PPC_FIRMWARE_H
+#define __ASM_PPC_FIRMWARE_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+/* firmware feature bitmask values */
+#define FIRMWARE_MAX_FEATURES 63
+
+#define FW_FEATURE_PFT          (1UL<<0)
+#define FW_FEATURE_TCE          (1UL<<1)
+#define FW_FEATURE_SPRG0        (1UL<<2)
+#define FW_FEATURE_DABR         (1UL<<3)
+#define FW_FEATURE_COPY         (1UL<<4)
+#define FW_FEATURE_ASR          (1UL<<5)
+#define FW_FEATURE_DEBUG        (1UL<<6)
+#define FW_FEATURE_TERM         (1UL<<7)
+#define FW_FEATURE_PERF         (1UL<<8)
+#define FW_FEATURE_DUMP         (1UL<<9)
+#define FW_FEATURE_INTERRUPT    (1UL<<10)
+#define FW_FEATURE_MIGRATE      (1UL<<11)
+#define FW_FEATURE_PERFMON      (1UL<<12)
+#define FW_FEATURE_CRQ          (1UL<<13)
+#define FW_FEATURE_VIO          (1UL<<14)
+#define FW_FEATURE_RDMA         (1UL<<15)
+#define FW_FEATURE_LLAN         (1UL<<16)
+#define FW_FEATURE_BULK         (1UL<<17)
+#define FW_FEATURE_XDABR        (1UL<<18)
+#define FW_FEATURE_MULTITCE     (1UL<<19)
+#define FW_FEATURE_SPLPAR       (1UL<<20)
+#define FW_FEATURE_ISERIES      (1UL<<21)
+
+enum {
+        FW_FEATURE_PSERIES_POSSIBLE = FW_FEATURE_PFT | FW_FEATURE_TCE |
+                FW_FEATURE_SPRG0 | FW_FEATURE_DABR | FW_FEATURE_COPY |
+                FW_FEATURE_ASR | FW_FEATURE_DEBUG | FW_FEATURE_TERM |
+                FW_FEATURE_PERF | FW_FEATURE_DUMP | FW_FEATURE_INTERRUPT |
+                FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ |
+                FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
+                FW_FEATURE_BULK | FW_FEATURE_XDABR | FW_FEATURE_MULTITCE |
+                FW_FEATURE_SPLPAR,
+        FW_FEATURE_PSERIES_ALWAYS = 0,
+        FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES,
+        FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES,
+        FW_FEATURE_POSSIBLE =
+#ifdef CONFIG_PPC_PSERIES
+                FW_FEATURE_PSERIES_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_ISERIES
+                FW_FEATURE_ISERIES_POSSIBLE |
+#endif
+                0,
+        FW_FEATURE_ALWAYS =
+#ifdef CONFIG_PPC_PSERIES
+                FW_FEATURE_PSERIES_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_ISERIES
+                FW_FEATURE_ISERIES_ALWAYS &
+#endif
+                FW_FEATURE_POSSIBLE,
+};
+
+/* This is used to identify firmware features which are available
+ * to the kernel.
+ */
+extern unsigned long ppc64_firmware_features;
+
+static inline unsigned long firmware_has_feature(unsigned long feature)
+{
+        return (FW_FEATURE_ALWAYS & feature) ||
+                (FW_FEATURE_POSSIBLE & ppc64_firmware_features & feature);
+}
+
+#ifdef CONFIG_PPC_PSERIES
+typedef struct {
+        unsigned long val;
+        char * name;
+} firmware_feature_t;
+
+extern firmware_feature_t firmware_features_table[];
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* __ASM_PPC_FIRMWARE_H */
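The POSSIBLE/ALWAYS masks are there so firmware_has_feature() can fold to a compile-time constant on single-platform builds. A worked expansion for a hypothetical iSeries-only configuration (CONFIG_PPC_ISERIES=y, CONFIG_PPC_PSERIES unset):

/* FW_FEATURE_POSSIBLE = FW_FEATURE_ISERIES_POSSIBLE | 0 = FW_FEATURE_ISERIES
 * FW_FEATURE_ALWAYS   = FW_FEATURE_ISERIES_ALWAYS & FW_FEATURE_POSSIBLE
 *                     = FW_FEATURE_ISERIES
 *
 * firmware_has_feature(FW_FEATURE_ISERIES) therefore reduces to constant
 * true, and firmware_has_feature(FW_FEATURE_SPLPAR) to constant false,
 * without ever loading ppc64_firmware_features.  Only a kernel built for
 * both platforms falls back to the runtime mask.
 */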
+1 -1
include/asm-ppc64/imalloc.h
···
  */
 #define PHBS_IO_BASE    VMALLOC_END
 #define IMALLOC_BASE    (PHBS_IO_BASE + 0x80000000ul)   /* Reserve 2 gigs for PHBs */
-#define IMALLOC_END     (VMALLOC_START + EADDR_MASK)
+#define IMALLOC_END     (VMALLOC_START + PGTABLE_RANGE)
 
 
 /* imalloc region types */
-3
include/asm-ppc64/iommu.h
···
 
 #ifdef CONFIG_PPC_ISERIES
 
-/* Initializes tables for bio buses */
-extern void __init iommu_vio_init(void);
-
 struct iSeries_Device_Node;
 /* Creates table for an individual device node */
 extern void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn);
-1
include/asm-ppc64/lmb.h
···
 
 struct lmb_property {
         unsigned long base;
-        unsigned long physbase;
         unsigned long size;
 };
 
+3
include/asm-ppc64/machdep.h
···
 
         /* Idle loop for this platform, leave empty for default idle loop */
         int             (*idle_loop)(void);
+
+        /* Function to enable pmcs for this platform, called once per cpu. */
+        void            (*enable_pmcs)(void);
 };
 
 extern int default_idle(void);
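A sketch of how the new hook is meant to be called once per cpu; ppc_md is the usual global struct machdep_calls instance, and the wrapper name is illustrative:

static void example_enable_cpu_pmcs(void)
{
        /* Leave PMCs alone on platforms that don't provide the hook. */
        if (ppc_md.enable_pmcs)
                ppc_md.enable_pmcs();   /* e.g. power4_enable_pmcs, pmc.h below */
}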
+10 -6
include/asm-ppc64/mmu.h
···
 #define STE_VSID_SHIFT  12
 
 /* Location of cpu0's segment table */
-#define STAB0_PAGE      0x9
+#define STAB0_PAGE      0x6
 #define STAB0_PHYS_ADDR (STAB0_PAGE<<PAGE_SHIFT)
-#define STAB0_VIRT_ADDR (KERNELBASE+STAB0_PHYS_ADDR)
+
+#ifndef __ASSEMBLY__
+extern char initial_stab[];
+#endif /* !__ASSEMBLY__ */
 
 /*
  * SLB
···
 #define VSID_BITS       36
 #define VSID_MODULUS    ((1UL<<VSID_BITS)-1)
 
-#define CONTEXT_BITS    20
-#define USER_ESID_BITS  15
+#define CONTEXT_BITS    19
+#define USER_ESID_BITS  16
+
+#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
 
 /*
  * This macro generates asm code to compute the VSID scramble
···
 typedef struct {
         mm_context_id_t id;
 #ifdef CONFIG_HUGETLB_PAGE
-        pgd_t *huge_pgdir;
-        u16 htlb_segs; /* bitmask */
+        u16 low_htlb_areas, high_htlb_areas;
 #endif
 } mm_context_t;
 
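The new constants trade one CONTEXT bit for one extra user ESID bit. A worked check of the arithmetic, assuming the usual 256MB segments (SID_SHIFT = 28, defined elsewhere in this header):

/* USER_VSID_RANGE = 1UL << (USER_ESID_BITS + SID_SHIFT)
 *                 = 1UL << (16 + 28) = 2^44 bytes = 16TB,
 * matching the TASK_SIZE_USER64 bump to 0x0000100000000000UL in
 * processor.h below.  CONTEXT_BITS + USER_ESID_BITS = 19 + 16 = 35,
 * which still fits inside VSID_BITS (36).
 */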
-7
include/asm-ppc64/naca.h
···
 
 #include <asm/types.h>
 
-#ifndef __ASSEMBLY__
-
 struct naca_struct {
         /* Kernel only data - undefined for user space */
         void *xItVpdAreas;      /* VPD Data                     0x00 */
···
 };
 
 extern struct naca_struct naca;
-
-#endif /* __ASSEMBLY__ */
-
-#define NACA_PAGE      0x4
-#define NACA_PHYS_ADDR (NACA_PAGE<<PAGE_SHIFT)
 
 #endif /* _NACA_H */
+32 -23
include/asm-ppc64/page.h
···
 
 #define HUGETLB_PAGE_ORDER      (HPAGE_SHIFT - PAGE_SHIFT)
 
-/* For 64-bit processes the hugepage range is 1T-1.5T */
-#define TASK_HPAGE_BASE ASM_CONST(0x0000010000000000)
-#define TASK_HPAGE_END  ASM_CONST(0x0000018000000000)
+#define HTLB_AREA_SHIFT         40
+#define HTLB_AREA_SIZE          (1UL << HTLB_AREA_SHIFT)
+#define GET_HTLB_AREA(x)        ((x) >> HTLB_AREA_SHIFT)
 
 #define LOW_ESID_MASK(addr, len)     (((1U << (GET_ESID(addr+len-1)+1)) \
                                       - (1U << GET_ESID(addr))) & 0xffff)
+#define HTLB_AREA_MASK(addr, len)    (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
+                                      - (1U << GET_HTLB_AREA(addr))) & 0xffff)
 
 #define ARCH_HAS_HUGEPAGE_ONLY_RANGE
 #define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
 
 #define touches_hugepage_low_range(mm, addr, len) \
-        (LOW_ESID_MASK((addr), (len)) & mm->context.htlb_segs)
-#define touches_hugepage_high_range(addr, len) \
-        (((addr) > (TASK_HPAGE_BASE-(len))) && ((addr) < TASK_HPAGE_END))
+        (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
+#define touches_hugepage_high_range(mm, addr, len) \
+        (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)
 
 #define __within_hugepage_low_range(addr, len, segmask) \
         ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
 #define within_hugepage_low_range(addr, len) \
         __within_hugepage_low_range((addr), (len), \
-                                    current->mm->context.htlb_segs)
-#define within_hugepage_high_range(addr, len) (((addr) >= TASK_HPAGE_BASE) \
-          && ((addr)+(len) <= TASK_HPAGE_END) && ((addr)+(len) >= (addr)))
+                                    current->mm->context.low_htlb_areas)
+#define __within_hugepage_high_range(addr, len, zonemask) \
+        ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
+#define within_hugepage_high_range(addr, len) \
+        __within_hugepage_high_range((addr), (len), \
+                                    current->mm->context.high_htlb_areas)
 
 #define is_hugepage_only_range(mm, addr, len) \
-        (touches_hugepage_high_range((addr), (len)) || \
+        (touches_hugepage_high_range((mm), (addr), (len)) || \
           touches_hugepage_low_range((mm), (addr), (len)))
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 
 #define in_hugepage_area(context, addr) \
         (cpu_has_feature(CPU_FTR_16M_PAGE) && \
-         ( (((addr) >= TASK_HPAGE_BASE) && ((addr) < TASK_HPAGE_END)) || \
+         ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
            ( ((addr) < 0x100000000L) && \
-             ((1 << GET_ESID(addr)) & (context).htlb_segs) ) ) )
+             ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) )
 
 #else /* !CONFIG_HUGETLB_PAGE */
 
···
  * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b.
  */
 typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned int  pmd; } pmd_t;
-typedef struct { unsigned int  pgd; } pgd_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pud; } pud_t;
+typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 
 #define pte_val(x)      ((x).pte)
 #define pmd_val(x)      ((x).pmd)
+#define pud_val(x)      ((x).pud)
 #define pgd_val(x)      ((x).pgd)
 #define pgprot_val(x)   ((x).pgprot)
 
-#define __pte(x)        ((pte_t) { (x) } )
-#define __pmd(x)        ((pmd_t) { (x) } )
-#define __pgd(x)        ((pgd_t) { (x) } )
-#define __pgprot(x)     ((pgprot_t) { (x) } )
+#define __pte(x)        ((pte_t) { (x) })
+#define __pmd(x)        ((pmd_t) { (x) })
+#define __pud(x)        ((pud_t) { (x) })
+#define __pgd(x)        ((pgd_t) { (x) })
+#define __pgprot(x)     ((pgprot_t) { (x) })
 
 #else
 /*
  * .. while these make it easier on the compiler
  */
 typedef unsigned long pte_t;
-typedef unsigned int  pmd_t;
-typedef unsigned int  pgd_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pud_t;
+typedef unsigned long pgd_t;
 typedef unsigned long pgprot_t;
 
 #define pte_val(x)      (x)
 #define pmd_val(x)      (x)
+#define pud_val(x)      (x)
 #define pgd_val(x)      (x)
 #define pgprot_val(x)   (x)
 
 #define __pte(x)        (x)
 #define __pmd(x)        (x)
+#define __pud(x)        (x)
 #define __pgd(x)        (x)
 #define __pgprot(x)     (x)
 
···
 #define KERNEL_REGION_ID     (KERNELBASE >> REGION_SHIFT)
 #define USER_REGION_ID       (0UL)
 #define REGION_ID(ea)        (((unsigned long)(ea)) >> REGION_SHIFT)
-
-#define __bpn_to_ba(x) ((((unsigned long)(x)) << PAGE_SHIFT) + KERNELBASE)
-#define __ba_to_bpn(x) ((((unsigned long)(x)) & ~REGION_MASK) >> PAGE_SHIFT)
 
 #define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
 
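HTLB_AREA_MASK mirrors LOW_ESID_MASK one level up: 1TB areas tracked in high_htlb_areas instead of 256MB segments in low_htlb_areas. A worked example with illustrative values:

/* HTLB_AREA_SHIFT = 40, so each area is 1TB.  For a range starting at
 * addr = 0x10000000000 (1TB) with len = 2TB:
 *
 *   GET_HTLB_AREA(addr)           = 1
 *   GET_HTLB_AREA(addr + len - 1) = 2
 *   HTLB_AREA_MASK(addr, len)     = ((1 << 3) - (1 << 1)) & 0xffff = 0x6
 *
 * i.e. bits 1 and 2 of context.high_htlb_areas must be set for the range
 * to count as hugepage-only.
 */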
+61 -32
include/asm-ppc64/pgalloc.h
···
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 
-extern kmem_cache_t *zero_cache;
+extern kmem_cache_t *pgtable_cache[];
+
+#define PTE_CACHE_NUM   0
+#define PMD_CACHE_NUM   1
+#define PUD_CACHE_NUM   1
+#define PGD_CACHE_NUM   0
 
 /*
  * This program is free software; you can redistribute it and/or
···
  * 2 of the License, or (at your option) any later version.
  */
 
-static inline pgd_t *
-pgd_alloc(struct mm_struct *mm)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-        return kmem_cache_alloc(zero_cache, GFP_KERNEL);
+        return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
 }
 
-static inline void
-pgd_free(pgd_t *pgd)
+static inline void pgd_free(pgd_t *pgd)
 {
-        kmem_cache_free(zero_cache, pgd);
+        kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
+}
+
+#define pgd_populate(MM, PGD, PUD)      pgd_set(PGD, PUD)
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+        return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
+                                GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pud_free(pud_t *pud)
+{
+        kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
 }
 
 #define pud_populate(MM, PUD, PMD)      pud_set(PUD, PMD)
 
-static inline pmd_t *
-pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-        return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
+        return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
+                                GFP_KERNEL|__GFP_REPEAT);
 }
 
-static inline void
-pmd_free(pmd_t *pmd)
+static inline void pmd_free(pmd_t *pmd)
 {
-        kmem_cache_free(zero_cache, pmd);
+        kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
 }
 
 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte)
···
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-        return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
+        return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
+                                GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-        pte_t *pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
-        if (pte)
-                return virt_to_page(pte);
-        return NULL;
+        return virt_to_page(pte_alloc_one_kernel(mm, address));
 }
 
 static inline void pte_free_kernel(pte_t *pte)
 {
-        kmem_cache_free(zero_cache, pte);
+        kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
 }
 
 static inline void pte_free(struct page *ptepage)
 {
-        kmem_cache_free(zero_cache, page_address(ptepage));
+        pte_free_kernel(page_address(ptepage));
 }
 
-struct pte_freelist_batch
+#define PGF_CACHENUM_MASK       0xf
+
+typedef struct pgtable_free {
+        unsigned long val;
+} pgtable_free_t;
+
+static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
+                                                unsigned long mask)
 {
-        struct rcu_head rcu;
-        unsigned int    index;
-        struct page *   pages[0];
-};
+        BUG_ON(cachenum > PGF_CACHENUM_MASK);
 
-#define PTE_FREELIST_SIZE       ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) / \
-                                  sizeof(struct page *))
+        return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
+}
 
-extern void pte_free_now(struct page *ptepage);
-extern void pte_free_submit(struct pte_freelist_batch *batch);
+static inline void pgtable_free(pgtable_free_t pgf)
+{
+        void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
+        int cachenum = pgf.val & PGF_CACHENUM_MASK;
 
-DECLARE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
+        kmem_cache_free(pgtable_cache[cachenum], p);
+}
 
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage);
-#define __pmd_free_tlb(tlb, pmd)        __pte_free_tlb(tlb, virt_to_page(pmd))
+void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+
+#define __pte_free_tlb(tlb, ptepage)    \
+        pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
+                PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
+#define __pmd_free_tlb(tlb, pmd)        \
+        pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
+                PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
+#define __pud_free_tlb(tlb, pud)        \
+        pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
+                PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
 
 #define check_pgt_cache()       do { } while (0)
 
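pgtable_free_cache() packs the owning cache's index into the low bits of the table pointer; this relies on each table being allocated size-aligned (which the cache setup is expected to arrange), so those bits are zero to begin with. A worked example with an illustrative address:

/* A PMD table is PMD_TABLE_SIZE = sizeof(pmd_t) << 7 = 1KB, so a table at
 * p = 0xc000000000345400 (1KB-aligned, made up) encodes as
 *
 *   pgf.val = (p & ~(PMD_TABLE_SIZE-1)) | PMD_CACHE_NUM
 *           = 0xc000000000345401
 *
 * pgtable_free() masks off the low 4 bits (PGF_CACHENUM_MASK) to recover
 * the pointer and uses what it stripped to index pgtable_cache[].
 */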
+54 -38
include/asm-ppc64/pgtable.h
···
 #include <asm/tlbflush.h>
 #endif /* __ASSEMBLY__ */
 
-#include <asm-generic/pgtable-nopud.h>
-
 /*
  * Entries per page directory level.  The PTE level must use a 64b record
  * for each page table entry.  The PMD and PGD level use a 32b record for
  * each entry by assuming that each entry is page aligned.
  */
 #define PTE_INDEX_SIZE  9
-#define PMD_INDEX_SIZE  10
-#define PGD_INDEX_SIZE  10
+#define PMD_INDEX_SIZE  7
+#define PUD_INDEX_SIZE  7
+#define PGD_INDEX_SIZE  9
+
+#define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE  (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PUD_TABLE_SIZE  (sizeof(pud_t) << PUD_INDEX_SIZE)
+#define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)
 
 #define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
 #define PTRS_PER_PMD    (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD    (1 << PUD_INDEX_SIZE)
 #define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)
 
 /* PMD_SHIFT determines what a second-level page table entry can map */
···
 #define PMD_SIZE        (1UL << PMD_SHIFT)
 #define PMD_MASK        (~(PMD_SIZE-1))
 
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT     (PMD_SHIFT + PMD_INDEX_SIZE)
+/* PUD_SHIFT determines what a third-level page table entry can map */
+#define PUD_SHIFT       (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PUD_SIZE        (1UL << PUD_SHIFT)
+#define PUD_MASK        (~(PUD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT     (PUD_SHIFT + PUD_INDEX_SIZE)
 #define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK      (~(PGDIR_SIZE-1))
 
···
 /*
  * Size of EA range mapped by our pagetables.
  */
-#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
-                    PGD_INDEX_SIZE + PAGE_SHIFT)
-#define EADDR_MASK ((1UL << EADDR_SIZE) - 1)
+#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+                            PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
+#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)
+
+#if TASK_SIZE_USER64 > PGTABLE_RANGE
+#error TASK_SIZE_USER64 exceeds pagetable range
+#endif
+
+#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#error TASK_SIZE_USER64 exceeds user VSID range
+#endif
 
 /*
  * Define the address range of the vmalloc VM area.
  */
 #define VMALLOC_START (0xD000000000000000ul)
-#define VMALLOC_SIZE  (0x10000000000UL)
+#define VMALLOC_SIZE  (0x80000000000UL)
 #define VMALLOC_END   (VMALLOC_START + VMALLOC_SIZE)
 
 /*
···
 #ifndef __ASSEMBLY__
 int hash_huge_page(struct mm_struct *mm, unsigned long access,
                    unsigned long ea, unsigned long vsid, int local);
-
-void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #endif /* __ASSEMBLY__ */
 
 #define HAVE_ARCH_UNMAPPED_AREA
···
 #else
 
 #define hash_huge_page(mm,a,ea,vsid,local)      -1
-#define hugetlb_mm_free_pgd(mm)                 do {} while (0)
 
 #endif
 
···
 #define pte_pfn(x)      ((unsigned long)((pte_val(x) >> PTE_SHIFT)))
 #define pte_page(x)     pfn_to_page(pte_pfn(x))
 
-#define pmd_set(pmdp, ptep) \
-        (pmd_val(*(pmdp)) = __ba_to_bpn(ptep))
+#define pmd_set(pmdp, ptep) ({BUG_ON((u64)ptep < KERNELBASE); pmd_val(*(pmdp)) = (unsigned long)(ptep);})
 #define pmd_none(pmd)           (!pmd_val(pmd))
 #define pmd_bad(pmd)            (pmd_val(pmd) == 0)
 #define pmd_present(pmd)        (pmd_val(pmd) != 0)
 #define pmd_clear(pmdp)         (pmd_val(*(pmdp)) = 0)
-#define pmd_page_kernel(pmd)    (__bpn_to_ba(pmd_val(pmd)))
+#define pmd_page_kernel(pmd)    (pmd_val(pmd))
 #define pmd_page(pmd)           virt_to_page(pmd_page_kernel(pmd))
 
-#define pud_set(pudp, pmdp)     (pud_val(*(pudp)) = (__ba_to_bpn(pmdp)))
+#define pud_set(pudp, pmdp)     (pud_val(*(pudp)) = (unsigned long)(pmdp))
 #define pud_none(pud)           (!pud_val(pud))
-#define pud_bad(pud)            ((pud_val(pud)) == 0UL)
-#define pud_present(pud)        (pud_val(pud) != 0UL)
-#define pud_clear(pudp)         (pud_val(*(pudp)) = 0UL)
-#define pud_page(pud)           (__bpn_to_ba(pud_val(pud)))
+#define pud_bad(pud)            ((pud_val(pud)) == 0)
+#define pud_present(pud)        (pud_val(pud) != 0)
+#define pud_clear(pudp)         (pud_val(*(pudp)) = 0)
+#define pud_page(pud)           (pud_val(pud))
+
+#define pgd_set(pgdp, pudp)     ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
+#define pgd_none(pgd)           (!pgd_val(pgd))
+#define pgd_bad(pgd)            (pgd_val(pgd) == 0)
+#define pgd_present(pgd)        (pgd_val(pgd) != 0)
+#define pgd_clear(pgdp)         (pgd_val(*(pgdp)) = 0)
+#define pgd_page(pgd)           (pgd_val(pgd))
 
 /*
  * Find an entry in a page-table-directory.  We combine the address region
  * (the high order N bits) and the pgd portion of the address.
  */
 /* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x7ff)
+#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)
 
 #define pgd_offset(mm, address)  ((mm)->pgd + pgd_index(address))
 
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pudp,addr) \
-  ((pmd_t *) pud_page(*(pudp)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+#define pud_offset(pgdp, addr) \
+  (((pud_t *) pgd_page(*(pgdp))) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
 
-/* Find an entry in the third-level page table.. */
+#define pmd_offset(pudp,addr) \
+  (((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+
 #define pte_offset_kernel(dir,addr) \
-  ((pte_t *) pmd_page_kernel(*(dir)) \
- + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+  (((pte_t *) pmd_page_kernel(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 
 #define pte_offset_map(dir,addr)          pte_offset_kernel((dir), (addr))
 #define pte_offset_map_nested(dir,addr)   pte_offset_kernel((dir), (addr))
···
 #define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
 
 #define pmd_ERROR(e) \
-        printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
+        printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pud_ERROR(e) \
+        printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
 #define pgd_ERROR(e) \
-        printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))
+        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
 extern pgd_t swapper_pg_dir[];
 
 extern void paging_init(void);
 
-/*
- * Because the huge pgtables are only 2 level, they can take
- * at most around 4M, much less than one hugepage which the
- * process is presumably entitled to use.  So we don't bother
- * freeing up the pagetables on unmap, and wait until
- * destroy_context() to clean up the lot.
- */
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
-        do { } while (0)
+        free_pgd_range(tlb, addr, end, floor, ceiling)
 
 /*
  * This gets called at the end of handling a page fault, when
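Together with the index sizes above, the new four-level layout covers exactly the enlarged user address space. A worked check, assuming 4KB pages (PAGE_SHIFT = 12) and SID_SHIFT = 28, both defined elsewhere:

/* PGTABLE_EADDR_SIZE = 9 + 7 + 7 + 9 + 12 = 44 bits
 * PGTABLE_RANGE      = 2^44 bytes = 16TB = TASK_SIZE_USER64,
 * so both #error guards above pass with no slack.  Per-level reach:
 *
 *   PMD_SHIFT   = 12 + 9 = 21   (2MB   per PMD entry)
 *   PUD_SHIFT   = 21 + 7 = 28   (256MB per PUD entry)
 *   PGDIR_SHIFT = 28 + 7 = 35   (32GB  per PGD entry; 512 entries = 16TB)
 */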
+2
include/asm-ppc64/pmc.h
···
 int reserve_pmc_hardware(perf_irq_t new_perf_irq);
 void release_pmc_hardware(void);
 
+void power4_enable_pmcs(void);
+
 #endif /* _PPC64_PMC_H */
+2 -2
include/asm-ppc64/processor.h
···
 extern struct task_struct *last_task_used_math;
 extern struct task_struct *last_task_used_altivec;
 
-/* 64-bit user address space is 41-bits (2TBs user VM) */
-#define TASK_SIZE_USER64 (0x0000020000000000UL)
+/* 64-bit user address space is 44-bits (16TB user VM) */
+#define TASK_SIZE_USER64 (0x0000100000000000UL)
 
 /*
  * 32-bit user address space is 4GB - 1 page
+9 -5
include/asm-ppc64/prom.h
···
 #define RELOC(x)        (*PTRRELOC(&(x)))
 
 /* Definitions used by the flattened device tree */
-#define OF_DT_HEADER            0xd00dfeed      /* 4: version, 4: total size */
-#define OF_DT_BEGIN_NODE        0x1             /* Start node: full name */
+#define OF_DT_HEADER            0xd00dfeed      /* marker */
+#define OF_DT_BEGIN_NODE        0x1             /* Start of node, full name */
 #define OF_DT_END_NODE          0x2             /* End node */
-#define OF_DT_PROP              0x3             /* Property: name off, size, content */
+#define OF_DT_PROP              0x3             /* Property: name off, size,
+                                                 * content */
+#define OF_DT_NOP               0x4             /* nop */
 #define OF_DT_END               0x9
 
-#define OF_DT_VERSION           1
+#define OF_DT_VERSION           0x10
 
 /*
  * This is what gets passed to the kernel by prom_init or kexec
···
         u32     version;                /* format version */
         u32     last_comp_version;      /* last compatible version */
         /* version 2 fields below */
-        u32     boot_cpuid_phys;        /* Which physical CPU id we're booting on */
+        u32     boot_cpuid_phys;        /* Physical CPU id we're booting on */
+        /* version 3 fields below */
+        u32     dt_strings_size;        /* size of the DT strings block */
 };
 
 
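Two of the additions change how a flat-tree consumer walks the blob: OF_DT_NOP tokens may now appear in the structure block, and dt_strings_size only exists on version 3+ headers. A minimal sketch; the function names are illustrative and only fields shown in this hunk are touched:

/* Only trust dt_strings_size on format version 3 or later trees. */
static u32 example_dt_strings_size(struct boot_param_header *bph)
{
        if (bph->version < 3)
                return 0;       /* field not present before version 3 */
        return bph->dt_strings_size;
}

/* Structure-block walkers must now skip OF_DT_NOP tokens. */
static u32 *example_skip_nops(u32 *tok)
{
        while (*tok == OF_DT_NOP)
                tok++;
        return tok;
}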
+3 -1
include/asm-ppc64/system.h
···
 DEBUGGER_BOILERPLATE(debugger_fault_handler)
 
 #ifdef CONFIG_XMON
-extern void xmon_init(void);
+extern void xmon_init(int enable);
 #endif
 
 #else
···
 #define NET_IP_ALIGN    0
 
 #define arch_align_stack(x) (x)
+
+extern unsigned long reloc_offset(void);
 
 #endif /* __KERNEL__ */
 #endif
+10
include/asm-ppc64/vio.h
···
 int vio_get_irq(struct vio_dev *dev);
 int vio_enable_interrupts(struct vio_dev *dev);
 int vio_disable_interrupts(struct vio_dev *dev);
+extern struct vio_dev * __devinit vio_register_device_common(
+                struct vio_dev *viodev, char *name, char *type,
+                uint32_t unit_address, struct iommu_table *iommu_table);
 
 extern struct dma_mapping_ops vio_dma_ops;
 
···
         struct device dev;
 };
 
+extern struct vio_dev vio_bus_device;
+
 static inline struct vio_dev *to_vio_dev(struct device *dev)
 {
         return container_of(dev, struct vio_dev, dev);
 }
+
+extern int vio_bus_init(int (*is_match)(const struct vio_device_id *id,
+                        const struct vio_dev *dev),
+                void (*)(struct vio_dev *),
+                void (*)(struct device *));
 
 #endif /* _ASM_VIO_H */
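A sketch of how a platform might call the new vio_bus_init() entry point. The prototype leaves its two void callbacks unnamed, so the roles assumed here (per-device teardown and struct device release) and every function name are illustrative only:

static int example_is_match(const struct vio_device_id *id,
                            const struct vio_dev *dev)
{
        return 0;       /* platform-specific ID matching would go here */
}

static void example_unregister(struct vio_dev *viodev) { }
static void example_release(struct device *dev) { }

static int __init example_vio_bus_setup(void)
{
        return vio_bus_init(example_is_match, example_unregister,
                            example_release);
}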