Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (53 commits)
serial: Add driver for the Cell Network Processor serial port NWP device
powerpc: enable dynamic ftrace
powerpc/cell: Fix the prototype of create_vma_map()
powerpc/mm: Make clear_fixmap() actually work
powerpc/kdump: Use ppc_save_regs() in crash_setup_regs()
powerpc: Export cacheable_memzero as its now used in a driver
powerpc: Fix missing semicolons in mmu_decl.h
powerpc/pasemi: local_irq_save uses an unsigned long
powerpc/cell: Fix some u64 vs. long types
powerpc/cell: Use correct types in beat files
powerpc: Use correct type in prom_init.c
powerpc: Remove unnecessary casts
mtd/ps3vram: Use _PAGE_NO_CACHE in memory ioremap
mtd/ps3vram: Use msleep in waits
mtd/ps3vram: Use proper kernel types
mtd/ps3vram: Cleanup ps3vram driver messages
mtd/ps3vram: Remove ps3vram debug routines
mtd/ps3vram: Add modalias support to the ps3vram driver
mtd/ps3vram: Add ps3vram driver for accessing video RAM as MTD
powerpc: Fix iseries drivers build failure without CONFIG_VIOPATH
...

+3089 -502
+31 -1
Documentation/powerpc/dts-bindings/fsl/board.txt
··· 18 18 19 19 Required properities: 20 20 - compatible : should be "fsl,fpga-pixis". 21 - - reg : should contain the address and the lenght of the FPPGA register 21 + - reg : should contain the address and the length of the FPPGA register 22 22 set. 23 23 24 24 Example (MPC8610HPCD): ··· 26 26 board-control@e8000000 { 27 27 compatible = "fsl,fpga-pixis"; 28 28 reg = <0xe8000000 32>; 29 + }; 30 + 31 + * Freescale BCSR GPIO banks 32 + 33 + Some BCSR registers act as simple GPIO controllers, each such 34 + register can be represented by the gpio-controller node. 35 + 36 + Required properities: 37 + - compatible : Should be "fsl,<board>-bcsr-gpio". 38 + - reg : Should contain the address and the length of the GPIO bank 39 + register. 40 + - #gpio-cells : Should be two. The first cell is the pin number and the 41 + second cell is used to specify optional paramters (currently unused). 42 + - gpio-controller : Marks the port as GPIO controller. 43 + 44 + Example: 45 + 46 + bcsr@1,0 { 47 + #address-cells = <1>; 48 + #size-cells = <1>; 49 + compatible = "fsl,mpc8360mds-bcsr"; 50 + reg = <1 0 0x8000>; 51 + ranges = <0 1 0 0x8000>; 52 + 53 + bcsr13: gpio-controller@d { 54 + #gpio-cells = <2>; 55 + compatible = "fsl,mpc8360mds-bcsr-gpio"; 56 + reg = <0xd 1>; 57 + gpio-controller; 58 + }; 29 59 };
+6
MAINTAINERS
··· 3489 3489 L: cbe-oss-dev@ozlabs.org 3490 3490 S: Supported 3491 3491 3492 + PS3VRAM DRIVER 3493 + P: Jim Paris 3494 + M: jim@jtan.com 3495 + L: cbe-oss-dev@ozlabs.org 3496 + S: Maintained 3497 + 3492 3498 PVRUSB2 VIDEO4LINUX DRIVER 3493 3499 P: Mike Isely 3494 3500 M: isely@pobox.com
+4 -1
arch/powerpc/Kconfig
··· 108 108 config PPC 109 109 bool 110 110 default y 111 + select HAVE_FTRACE_MCOUNT_RECORD 112 + select HAVE_DYNAMIC_FTRACE 111 113 select HAVE_FUNCTION_TRACER 112 114 select ARCH_WANT_OPTIONAL_GPIOLIB 113 115 select HAVE_IDE ··· 328 326 329 327 config CRASH_DUMP 330 328 bool "Build a kdump crash kernel" 331 - depends on (PPC64 && RELOCATABLE) || 6xx 329 + depends on PPC64 || 6xx 330 + select RELOCATABLE if PPC64 332 331 help 333 332 Build a kernel suitable for use as a kdump capture kernel. 334 333 The same kernel binary can be used as production kernel and dump
+1 -1
arch/powerpc/boot/Makefile
··· 356 356 @rm -f $@; ln $< $@ 357 357 358 358 install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y)) 359 - sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $< 359 + sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $^ 360 360 361 361 # anything not in $(targets) 362 362 clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
+41 -2
arch/powerpc/boot/dts/mpc836x_mds.dts
··· 69 69 }; 70 70 71 71 bcsr@1,0 { 72 + #address-cells = <1>; 73 + #size-cells = <1>; 72 74 compatible = "fsl,mpc8360mds-bcsr"; 73 75 reg = <1 0 0x8000>; 76 + ranges = <0 1 0 0x8000>; 77 + 78 + bcsr13: gpio-controller@d { 79 + #gpio-cells = <2>; 80 + compatible = "fsl,mpc8360mds-bcsr-gpio"; 81 + reg = <0xd 1>; 82 + gpio-controller; 83 + }; 74 84 }; 75 85 }; 76 86 ··· 205 195 }; 206 196 207 197 par_io@1400 { 198 + #address-cells = <1>; 199 + #size-cells = <1>; 208 200 reg = <0x1400 0x100>; 201 + ranges = <0 0x1400 0x100>; 209 202 device_type = "par_io"; 210 203 num-ports = <7>; 204 + 205 + qe_pio_b: gpio-controller@18 { 206 + #gpio-cells = <2>; 207 + compatible = "fsl,mpc8360-qe-pario-bank", 208 + "fsl,mpc8323-qe-pario-bank"; 209 + reg = <0x18 0x18>; 210 + gpio-controller; 211 + }; 211 212 212 213 pio1: ucc_pin@01 { 213 214 pio-map = < ··· 303 282 }; 304 283 }; 305 284 285 + timer@440 { 286 + compatible = "fsl,mpc8360-qe-gtm", 287 + "fsl,qe-gtm", "fsl,gtm"; 288 + reg = <0x440 0x40>; 289 + clock-frequency = <132000000>; 290 + interrupts = <12 13 14 15>; 291 + interrupt-parent = <&qeic>; 292 + }; 293 + 306 294 spi@4c0 { 307 295 cell-index = <0>; 308 296 compatible = "fsl,spi"; ··· 331 301 }; 332 302 333 303 usb@6c0 { 334 - compatible = "qe_udc"; 304 + compatible = "fsl,mpc8360-qe-usb", 305 + "fsl,mpc8323-qe-usb"; 335 306 reg = <0x6c0 0x40 0x8b00 0x100>; 336 307 interrupts = <11>; 337 308 interrupt-parent = <&qeic>; 338 - mode = "slave"; 309 + fsl,fullspeed-clock = "clk21"; 310 + fsl,lowspeed-clock = "brg9"; 311 + gpios = <&qe_pio_b 2 0 /* USBOE */ 312 + &qe_pio_b 3 0 /* USBTP */ 313 + &qe_pio_b 8 0 /* USBTN */ 314 + &qe_pio_b 9 0 /* USBRP */ 315 + &qe_pio_b 11 0 /* USBRN */ 316 + &bcsr13 5 0 /* SPEED */ 317 + &bcsr13 4 1>; /* POWER */ 339 318 }; 340 319 341 320 enet0: ucc@2000 {
+17 -2
arch/powerpc/boot/dts/mpc836x_rdk.dts
··· 218 218 reg = <0x440 0x40>; 219 219 interrupts = <12 13 14 15>; 220 220 interrupt-parent = <&qeic>; 221 - /* filled by u-boot */ 222 - clock-frequency = <0>; 221 + clock-frequency = <166666666>; 222 + }; 223 + 224 + usb@6c0 { 225 + compatible = "fsl,mpc8360-qe-usb", 226 + "fsl,mpc8323-qe-usb"; 227 + reg = <0x6c0 0x40 0x8b00 0x100>; 228 + interrupts = <11>; 229 + interrupt-parent = <&qeic>; 230 + fsl,fullspeed-clock = "clk21"; 231 + gpios = <&qe_pio_b 2 0 /* USBOE */ 232 + &qe_pio_b 3 0 /* USBTP */ 233 + &qe_pio_b 8 0 /* USBTN */ 234 + &qe_pio_b 9 0 /* USBRP */ 235 + &qe_pio_b 11 0 /* USBRN */ 236 + &qe_pio_e 20 0 /* SPEED */ 237 + &qe_pio_e 21 1 /* POWER */>; 223 238 }; 224 239 225 240 spi@4c0 {
+32 -24
arch/powerpc/boot/dts/mpc8641_hpcn.dts
··· 26 26 serial1 = &serial1; 27 27 pci0 = &pci0; 28 28 pci1 = &pci1; 29 - rapidio0 = &rapidio0; 29 + /* 30 + * Only one of Rapid IO or PCI can be present due to HW limitations and 31 + * due to the fact that the 2 now share address space in the new memory 32 + * map. The most likely case is that we have PCI, so comment out the 33 + * rapidio node. Leave it here for reference. 34 + */ 35 + /* rapidio0 = &rapidio0; */ 30 36 }; 31 37 32 38 cpus { ··· 68 62 reg = <0x00000000 0x40000000>; // 1G at 0x0 69 63 }; 70 64 71 - localbus@f8005000 { 65 + localbus@ffe05000 { 72 66 #address-cells = <2>; 73 67 #size-cells = <1>; 74 68 compatible = "fsl,mpc8641-localbus", "simple-bus"; 75 - reg = <0xf8005000 0x1000>; 69 + reg = <0xffe05000 0x1000>; 76 70 interrupts = <19 2>; 77 71 interrupt-parent = <&mpic>; 78 72 79 - ranges = <0 0 0xff800000 0x00800000 80 - 1 0 0xfe000000 0x01000000 81 - 2 0 0xf8200000 0x00100000 82 - 3 0 0xf8100000 0x00100000>; 73 + ranges = <0 0 0xef800000 0x00800000 74 + 2 0 0xffdf8000 0x00008000 75 + 3 0 0xffdf0000 0x00008000>; 83 76 84 77 flash@0,0 { 85 78 compatible = "cfi-flash"; ··· 108 103 }; 109 104 }; 110 105 111 - soc8641@f8000000 { 106 + soc8641@ffe00000 { 112 107 #address-cells = <1>; 113 108 #size-cells = <1>; 114 109 device_type = "soc"; 115 110 compatible = "simple-bus"; 116 - ranges = <0x00000000 0xf8000000 0x00100000>; 117 - reg = <0xf8000000 0x00001000>; // CCSRBAR 111 + ranges = <0x00000000 0xffe00000 0x00100000>; 112 + reg = <0xffe00000 0x00001000>; // CCSRBAR 118 113 bus-frequency = <0>; 119 114 120 115 i2c@3000 { ··· 345 340 }; 346 341 }; 347 342 348 - pci0: pcie@f8008000 { 343 + pci0: pcie@ffe08000 { 349 344 cell-index = <0>; 350 345 compatible = "fsl,mpc8641-pcie"; 351 346 device_type = "pci"; 352 347 #interrupt-cells = <1>; 353 348 #size-cells = <2>; 354 349 #address-cells = <3>; 355 - reg = <0xf8008000 0x1000>; 350 + reg = <0xffe08000 0x1000>; 356 351 bus-range = <0x0 0xff>; 357 352 ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 
0x20000000 358 - 0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>; 353 + 0x01000000 0x0 0x00000000 0xffc00000 0x0 0x00010000>; 359 354 clock-frequency = <33333333>; 360 355 interrupt-parent = <&mpic>; 361 356 interrupts = <24 2>; ··· 486 481 487 482 0x01000000 0x0 0x00000000 488 483 0x01000000 0x0 0x00000000 489 - 0x0 0x00100000>; 484 + 0x0 0x00010000>; 490 485 uli1575@0 { 491 486 reg = <0 0 0 0 0>; 492 487 #size-cells = <2>; ··· 496 491 0x0 0x20000000 497 492 0x01000000 0x0 0x00000000 498 493 0x01000000 0x0 0x00000000 499 - 0x0 0x00100000>; 494 + 0x0 0x00010000>; 500 495 isa@1e { 501 496 device_type = "isa"; 502 497 #interrupt-cells = <2>; ··· 554 549 555 550 }; 556 551 557 - pci1: pcie@f8009000 { 552 + pci1: pcie@ffe09000 { 558 553 cell-index = <1>; 559 554 compatible = "fsl,mpc8641-pcie"; 560 555 device_type = "pci"; 561 556 #interrupt-cells = <1>; 562 557 #size-cells = <2>; 563 558 #address-cells = <3>; 564 - reg = <0xf8009000 0x1000>; 559 + reg = <0xffe09000 0x1000>; 565 560 bus-range = <0 0xff>; 566 561 ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000 567 - 0x01000000 0x0 0x00000000 0xe3000000 0x0 0x00100000>; 562 + 0x01000000 0x0 0x00000000 0xffc10000 0x0 0x00010000>; 568 563 clock-frequency = <33333333>; 569 564 interrupt-parent = <&mpic>; 570 565 interrupts = <25 2>; ··· 587 582 588 583 0x01000000 0x0 0x00000000 589 584 0x01000000 0x0 0x00000000 590 - 0x0 0x00100000>; 585 + 0x0 0x00010000>; 591 586 }; 592 587 }; 593 - rapidio0: rapidio@f80c0000 { 588 + /* 589 + rapidio0: rapidio@ffec0000 { 594 590 #address-cells = <2>; 595 591 #size-cells = <2>; 596 592 compatible = "fsl,rapidio-delta"; 597 - reg = <0xf80c0000 0x20000>; 598 - ranges = <0 0 0xc0000000 0 0x20000000>; 593 + reg = <0xffec0000 0x20000>; 594 + ranges = <0 0 0x80000000 0 0x20000000>; 599 595 interrupt-parent = <&mpic>; 600 - /* err_irq bell_outb_irq bell_inb_irq 601 - msg1_tx_irq msg1_rx_irq msg2_tx_irq msg2_rx_irq */ 596 + // err_irq bell_outb_irq bell_inb_irq 597 + // 
msg1_tx_irq msg1_rx_irq msg2_tx_irq msg2_rx_irq 602 598 interrupts = <48 2 49 2 50 2 53 2 54 2 55 2 56 2>; 603 599 }; 600 + */ 601 + 604 602 };
+13 -1
arch/powerpc/boot/install.sh
··· 15 15 # $2 - kernel image file 16 16 # $3 - kernel map file 17 17 # $4 - default install path (blank if root directory) 18 - # $5 - kernel boot file, the zImage 18 + # $5 and more - kernel boot files; zImage*, uImage, cuImage.*, etc. 19 19 # 20 20 21 21 # User may have a custom install script ··· 38 38 39 39 cat $2 > $4/$image_name 40 40 cp $3 $4/System.map 41 + 42 + # Copy all the bootable image files 43 + path=$4 44 + shift 4 45 + while [ $# -ne 0 ]; do 46 + image_name=`basename $1` 47 + if [ -f $path/$image_name ]; then 48 + mv $path/$image_name $path/$image_name.old 49 + fi 50 + cat $1 > $path/$image_name 51 + shift 52 + done;
+28 -15
arch/powerpc/configs/85xx/mpc8572_ds_defconfig
··· 1 1 # 2 2 # Automatically generated make config: don't edit 3 - # Linux kernel version: 2.6.28-rc3 4 - # Sat Nov 8 12:40:13 2008 3 + # Linux kernel version: 2.6.28-rc8 4 + # Tue Dec 30 11:17:46 2008 5 5 # 6 6 # CONFIG_PPC64 is not set 7 7 ··· 21 21 CONFIG_FSL_EMB_PERFMON=y 22 22 # CONFIG_PHYS_64BIT is not set 23 23 CONFIG_SPE=y 24 + CONFIG_PPC_MMU_NOHASH=y 24 25 # CONFIG_PPC_MM_SLICES is not set 26 + CONFIG_SMP=y 27 + CONFIG_NR_CPUS=2 25 28 CONFIG_PPC32=y 26 29 CONFIG_WORD_SIZE=32 27 30 # CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set ··· 53 50 CONFIG_PPC_OF=y 54 51 CONFIG_OF=y 55 52 CONFIG_PPC_UDBG_16550=y 56 - # CONFIG_GENERIC_TBSYNC is not set 53 + CONFIG_GENERIC_TBSYNC=y 57 54 CONFIG_AUDIT_ARCH=y 58 55 CONFIG_GENERIC_BUG=y 59 56 CONFIG_DEFAULT_UIMAGE=y ··· 65 62 # General setup 66 63 # 67 64 CONFIG_EXPERIMENTAL=y 68 - CONFIG_BROKEN_ON_SMP=y 65 + CONFIG_LOCK_KERNEL=y 69 66 CONFIG_INIT_ENV_ARG_LIMIT=32 70 67 CONFIG_LOCALVERSION="" 71 68 CONFIG_LOCALVERSION_AUTO=y ··· 129 126 CONFIG_HAVE_KPROBES=y 130 127 CONFIG_HAVE_KRETPROBES=y 131 128 CONFIG_HAVE_ARCH_TRACEHOOK=y 129 + CONFIG_USE_GENERIC_SMP_HELPERS=y 132 130 # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 133 131 CONFIG_SLABINFO=y 134 132 CONFIG_RT_MUTEXES=y ··· 142 138 CONFIG_MODVERSIONS=y 143 139 # CONFIG_MODULE_SRCVERSION_ALL is not set 144 140 CONFIG_KMOD=y 141 + CONFIG_STOP_MACHINE=y 145 142 CONFIG_BLOCK=y 146 143 CONFIG_LBD=y 147 144 # CONFIG_BLK_DEV_IO_TRACE is not set ··· 202 197 # CONFIG_CPM2 is not set 203 198 CONFIG_FSL_ULI1575=y 204 199 # CONFIG_MPC8xxx_GPIO is not set 200 + # CONFIG_SIMPLE_GPIO is not set 205 201 206 202 # 207 203 # Kernel options ··· 230 224 CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y 231 225 CONFIG_ARCH_HAS_WALK_MEMORY=y 232 226 CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y 227 + # CONFIG_IRQ_ALL_CPUS is not set 233 228 CONFIG_ARCH_FLATMEM_ENABLE=y 234 229 CONFIG_ARCH_POPULATES_NODE_MAP=y 235 230 CONFIG_SELECT_MEMORY_MODEL=y ··· 248 241 CONFIG_BOUNCE=y 249 242 CONFIG_VIRT_TO_BUS=y 250 243 
CONFIG_UNEVICTABLE_LRU=y 244 + CONFIG_PPC_4K_PAGES=y 245 + # CONFIG_PPC_16K_PAGES is not set 246 + # CONFIG_PPC_64K_PAGES is not set 251 247 CONFIG_FORCE_MAX_ZONEORDER=11 252 248 CONFIG_PROC_DEVICETREE=y 253 249 # CONFIG_CMDLINE_BOOL is not set ··· 453 443 # CONFIG_EEPROM_93CX6 is not set 454 444 # CONFIG_SGI_IOC4 is not set 455 445 # CONFIG_TIFM_CORE is not set 446 + # CONFIG_ICS932S401 is not set 456 447 # CONFIG_ENCLOSURE_SERVICES is not set 457 448 # CONFIG_HP_ILO is not set 449 + # CONFIG_C2PORT is not set 458 450 CONFIG_HAVE_IDE=y 459 451 # CONFIG_IDE is not set 460 452 ··· 796 784 CONFIG_UNIX98_PTYS=y 797 785 CONFIG_LEGACY_PTYS=y 798 786 CONFIG_LEGACY_PTY_COUNT=256 787 + # CONFIG_HVC_UDBG is not set 799 788 # CONFIG_IPMI_HANDLER is not set 800 789 CONFIG_HW_RANDOM=y 801 790 CONFIG_NVRAM=y ··· 882 869 # CONFIG_THERMAL is not set 883 870 # CONFIG_THERMAL_HWMON is not set 884 871 # CONFIG_WATCHDOG is not set 872 + CONFIG_SSB_POSSIBLE=y 885 873 886 874 # 887 875 # Sonics Silicon Backplane 888 876 # 889 - CONFIG_SSB_POSSIBLE=y 890 877 # CONFIG_SSB is not set 891 878 892 879 # ··· 899 886 # CONFIG_PMIC_DA903X is not set 900 887 # CONFIG_MFD_WM8400 is not set 901 888 # CONFIG_MFD_WM8350_I2C is not set 902 - 903 - # 904 - # Voltage and Current regulators 905 - # 906 889 # CONFIG_REGULATOR is not set 907 - # CONFIG_REGULATOR_FIXED_VOLTAGE is not set 908 - # CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set 909 - # CONFIG_REGULATOR_BQ24022 is not set 910 890 911 891 # 912 892 # Multimedia devices ··· 1258 1252 # CONFIG_USB_TMC is not set 1259 1253 1260 1254 # 1261 - # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 1255 + # NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; 1262 1256 # 1263 1257 1264 1258 # 1265 - # may also be needed; see USB_STORAGE Help for more information 1259 + # see USB_STORAGE Help for more information 1266 1260 # 1267 1261 CONFIG_USB_STORAGE=y 1268 1262 # CONFIG_USB_STORAGE_DEBUG is not set ··· 1354 1348 # 
CONFIG_RTC_DRV_M41T80 is not set 1355 1349 # CONFIG_RTC_DRV_S35390A is not set 1356 1350 # CONFIG_RTC_DRV_FM3130 is not set 1351 + # CONFIG_RTC_DRV_RX8581 is not set 1357 1352 1358 1353 # 1359 1354 # SPI RTC drivers ··· 1631 1624 # CONFIG_SAMPLES is not set 1632 1625 CONFIG_HAVE_ARCH_KGDB=y 1633 1626 # CONFIG_KGDB is not set 1627 + CONFIG_PRINT_STACK_DEPTH=64 1634 1628 # CONFIG_DEBUG_STACKOVERFLOW is not set 1635 1629 # CONFIG_DEBUG_STACK_USAGE is not set 1636 1630 # CONFIG_DEBUG_PAGEALLOC is not set ··· 1657 1649 # 1658 1650 # CONFIG_CRYPTO_FIPS is not set 1659 1651 CONFIG_CRYPTO_ALGAPI=y 1652 + CONFIG_CRYPTO_ALGAPI2=y 1660 1653 CONFIG_CRYPTO_AEAD=y 1654 + CONFIG_CRYPTO_AEAD2=y 1661 1655 CONFIG_CRYPTO_BLKCIPHER=y 1656 + CONFIG_CRYPTO_BLKCIPHER2=y 1662 1657 CONFIG_CRYPTO_HASH=y 1663 - CONFIG_CRYPTO_RNG=y 1658 + CONFIG_CRYPTO_HASH2=y 1659 + CONFIG_CRYPTO_RNG2=y 1664 1660 CONFIG_CRYPTO_MANAGER=y 1661 + CONFIG_CRYPTO_MANAGER2=y 1665 1662 # CONFIG_CRYPTO_GF128MUL is not set 1666 1663 # CONFIG_CRYPTO_NULL is not set 1667 1664 # CONFIG_CRYPTO_CRYPTD is not set
+2
arch/powerpc/include/asm/ioctls.h
··· 89 89 #define TIOCSBRK 0x5427 /* BSD compatibility */ 90 90 #define TIOCCBRK 0x5428 /* BSD compatibility */ 91 91 #define TIOCGSID 0x5429 /* Return the session ID of FD */ 92 + #define TIOCGRS485 0x542e 93 + #define TIOCSRS485 0x542f 92 94 #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ 93 95 #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ 94 96
-55
arch/powerpc/include/asm/kexec.h
··· 48 48 { 49 49 if (oldregs) 50 50 memcpy(newregs, oldregs, sizeof(*newregs)); 51 - #ifdef __powerpc64__ 52 - else { 53 - /* FIXME Merge this with xmon_save_regs ?? */ 54 - unsigned long tmp1, tmp2; 55 - __asm__ __volatile__ ( 56 - "std 0,0(%2)\n" 57 - "std 1,8(%2)\n" 58 - "std 2,16(%2)\n" 59 - "std 3,24(%2)\n" 60 - "std 4,32(%2)\n" 61 - "std 5,40(%2)\n" 62 - "std 6,48(%2)\n" 63 - "std 7,56(%2)\n" 64 - "std 8,64(%2)\n" 65 - "std 9,72(%2)\n" 66 - "std 10,80(%2)\n" 67 - "std 11,88(%2)\n" 68 - "std 12,96(%2)\n" 69 - "std 13,104(%2)\n" 70 - "std 14,112(%2)\n" 71 - "std 15,120(%2)\n" 72 - "std 16,128(%2)\n" 73 - "std 17,136(%2)\n" 74 - "std 18,144(%2)\n" 75 - "std 19,152(%2)\n" 76 - "std 20,160(%2)\n" 77 - "std 21,168(%2)\n" 78 - "std 22,176(%2)\n" 79 - "std 23,184(%2)\n" 80 - "std 24,192(%2)\n" 81 - "std 25,200(%2)\n" 82 - "std 26,208(%2)\n" 83 - "std 27,216(%2)\n" 84 - "std 28,224(%2)\n" 85 - "std 29,232(%2)\n" 86 - "std 30,240(%2)\n" 87 - "std 31,248(%2)\n" 88 - "mfmsr %0\n" 89 - "std %0, 264(%2)\n" 90 - "mfctr %0\n" 91 - "std %0, 280(%2)\n" 92 - "mflr %0\n" 93 - "std %0, 288(%2)\n" 94 - "bl 1f\n" 95 - "1: mflr %1\n" 96 - "std %1, 256(%2)\n" 97 - "mtlr %0\n" 98 - "mfxer %0\n" 99 - "std %0, 296(%2)\n" 100 - : "=&r" (tmp1), "=&r" (tmp2) 101 - : "b" (newregs) 102 - : "memory"); 103 - } 104 - #else 105 51 else 106 52 ppc_save_regs(newregs); 107 - #endif /* __powerpc64__ */ 108 53 } 109 54 110 55 extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
+2
arch/powerpc/include/asm/ps3.h
··· 320 320 321 321 enum ps3_match_sub_id { 322 322 PS3_MATCH_SUB_ID_GPU_FB = 1, 323 + PS3_MATCH_SUB_ID_GPU_RAMDISK = 2, 323 324 }; 324 325 325 326 #define PS3_MODULE_ALIAS_EHCI "ps3:1:0" ··· 333 332 #define PS3_MODULE_ALIAS_STOR_FLASH "ps3:8:0" 334 333 #define PS3_MODULE_ALIAS_SOUND "ps3:9:0" 335 334 #define PS3_MODULE_ALIAS_GPU_FB "ps3:10:1" 335 + #define PS3_MODULE_ALIAS_GPU_RAMDISK "ps3:10:2" 336 336 #define PS3_MODULE_ALIAS_LPM "ps3:11:0" 337 337 338 338 enum ps3_system_bus_device_type {
+35 -2
arch/powerpc/include/asm/qe.h
··· 17 17 #ifdef __KERNEL__ 18 18 19 19 #include <linux/spinlock.h> 20 + #include <linux/errno.h> 21 + #include <linux/err.h> 20 22 #include <asm/cpm.h> 21 23 #include <asm/immap_qe.h> 22 24 ··· 86 84 extern spinlock_t cmxgcr_lock; 87 85 88 86 /* Export QE common operations */ 87 + #ifdef CONFIG_QUICC_ENGINE 89 88 extern void __init qe_reset(void); 89 + #else 90 + static inline void qe_reset(void) {} 91 + #endif 90 92 91 93 /* QE PIO */ 92 94 #define QE_PIO_PINS 32 ··· 107 101 #endif 108 102 }; 109 103 110 - extern int par_io_init(struct device_node *np); 111 - extern int par_io_of_config(struct device_node *np); 112 104 #define QE_PIO_DIR_IN 2 113 105 #define QE_PIO_DIR_OUT 1 114 106 extern void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, 115 107 int dir, int open_drain, int assignment, 116 108 int has_irq); 109 + #ifdef CONFIG_QUICC_ENGINE 110 + extern int par_io_init(struct device_node *np); 111 + extern int par_io_of_config(struct device_node *np); 117 112 extern int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, 118 113 int assignment, int has_irq); 119 114 extern int par_io_data_set(u8 port, u8 pin, u8 val); 115 + #else 116 + static inline int par_io_init(struct device_node *np) { return -ENOSYS; } 117 + static inline int par_io_of_config(struct device_node *np) { return -ENOSYS; } 118 + static inline int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, 119 + int assignment, int has_irq) { return -ENOSYS; } 120 + static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; } 121 + #endif /* CONFIG_QUICC_ENGINE */ 122 + 123 + /* 124 + * Pin multiplexing functions. 
125 + */ 126 + struct qe_pin; 127 + #ifdef CONFIG_QE_GPIO 128 + extern struct qe_pin *qe_pin_request(struct device_node *np, int index); 129 + extern void qe_pin_free(struct qe_pin *qe_pin); 130 + extern void qe_pin_set_gpio(struct qe_pin *qe_pin); 131 + extern void qe_pin_set_dedicated(struct qe_pin *pin); 132 + #else 133 + static inline struct qe_pin *qe_pin_request(struct device_node *np, int index) 134 + { 135 + return ERR_PTR(-ENOSYS); 136 + } 137 + static inline void qe_pin_free(struct qe_pin *qe_pin) {} 138 + static inline void qe_pin_set_gpio(struct qe_pin *qe_pin) {} 139 + static inline void qe_pin_set_dedicated(struct qe_pin *pin) {} 140 + #endif /* CONFIG_QE_GPIO */ 120 141 121 142 /* QE internal API */ 122 143 int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input);
+17 -4
arch/powerpc/include/asm/qe_ic.h
··· 17 17 18 18 #include <linux/irq.h> 19 19 20 + struct device_node; 21 + struct qe_ic; 22 + 20 23 #define NUM_OF_QE_IC_GROUPS 6 21 24 22 25 /* Flags when we init the QE IC */ ··· 57 54 QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */ 58 55 }; 59 56 57 + #ifdef CONFIG_QUICC_ENGINE 60 58 void qe_ic_init(struct device_node *node, unsigned int flags, 61 59 void (*low_handler)(unsigned int irq, struct irq_desc *desc), 62 60 void (*high_handler)(unsigned int irq, struct irq_desc *desc)); 61 + unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic); 62 + unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic); 63 + #else 64 + static inline void qe_ic_init(struct device_node *node, unsigned int flags, 65 + void (*low_handler)(unsigned int irq, struct irq_desc *desc), 66 + void (*high_handler)(unsigned int irq, struct irq_desc *desc)) 67 + {} 68 + static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) 69 + { return 0; } 70 + static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) 71 + { return 0; } 72 + #endif /* CONFIG_QUICC_ENGINE */ 73 + 63 74 void qe_ic_set_highest_priority(unsigned int virq, int high); 64 75 int qe_ic_set_priority(unsigned int virq, unsigned int priority); 65 76 int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high); 66 - 67 - struct qe_ic; 68 - unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic); 69 - unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic); 70 77 71 78 static inline void qe_ic_cascade_low_ipic(unsigned int irq, 72 79 struct irq_desc *desc)
+1 -1
arch/powerpc/include/asm/spu.h
··· 128 128 int number; 129 129 unsigned int irqs[3]; 130 130 u32 node; 131 - u64 flags; 131 + unsigned long flags; 132 132 u64 class_0_pending; 133 133 u64 class_0_dar; 134 134 u64 class_1_dar;
+1 -1
arch/powerpc/kernel/Makefile
··· 29 29 obj-y := cputable.o ptrace.o syscalls.o \ 30 30 irq.o align.o signal_32.o pmc.o vdso.o \ 31 31 init_task.o process.o systbl.o idle.o \ 32 - signal.o sysfs.o 32 + signal.o sysfs.o cacheinfo.o 33 33 obj-y += vdso32/ 34 34 obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ 35 35 signal_64.o ptrace32.o \
+837
arch/powerpc/kernel/cacheinfo.c
··· 1 + /* 2 + * Processor cache information made available to userspace via sysfs; 3 + * intended to be compatible with x86 intel_cacheinfo implementation. 4 + * 5 + * Copyright 2008 IBM Corporation 6 + * Author: Nathan Lynch 7 + * 8 + * This program is free software; you can redistribute it and/or 9 + * modify it under the terms of the GNU General Public License version 10 + * 2 as published by the Free Software Foundation. 11 + */ 12 + 13 + #include <linux/cpu.h> 14 + #include <linux/cpumask.h> 15 + #include <linux/init.h> 16 + #include <linux/kernel.h> 17 + #include <linux/kobject.h> 18 + #include <linux/list.h> 19 + #include <linux/notifier.h> 20 + #include <linux/of.h> 21 + #include <linux/percpu.h> 22 + #include <asm/prom.h> 23 + 24 + #include "cacheinfo.h" 25 + 26 + /* per-cpu object for tracking: 27 + * - a "cache" kobject for the top-level directory 28 + * - a list of "index" objects representing the cpu's local cache hierarchy 29 + */ 30 + struct cache_dir { 31 + struct kobject *kobj; /* bare (not embedded) kobject for cache 32 + * directory */ 33 + struct cache_index_dir *index; /* list of index objects */ 34 + }; 35 + 36 + /* "index" object: each cpu's cache directory has an index 37 + * subdirectory corresponding to a cache object associated with the 38 + * cpu. This object's lifetime is managed via the embedded kobject. 39 + */ 40 + struct cache_index_dir { 41 + struct kobject kobj; 42 + struct cache_index_dir *next; /* next index in parent directory */ 43 + struct cache *cache; 44 + }; 45 + 46 + /* Template for determining which OF properties to query for a given 47 + * cache type */ 48 + struct cache_type_info { 49 + const char *name; 50 + const char *size_prop; 51 + 52 + /* Allow for both [di]-cache-line-size and 53 + * [di]-cache-block-size properties. 
According to the PowerPC 54 + * Processor binding, -line-size should be provided if it 55 + * differs from the cache block size (that which is operated 56 + * on by cache instructions), so we look for -line-size first. 57 + * See cache_get_line_size(). */ 58 + 59 + const char *line_size_props[2]; 60 + const char *nr_sets_prop; 61 + }; 62 + 63 + /* These are used to index the cache_type_info array. */ 64 + #define CACHE_TYPE_UNIFIED 0 65 + #define CACHE_TYPE_INSTRUCTION 1 66 + #define CACHE_TYPE_DATA 2 67 + 68 + static const struct cache_type_info cache_type_info[] = { 69 + { 70 + /* PowerPC Processor binding says the [di]-cache-* 71 + * must be equal on unified caches, so just use 72 + * d-cache properties. */ 73 + .name = "Unified", 74 + .size_prop = "d-cache-size", 75 + .line_size_props = { "d-cache-line-size", 76 + "d-cache-block-size", }, 77 + .nr_sets_prop = "d-cache-sets", 78 + }, 79 + { 80 + .name = "Instruction", 81 + .size_prop = "i-cache-size", 82 + .line_size_props = { "i-cache-line-size", 83 + "i-cache-block-size", }, 84 + .nr_sets_prop = "i-cache-sets", 85 + }, 86 + { 87 + .name = "Data", 88 + .size_prop = "d-cache-size", 89 + .line_size_props = { "d-cache-line-size", 90 + "d-cache-block-size", }, 91 + .nr_sets_prop = "d-cache-sets", 92 + }, 93 + }; 94 + 95 + /* Cache object: each instance of this corresponds to a distinct cache 96 + * in the system. There are separate objects for Harvard caches: one 97 + * each for instruction and data, and each refers to the same OF node. 98 + * The refcount of the OF node is elevated for the lifetime of the 99 + * cache object. A cache object is released when its shared_cpu_map 100 + * is cleared (see cache_cpu_clear). 101 + * 102 + * A cache object is on two lists: an unsorted global list 103 + * (cache_list) of cache objects; and a singly-linked list 104 + * representing the local cache hierarchy, which is ordered by level 105 + * (e.g. L1d -> L1i -> L2 -> L3). 
106 + */ 107 + struct cache { 108 + struct device_node *ofnode; /* OF node for this cache, may be cpu */ 109 + struct cpumask shared_cpu_map; /* online CPUs using this cache */ 110 + int type; /* split cache disambiguation */ 111 + int level; /* level not explicit in device tree */ 112 + struct list_head list; /* global list of cache objects */ 113 + struct cache *next_local; /* next cache of >= level */ 114 + }; 115 + 116 + static DEFINE_PER_CPU(struct cache_dir *, cache_dir); 117 + 118 + /* traversal/modification of this list occurs only at cpu hotplug time; 119 + * access is serialized by cpu hotplug locking 120 + */ 121 + static LIST_HEAD(cache_list); 122 + 123 + static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k) 124 + { 125 + return container_of(k, struct cache_index_dir, kobj); 126 + } 127 + 128 + static const char *cache_type_string(const struct cache *cache) 129 + { 130 + return cache_type_info[cache->type].name; 131 + } 132 + 133 + static void __cpuinit cache_init(struct cache *cache, int type, int level, struct device_node *ofnode) 134 + { 135 + cache->type = type; 136 + cache->level = level; 137 + cache->ofnode = of_node_get(ofnode); 138 + INIT_LIST_HEAD(&cache->list); 139 + list_add(&cache->list, &cache_list); 140 + } 141 + 142 + static struct cache *__cpuinit new_cache(int type, int level, struct device_node *ofnode) 143 + { 144 + struct cache *cache; 145 + 146 + cache = kzalloc(sizeof(*cache), GFP_KERNEL); 147 + if (cache) 148 + cache_init(cache, type, level, ofnode); 149 + 150 + return cache; 151 + } 152 + 153 + static void release_cache_debugcheck(struct cache *cache) 154 + { 155 + struct cache *iter; 156 + 157 + list_for_each_entry(iter, &cache_list, list) 158 + WARN_ONCE(iter->next_local == cache, 159 + "cache for %s(%s) refers to cache for %s(%s)\n", 160 + iter->ofnode->full_name, 161 + cache_type_string(iter), 162 + cache->ofnode->full_name, 163 + cache_type_string(cache)); 164 + } 165 + 166 + static void 
release_cache(struct cache *cache) 167 + { 168 + if (!cache) 169 + return; 170 + 171 + pr_debug("freeing L%d %s cache for %s\n", cache->level, 172 + cache_type_string(cache), cache->ofnode->full_name); 173 + 174 + release_cache_debugcheck(cache); 175 + list_del(&cache->list); 176 + of_node_put(cache->ofnode); 177 + kfree(cache); 178 + } 179 + 180 + static void cache_cpu_set(struct cache *cache, int cpu) 181 + { 182 + struct cache *next = cache; 183 + 184 + while (next) { 185 + WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map), 186 + "CPU %i already accounted in %s(%s)\n", 187 + cpu, next->ofnode->full_name, 188 + cache_type_string(next)); 189 + cpumask_set_cpu(cpu, &next->shared_cpu_map); 190 + next = next->next_local; 191 + } 192 + } 193 + 194 + static int cache_size(const struct cache *cache, unsigned int *ret) 195 + { 196 + const char *propname; 197 + const u32 *cache_size; 198 + 199 + propname = cache_type_info[cache->type].size_prop; 200 + 201 + cache_size = of_get_property(cache->ofnode, propname, NULL); 202 + if (!cache_size) 203 + return -ENODEV; 204 + 205 + *ret = *cache_size; 206 + return 0; 207 + } 208 + 209 + static int cache_size_kb(const struct cache *cache, unsigned int *ret) 210 + { 211 + unsigned int size; 212 + 213 + if (cache_size(cache, &size)) 214 + return -ENODEV; 215 + 216 + *ret = size / 1024; 217 + return 0; 218 + } 219 + 220 + /* not cache_line_size() because that's a macro in include/linux/cache.h */ 221 + static int cache_get_line_size(const struct cache *cache, unsigned int *ret) 222 + { 223 + const u32 *line_size; 224 + int i, lim; 225 + 226 + lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props); 227 + 228 + for (i = 0; i < lim; i++) { 229 + const char *propname; 230 + 231 + propname = cache_type_info[cache->type].line_size_props[i]; 232 + line_size = of_get_property(cache->ofnode, propname, NULL); 233 + if (line_size) 234 + break; 235 + } 236 + 237 + if (!line_size) 238 + return -ENODEV; 239 + 240 + *ret = *line_size; 
241 + return 0; 242 + } 243 + 244 + static int cache_nr_sets(const struct cache *cache, unsigned int *ret) 245 + { 246 + const char *propname; 247 + const u32 *nr_sets; 248 + 249 + propname = cache_type_info[cache->type].nr_sets_prop; 250 + 251 + nr_sets = of_get_property(cache->ofnode, propname, NULL); 252 + if (!nr_sets) 253 + return -ENODEV; 254 + 255 + *ret = *nr_sets; 256 + return 0; 257 + } 258 + 259 + static int cache_associativity(const struct cache *cache, unsigned int *ret) 260 + { 261 + unsigned int line_size; 262 + unsigned int nr_sets; 263 + unsigned int size; 264 + 265 + if (cache_nr_sets(cache, &nr_sets)) 266 + goto err; 267 + 268 + /* If the cache is fully associative, there is no need to 269 + * check the other properties. 270 + */ 271 + if (nr_sets == 1) { 272 + *ret = 0; 273 + return 0; 274 + } 275 + 276 + if (cache_get_line_size(cache, &line_size)) 277 + goto err; 278 + if (cache_size(cache, &size)) 279 + goto err; 280 + 281 + if (!(nr_sets > 0 && size > 0 && line_size > 0)) 282 + goto err; 283 + 284 + *ret = (size / nr_sets) / line_size; 285 + return 0; 286 + err: 287 + return -ENODEV; 288 + } 289 + 290 + /* helper for dealing with split caches */ 291 + static struct cache *cache_find_first_sibling(struct cache *cache) 292 + { 293 + struct cache *iter; 294 + 295 + if (cache->type == CACHE_TYPE_UNIFIED) 296 + return cache; 297 + 298 + list_for_each_entry(iter, &cache_list, list) 299 + if (iter->ofnode == cache->ofnode && iter->next_local == cache) 300 + return iter; 301 + 302 + return cache; 303 + } 304 + 305 + /* return the first cache on a local list matching node */ 306 + static struct cache *cache_lookup_by_node(const struct device_node *node) 307 + { 308 + struct cache *cache = NULL; 309 + struct cache *iter; 310 + 311 + list_for_each_entry(iter, &cache_list, list) { 312 + if (iter->ofnode != node) 313 + continue; 314 + cache = cache_find_first_sibling(iter); 315 + break; 316 + } 317 + 318 + return cache; 319 + } 320 + 321 + static bool 
cache_node_is_unified(const struct device_node *np) 322 + { 323 + return of_get_property(np, "cache-unified", NULL); 324 + } 325 + 326 + static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level) 327 + { 328 + struct cache *cache; 329 + 330 + pr_debug("creating L%d ucache for %s\n", level, node->full_name); 331 + 332 + cache = new_cache(CACHE_TYPE_UNIFIED, level, node); 333 + 334 + return cache; 335 + } 336 + 337 + static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level) 338 + { 339 + struct cache *dcache, *icache; 340 + 341 + pr_debug("creating L%d dcache and icache for %s\n", level, 342 + node->full_name); 343 + 344 + dcache = new_cache(CACHE_TYPE_DATA, level, node); 345 + icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node); 346 + 347 + if (!dcache || !icache) 348 + goto err; 349 + 350 + dcache->next_local = icache; 351 + 352 + return dcache; 353 + err: 354 + release_cache(dcache); 355 + release_cache(icache); 356 + return NULL; 357 + } 358 + 359 + static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, int level) 360 + { 361 + struct cache *cache; 362 + 363 + if (cache_node_is_unified(node)) 364 + cache = cache_do_one_devnode_unified(node, level); 365 + else 366 + cache = cache_do_one_devnode_split(node, level); 367 + 368 + return cache; 369 + } 370 + 371 + static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node, int level) 372 + { 373 + struct cache *cache; 374 + 375 + cache = cache_lookup_by_node(node); 376 + 377 + WARN_ONCE(cache && cache->level != level, 378 + "cache level mismatch on lookup (got %d, expected %d)\n", 379 + cache->level, level); 380 + 381 + if (!cache) 382 + cache = cache_do_one_devnode(node, level); 383 + 384 + return cache; 385 + } 386 + 387 + static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger) 388 + { 389 + while (smaller->next_local) { 390 + if (smaller->next_local == bigger) 391 
+ return; /* already linked */ 392 + smaller = smaller->next_local; 393 + } 394 + 395 + smaller->next_local = bigger; 396 + } 397 + 398 + static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache) 399 + { 400 + WARN_ON_ONCE(cache->level != 1); 401 + WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu")); 402 + } 403 + 404 + static void __cpuinit do_subsidiary_caches(struct cache *cache) 405 + { 406 + struct device_node *subcache_node; 407 + int level = cache->level; 408 + 409 + do_subsidiary_caches_debugcheck(cache); 410 + 411 + while ((subcache_node = of_find_next_cache_node(cache->ofnode))) { 412 + struct cache *subcache; 413 + 414 + level++; 415 + subcache = cache_lookup_or_instantiate(subcache_node, level); 416 + of_node_put(subcache_node); 417 + if (!subcache) 418 + break; 419 + 420 + link_cache_lists(cache, subcache); 421 + cache = subcache; 422 + } 423 + } 424 + 425 + static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id) 426 + { 427 + struct device_node *cpu_node; 428 + struct cache *cpu_cache = NULL; 429 + 430 + pr_debug("creating cache object(s) for CPU %i\n", cpu_id); 431 + 432 + cpu_node = of_get_cpu_node(cpu_id, NULL); 433 + WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id); 434 + if (!cpu_node) 435 + goto out; 436 + 437 + cpu_cache = cache_lookup_or_instantiate(cpu_node, 1); 438 + if (!cpu_cache) 439 + goto out; 440 + 441 + do_subsidiary_caches(cpu_cache); 442 + 443 + cache_cpu_set(cpu_cache, cpu_id); 444 + out: 445 + of_node_put(cpu_node); 446 + 447 + return cpu_cache; 448 + } 449 + 450 + static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id) 451 + { 452 + struct cache_dir *cache_dir; 453 + struct sys_device *sysdev; 454 + struct kobject *kobj = NULL; 455 + 456 + sysdev = get_cpu_sysdev(cpu_id); 457 + WARN_ONCE(!sysdev, "no sysdev for CPU %i\n", cpu_id); 458 + if (!sysdev) 459 + goto err; 460 + 461 + kobj = kobject_create_and_add("cache", &sysdev->kobj); 462 + if (!kobj) 463 + 
goto err; 464 + 465 + cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL); 466 + if (!cache_dir) 467 + goto err; 468 + 469 + cache_dir->kobj = kobj; 470 + 471 + WARN_ON_ONCE(per_cpu(cache_dir, cpu_id) != NULL); 472 + 473 + per_cpu(cache_dir, cpu_id) = cache_dir; 474 + 475 + return cache_dir; 476 + err: 477 + kobject_put(kobj); 478 + return NULL; 479 + } 480 + 481 + static void cache_index_release(struct kobject *kobj) 482 + { 483 + struct cache_index_dir *index; 484 + 485 + index = kobj_to_cache_index_dir(kobj); 486 + 487 + pr_debug("freeing index directory for L%d %s cache\n", 488 + index->cache->level, cache_type_string(index->cache)); 489 + 490 + kfree(index); 491 + } 492 + 493 + static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf) 494 + { 495 + struct kobj_attribute *kobj_attr; 496 + 497 + kobj_attr = container_of(attr, struct kobj_attribute, attr); 498 + 499 + return kobj_attr->show(k, kobj_attr, buf); 500 + } 501 + 502 + static struct cache *index_kobj_to_cache(struct kobject *k) 503 + { 504 + struct cache_index_dir *index; 505 + 506 + index = kobj_to_cache_index_dir(k); 507 + 508 + return index->cache; 509 + } 510 + 511 + static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 512 + { 513 + unsigned int size_kb; 514 + struct cache *cache; 515 + 516 + cache = index_kobj_to_cache(k); 517 + 518 + if (cache_size_kb(cache, &size_kb)) 519 + return -ENODEV; 520 + 521 + return sprintf(buf, "%uK\n", size_kb); 522 + } 523 + 524 + static struct kobj_attribute cache_size_attr = 525 + __ATTR(size, 0444, size_show, NULL); 526 + 527 + 528 + static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 529 + { 530 + unsigned int line_size; 531 + struct cache *cache; 532 + 533 + cache = index_kobj_to_cache(k); 534 + 535 + if (cache_get_line_size(cache, &line_size)) 536 + return -ENODEV; 537 + 538 + return sprintf(buf, "%u\n", line_size); 539 + } 540 + 541 + static struct kobj_attribute 
cache_line_size_attr = 542 + __ATTR(coherency_line_size, 0444, line_size_show, NULL); 543 + 544 + static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 545 + { 546 + unsigned int nr_sets; 547 + struct cache *cache; 548 + 549 + cache = index_kobj_to_cache(k); 550 + 551 + if (cache_nr_sets(cache, &nr_sets)) 552 + return -ENODEV; 553 + 554 + return sprintf(buf, "%u\n", nr_sets); 555 + } 556 + 557 + static struct kobj_attribute cache_nr_sets_attr = 558 + __ATTR(number_of_sets, 0444, nr_sets_show, NULL); 559 + 560 + static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 561 + { 562 + unsigned int associativity; 563 + struct cache *cache; 564 + 565 + cache = index_kobj_to_cache(k); 566 + 567 + if (cache_associativity(cache, &associativity)) 568 + return -ENODEV; 569 + 570 + return sprintf(buf, "%u\n", associativity); 571 + } 572 + 573 + static struct kobj_attribute cache_assoc_attr = 574 + __ATTR(ways_of_associativity, 0444, associativity_show, NULL); 575 + 576 + static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 577 + { 578 + struct cache *cache; 579 + 580 + cache = index_kobj_to_cache(k); 581 + 582 + return sprintf(buf, "%s\n", cache_type_string(cache)); 583 + } 584 + 585 + static struct kobj_attribute cache_type_attr = 586 + __ATTR(type, 0444, type_show, NULL); 587 + 588 + static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 589 + { 590 + struct cache_index_dir *index; 591 + struct cache *cache; 592 + 593 + index = kobj_to_cache_index_dir(k); 594 + cache = index->cache; 595 + 596 + return sprintf(buf, "%d\n", cache->level); 597 + } 598 + 599 + static struct kobj_attribute cache_level_attr = 600 + __ATTR(level, 0444, level_show, NULL); 601 + 602 + static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 603 + { 604 + struct cache_index_dir *index; 605 + struct cache *cache; 606 + int len; 607 + int 
n = 0; 608 + 609 + index = kobj_to_cache_index_dir(k); 610 + cache = index->cache; 611 + len = PAGE_SIZE - 2; 612 + 613 + if (len > 1) { 614 + n = cpumask_scnprintf(buf, len, &cache->shared_cpu_map); 615 + buf[n++] = '\n'; 616 + buf[n] = '\0'; 617 + } 618 + return n; 619 + } 620 + 621 + static struct kobj_attribute cache_shared_cpu_map_attr = 622 + __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL); 623 + 624 + /* Attributes which should always be created -- the kobject/sysfs core 625 + * does this automatically via kobj_type->default_attrs. This is the 626 + * minimum data required to uniquely identify a cache. 627 + */ 628 + static struct attribute *cache_index_default_attrs[] = { 629 + &cache_type_attr.attr, 630 + &cache_level_attr.attr, 631 + &cache_shared_cpu_map_attr.attr, 632 + NULL, 633 + }; 634 + 635 + /* Attributes which should be created if the cache device node has the 636 + * right properties -- see cacheinfo_create_index_opt_attrs 637 + */ 638 + static struct kobj_attribute *cache_index_opt_attrs[] = { 639 + &cache_size_attr, 640 + &cache_line_size_attr, 641 + &cache_nr_sets_attr, 642 + &cache_assoc_attr, 643 + }; 644 + 645 + static struct sysfs_ops cache_index_ops = { 646 + .show = cache_index_show, 647 + }; 648 + 649 + static struct kobj_type cache_index_type = { 650 + .release = cache_index_release, 651 + .sysfs_ops = &cache_index_ops, 652 + .default_attrs = cache_index_default_attrs, 653 + }; 654 + 655 + static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir) 656 + { 657 + const char *cache_name; 658 + const char *cache_type; 659 + struct cache *cache; 660 + char *buf; 661 + int i; 662 + 663 + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 664 + if (!buf) 665 + return; 666 + 667 + cache = dir->cache; 668 + cache_name = cache->ofnode->full_name; 669 + cache_type = cache_type_string(cache); 670 + 671 + /* We don't want to create an attribute that can't provide a 672 + * meaningful value. 
Check the return value of each optional 673 + * attribute's ->show method before registering the 674 + * attribute. 675 + */ 676 + for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) { 677 + struct kobj_attribute *attr; 678 + ssize_t rc; 679 + 680 + attr = cache_index_opt_attrs[i]; 681 + 682 + rc = attr->show(&dir->kobj, attr, buf); 683 + if (rc <= 0) { 684 + pr_debug("not creating %s attribute for " 685 + "%s(%s) (rc = %zd)\n", 686 + attr->attr.name, cache_name, 687 + cache_type, rc); 688 + continue; 689 + } 690 + if (sysfs_create_file(&dir->kobj, &attr->attr)) 691 + pr_debug("could not create %s attribute for %s(%s)\n", 692 + attr->attr.name, cache_name, cache_type); 693 + } 694 + 695 + kfree(buf); 696 + } 697 + 698 + static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir) 699 + { 700 + struct cache_index_dir *index_dir; 701 + int rc; 702 + 703 + index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL); 704 + if (!index_dir) 705 + goto err; 706 + 707 + index_dir->cache = cache; 708 + 709 + rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type, 710 + cache_dir->kobj, "index%d", index); 711 + if (rc) 712 + goto err; 713 + 714 + index_dir->next = cache_dir->index; 715 + cache_dir->index = index_dir; 716 + 717 + cacheinfo_create_index_opt_attrs(index_dir); 718 + 719 + return; 720 + err: 721 + kfree(index_dir); 722 + } 723 + 724 + static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache *cache_list) 725 + { 726 + struct cache_dir *cache_dir; 727 + struct cache *cache; 728 + int index = 0; 729 + 730 + cache_dir = cacheinfo_create_cache_dir(cpu_id); 731 + if (!cache_dir) 732 + return; 733 + 734 + cache = cache_list; 735 + while (cache) { 736 + cacheinfo_create_index_dir(cache, index, cache_dir); 737 + index++; 738 + cache = cache->next_local; 739 + } 740 + } 741 + 742 + void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id) 743 + { 744 + struct cache *cache; 745 + 746 + cache = 
cache_chain_instantiate(cpu_id); 747 + if (!cache) 748 + return; 749 + 750 + cacheinfo_sysfs_populate(cpu_id, cache); 751 + } 752 + 753 + #ifdef CONFIG_HOTPLUG_CPU /* functions needed for cpu offline */ 754 + 755 + static struct cache *cache_lookup_by_cpu(unsigned int cpu_id) 756 + { 757 + struct device_node *cpu_node; 758 + struct cache *cache; 759 + 760 + cpu_node = of_get_cpu_node(cpu_id, NULL); 761 + WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id); 762 + if (!cpu_node) 763 + return NULL; 764 + 765 + cache = cache_lookup_by_node(cpu_node); 766 + of_node_put(cpu_node); 767 + 768 + return cache; 769 + } 770 + 771 + static void remove_index_dirs(struct cache_dir *cache_dir) 772 + { 773 + struct cache_index_dir *index; 774 + 775 + index = cache_dir->index; 776 + 777 + while (index) { 778 + struct cache_index_dir *next; 779 + 780 + next = index->next; 781 + kobject_put(&index->kobj); 782 + index = next; 783 + } 784 + } 785 + 786 + static void remove_cache_dir(struct cache_dir *cache_dir) 787 + { 788 + remove_index_dirs(cache_dir); 789 + 790 + kobject_put(cache_dir->kobj); 791 + 792 + kfree(cache_dir); 793 + } 794 + 795 + static void cache_cpu_clear(struct cache *cache, int cpu) 796 + { 797 + while (cache) { 798 + struct cache *next = cache->next_local; 799 + 800 + WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map), 801 + "CPU %i not accounted in %s(%s)\n", 802 + cpu, cache->ofnode->full_name, 803 + cache_type_string(cache)); 804 + 805 + cpumask_clear_cpu(cpu, &cache->shared_cpu_map); 806 + 807 + /* Release the cache object if all the cpus using it 808 + * are offline */ 809 + if (cpumask_empty(&cache->shared_cpu_map)) 810 + release_cache(cache); 811 + 812 + cache = next; 813 + } 814 + } 815 + 816 + void cacheinfo_cpu_offline(unsigned int cpu_id) 817 + { 818 + struct cache_dir *cache_dir; 819 + struct cache *cache; 820 + 821 + /* Prevent userspace from seeing inconsistent state - remove 822 + * the sysfs hierarchy first */ 823 + cache_dir = 
per_cpu(cache_dir, cpu_id); 824 + 825 + /* careful, sysfs population may have failed */ 826 + if (cache_dir) 827 + remove_cache_dir(cache_dir); 828 + 829 + per_cpu(cache_dir, cpu_id) = NULL; 830 + 831 + /* clear the CPU's bit in its cache chain, possibly freeing 832 + * cache objects */ 833 + cache = cache_lookup_by_cpu(cpu_id); 834 + if (cache) 835 + cache_cpu_clear(cache, cpu_id); 836 + } 837 + #endif /* CONFIG_HOTPLUG_CPU */
+8
arch/powerpc/kernel/cacheinfo.h
#ifndef _PPC_CACHEINFO_H
#define _PPC_CACHEINFO_H

/* These are just hooks for sysfs.c to use. */
/* Build (or tear down) the per-CPU cache sysfs hierarchy when a CPU
 * comes online (goes offline). */
extern void cacheinfo_cpu_online(unsigned int cpu_id);
extern void cacheinfo_cpu_offline(unsigned int cpu_id);

#endif /* _PPC_CACHEINFO_H */
+70 -1
arch/powerpc/kernel/pci-common.c
··· 16 16 * 2 of the License, or (at your option) any later version. 17 17 */ 18 18 19 - #undef DEBUG 19 + #define DEBUG 20 20 21 21 #include <linux/kernel.h> 22 22 #include <linux/pci.h> ··· 1356 1356 } 1357 1357 } 1358 1358 1359 + static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus) 1360 + { 1361 + struct pci_controller *hose = pci_bus_to_host(bus); 1362 + resource_size_t offset; 1363 + struct resource *res, *pres; 1364 + int i; 1365 + 1366 + pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus)); 1367 + 1368 + /* Check for IO */ 1369 + if (!(hose->io_resource.flags & IORESOURCE_IO)) 1370 + goto no_io; 1371 + offset = (unsigned long)hose->io_base_virt - _IO_BASE; 1372 + res = kzalloc(sizeof(struct resource), GFP_KERNEL); 1373 + BUG_ON(res == NULL); 1374 + res->name = "Legacy IO"; 1375 + res->flags = IORESOURCE_IO; 1376 + res->start = offset; 1377 + res->end = (offset + 0xfff) & 0xfffffffful; 1378 + pr_debug("Candidate legacy IO: %pR\n", res); 1379 + if (request_resource(&hose->io_resource, res)) { 1380 + printk(KERN_DEBUG 1381 + "PCI %04x:%02x Cannot reserve Legacy IO %pR\n", 1382 + pci_domain_nr(bus), bus->number, res); 1383 + kfree(res); 1384 + } 1385 + 1386 + no_io: 1387 + /* Check for memory */ 1388 + offset = hose->pci_mem_offset; 1389 + pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset); 1390 + for (i = 0; i < 3; i++) { 1391 + pres = &hose->mem_resources[i]; 1392 + if (!(pres->flags & IORESOURCE_MEM)) 1393 + continue; 1394 + pr_debug("hose mem res: %pR\n", pres); 1395 + if ((pres->start - offset) <= 0xa0000 && 1396 + (pres->end - offset) >= 0xbffff) 1397 + break; 1398 + } 1399 + if (i >= 3) 1400 + return; 1401 + res = kzalloc(sizeof(struct resource), GFP_KERNEL); 1402 + BUG_ON(res == NULL); 1403 + res->name = "Legacy VGA memory"; 1404 + res->flags = IORESOURCE_MEM; 1405 + res->start = 0xa0000 + offset; 1406 + res->end = 0xbffff + offset; 1407 + pr_debug("Candidate VGA memory: %pR\n", res); 1408 + if 
(request_resource(pres, res)) { 1409 + printk(KERN_DEBUG 1410 + "PCI %04x:%02x Cannot reserve VGA memory %pR\n", 1411 + pci_domain_nr(bus), bus->number, res); 1412 + kfree(res); 1413 + } 1414 + } 1415 + 1359 1416 void __init pcibios_resource_survey(void) 1360 1417 { 1361 1418 struct pci_bus *b; ··· 1428 1371 pcibios_allocate_resources(1); 1429 1372 } 1430 1373 1374 + /* Before we start assigning unassigned resource, we try to reserve 1375 + * the low IO area and the VGA memory area if they intersect the 1376 + * bus available resources to avoid allocating things on top of them 1377 + */ 1378 + if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) { 1379 + list_for_each_entry(b, &pci_root_buses, node) 1380 + pcibios_reserve_legacy_regions(b); 1381 + } 1382 + 1383 + /* Now, if the platform didn't decide to blindly trust the firmware, 1384 + * we proceed to assigning things that were left unassigned 1385 + */ 1431 1386 if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) { 1432 1387 pr_debug("PCI: Assigning unassigned resouces...\n"); 1433 1388 pci_assign_unassigned_resources();
+7 -2
arch/powerpc/kernel/pci_64.c
··· 560 560 * G5 machines... So when something asks for bus 0 io base 561 561 * (bus 0 is HT root), we return the AGP one instead. 562 562 */ 563 - if (machine_is_compatible("MacRISC4")) 564 - if (in_bus == 0) 563 + if (in_bus == 0 && machine_is_compatible("MacRISC4")) { 564 + struct device_node *agp; 565 + 566 + agp = of_find_compatible_node(NULL, NULL, "u3-agp"); 567 + if (agp) 565 568 in_bus = 0xf0; 569 + of_node_put(agp); 570 + } 566 571 567 572 /* That syscall isn't quite compatible with PCI domains, but it's 568 573 * used on pre-domains setup. We return the first match
+1
arch/powerpc/kernel/ppc_ksyms.c
··· 165 165 EXPORT_SYMBOL(irq_desc); 166 166 EXPORT_SYMBOL(tb_ticks_per_jiffy); 167 167 EXPORT_SYMBOL(cacheable_memcpy); 168 + EXPORT_SYMBOL(cacheable_memzero); 168 169 #endif 169 170 170 171 #ifdef CONFIG_PPC32
+7 -7
arch/powerpc/kernel/prom.c
··· 824 824 #endif 825 825 826 826 #ifdef CONFIG_KEXEC 827 - lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL); 827 + lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL); 828 828 if (lprop) 829 829 crashk_res.start = *lprop; 830 830 831 - lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL); 831 + lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL); 832 832 if (lprop) 833 833 crashk_res.end = crashk_res.start + *lprop - 1; 834 834 #endif ··· 893 893 u64 base, size, lmb_size; 894 894 unsigned int is_kexec_kdump = 0, rngs; 895 895 896 - ls = (cell_t *)of_get_flat_dt_prop(node, "ibm,lmb-size", &l); 896 + ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l); 897 897 if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t)) 898 898 return 0; 899 899 lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls); 900 900 901 - dm = (cell_t *)of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l); 901 + dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l); 902 902 if (dm == NULL || l < sizeof(cell_t)) 903 903 return 0; 904 904 ··· 907 907 return 0; 908 908 909 909 /* check if this is a kexec/kdump kernel. */ 910 - usm = (cell_t *)of_get_flat_dt_prop(node, "linux,drconf-usable-memory", 910 + usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", 911 911 &l); 912 912 if (usm != NULL) 913 913 is_kexec_kdump = 1; ··· 981 981 } else if (strcmp(type, "memory") != 0) 982 982 return 0; 983 983 984 - reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l); 984 + reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l); 985 985 if (reg == NULL) 986 - reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l); 986 + reg = of_get_flat_dt_prop(node, "reg", &l); 987 987 if (reg == NULL) 988 988 return 0; 989 989
+1 -1
arch/powerpc/kernel/prom_init.c
··· 1210 1210 /* Initialize the table to have a one-to-one mapping 1211 1211 * over the allocated size. 1212 1212 */ 1213 - tce_entryp = (unsigned long *)base; 1213 + tce_entryp = (u64 *)base; 1214 1214 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) { 1215 1215 tce_entry = (i << PAGE_SHIFT); 1216 1216 tce_entry |= 0x3;
+4 -296
arch/powerpc/kernel/sysfs.c
··· 18 18 #include <asm/machdep.h> 19 19 #include <asm/smp.h> 20 20 21 + #include "cacheinfo.h" 22 + 21 23 #ifdef CONFIG_PPC64 22 24 #include <asm/paca.h> 23 25 #include <asm/lppaca.h> 24 26 #endif 25 27 26 28 static DEFINE_PER_CPU(struct cpu, cpu_devices); 27 - 28 - static DEFINE_PER_CPU(struct kobject *, cache_toplevel); 29 29 30 30 /* 31 31 * SMT snooze delay stuff, 64-bit only for now ··· 343 343 #endif /* HAS_PPC_PMC_PA6T */ 344 344 #endif /* HAS_PPC_PMC_CLASSIC */ 345 345 346 - struct cache_desc { 347 - struct kobject kobj; 348 - struct cache_desc *next; 349 - const char *type; /* Instruction, Data, or Unified */ 350 - u32 size; /* total cache size in KB */ 351 - u32 line_size; /* in bytes */ 352 - u32 nr_sets; /* number of sets */ 353 - u32 level; /* e.g. 1, 2, 3... */ 354 - u32 associativity; /* e.g. 8-way... 0 is fully associative */ 355 - }; 356 - 357 - DEFINE_PER_CPU(struct cache_desc *, cache_desc); 358 - 359 - static struct cache_desc *kobj_to_cache_desc(struct kobject *k) 360 - { 361 - return container_of(k, struct cache_desc, kobj); 362 - } 363 - 364 - static void cache_desc_release(struct kobject *k) 365 - { 366 - struct cache_desc *desc = kobj_to_cache_desc(k); 367 - 368 - pr_debug("%s: releasing %s\n", __func__, kobject_name(k)); 369 - 370 - if (desc->next) 371 - kobject_put(&desc->next->kobj); 372 - 373 - kfree(kobj_to_cache_desc(k)); 374 - } 375 - 376 - static ssize_t cache_desc_show(struct kobject *k, struct attribute *attr, char *buf) 377 - { 378 - struct kobj_attribute *kobj_attr; 379 - 380 - kobj_attr = container_of(attr, struct kobj_attribute, attr); 381 - 382 - return kobj_attr->show(k, kobj_attr, buf); 383 - } 384 - 385 - static struct sysfs_ops cache_desc_sysfs_ops = { 386 - .show = cache_desc_show, 387 - }; 388 - 389 - static struct kobj_type cache_desc_type = { 390 - .release = cache_desc_release, 391 - .sysfs_ops = &cache_desc_sysfs_ops, 392 - }; 393 - 394 - static ssize_t cache_size_show(struct kobject *k, struct kobj_attribute 
*attr, char *buf) 395 - { 396 - struct cache_desc *cache = kobj_to_cache_desc(k); 397 - 398 - return sprintf(buf, "%uK\n", cache->size); 399 - } 400 - 401 - static struct kobj_attribute cache_size_attr = 402 - __ATTR(size, 0444, cache_size_show, NULL); 403 - 404 - static ssize_t cache_line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 405 - { 406 - struct cache_desc *cache = kobj_to_cache_desc(k); 407 - 408 - return sprintf(buf, "%u\n", cache->line_size); 409 - } 410 - 411 - static struct kobj_attribute cache_line_size_attr = 412 - __ATTR(coherency_line_size, 0444, cache_line_size_show, NULL); 413 - 414 - static ssize_t cache_nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 415 - { 416 - struct cache_desc *cache = kobj_to_cache_desc(k); 417 - 418 - return sprintf(buf, "%u\n", cache->nr_sets); 419 - } 420 - 421 - static struct kobj_attribute cache_nr_sets_attr = 422 - __ATTR(number_of_sets, 0444, cache_nr_sets_show, NULL); 423 - 424 - static ssize_t cache_type_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 425 - { 426 - struct cache_desc *cache = kobj_to_cache_desc(k); 427 - 428 - return sprintf(buf, "%s\n", cache->type); 429 - } 430 - 431 - static struct kobj_attribute cache_type_attr = 432 - __ATTR(type, 0444, cache_type_show, NULL); 433 - 434 - static ssize_t cache_level_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 435 - { 436 - struct cache_desc *cache = kobj_to_cache_desc(k); 437 - 438 - return sprintf(buf, "%u\n", cache->level); 439 - } 440 - 441 - static struct kobj_attribute cache_level_attr = 442 - __ATTR(level, 0444, cache_level_show, NULL); 443 - 444 - static ssize_t cache_assoc_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 445 - { 446 - struct cache_desc *cache = kobj_to_cache_desc(k); 447 - 448 - return sprintf(buf, "%u\n", cache->associativity); 449 - } 450 - 451 - static struct kobj_attribute cache_assoc_attr = 452 - __ATTR(ways_of_associativity, 0444, 
cache_assoc_show, NULL); 453 - 454 - struct cache_desc_info { 455 - const char *type; 456 - const char *size_prop; 457 - const char *line_size_prop; 458 - const char *nr_sets_prop; 459 - }; 460 - 461 - /* PowerPC Processor binding says the [di]-cache-* must be equal on 462 - * unified caches, so just use d-cache properties. */ 463 - static struct cache_desc_info ucache_info = { 464 - .type = "Unified", 465 - .size_prop = "d-cache-size", 466 - .line_size_prop = "d-cache-line-size", 467 - .nr_sets_prop = "d-cache-sets", 468 - }; 469 - 470 - static struct cache_desc_info dcache_info = { 471 - .type = "Data", 472 - .size_prop = "d-cache-size", 473 - .line_size_prop = "d-cache-line-size", 474 - .nr_sets_prop = "d-cache-sets", 475 - }; 476 - 477 - static struct cache_desc_info icache_info = { 478 - .type = "Instruction", 479 - .size_prop = "i-cache-size", 480 - .line_size_prop = "i-cache-line-size", 481 - .nr_sets_prop = "i-cache-sets", 482 - }; 483 - 484 - static struct cache_desc * __cpuinit create_cache_desc(struct device_node *np, struct kobject *parent, int index, int level, struct cache_desc_info *info) 485 - { 486 - const u32 *cache_line_size; 487 - struct cache_desc *new; 488 - const u32 *cache_size; 489 - const u32 *nr_sets; 490 - int rc; 491 - 492 - new = kzalloc(sizeof(*new), GFP_KERNEL); 493 - if (!new) 494 - return NULL; 495 - 496 - rc = kobject_init_and_add(&new->kobj, &cache_desc_type, parent, 497 - "index%d", index); 498 - if (rc) 499 - goto err; 500 - 501 - /* type */ 502 - new->type = info->type; 503 - rc = sysfs_create_file(&new->kobj, &cache_type_attr.attr); 504 - WARN_ON(rc); 505 - 506 - /* level */ 507 - new->level = level; 508 - rc = sysfs_create_file(&new->kobj, &cache_level_attr.attr); 509 - WARN_ON(rc); 510 - 511 - /* size */ 512 - cache_size = of_get_property(np, info->size_prop, NULL); 513 - if (cache_size) { 514 - new->size = *cache_size / 1024; 515 - rc = sysfs_create_file(&new->kobj, 516 - &cache_size_attr.attr); 517 - WARN_ON(rc); 518 - } 
519 - 520 - /* coherency_line_size */ 521 - cache_line_size = of_get_property(np, info->line_size_prop, NULL); 522 - if (cache_line_size) { 523 - new->line_size = *cache_line_size; 524 - rc = sysfs_create_file(&new->kobj, 525 - &cache_line_size_attr.attr); 526 - WARN_ON(rc); 527 - } 528 - 529 - /* number_of_sets */ 530 - nr_sets = of_get_property(np, info->nr_sets_prop, NULL); 531 - if (nr_sets) { 532 - new->nr_sets = *nr_sets; 533 - rc = sysfs_create_file(&new->kobj, 534 - &cache_nr_sets_attr.attr); 535 - WARN_ON(rc); 536 - } 537 - 538 - /* ways_of_associativity */ 539 - if (new->nr_sets == 1) { 540 - /* fully associative */ 541 - new->associativity = 0; 542 - goto create_assoc; 543 - } 544 - 545 - if (new->nr_sets && new->size && new->line_size) { 546 - /* If we have values for all of these we can derive 547 - * the associativity. */ 548 - new->associativity = 549 - ((new->size * 1024) / new->nr_sets) / new->line_size; 550 - create_assoc: 551 - rc = sysfs_create_file(&new->kobj, 552 - &cache_assoc_attr.attr); 553 - WARN_ON(rc); 554 - } 555 - 556 - return new; 557 - err: 558 - kfree(new); 559 - return NULL; 560 - } 561 - 562 - static bool cache_is_unified(struct device_node *np) 563 - { 564 - return of_get_property(np, "cache-unified", NULL); 565 - } 566 - 567 - static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level) 568 - { 569 - struct device_node *next_cache; 570 - struct cache_desc *new, **end; 571 - 572 - pr_debug("%s(node = %s, index = %d)\n", __func__, np->full_name, index); 573 - 574 - if (cache_is_unified(np)) { 575 - new = create_cache_desc(np, parent, index, level, 576 - &ucache_info); 577 - } else { 578 - new = create_cache_desc(np, parent, index, level, 579 - &dcache_info); 580 - if (new) { 581 - index++; 582 - new->next = create_cache_desc(np, parent, index, level, 583 - &icache_info); 584 - } 585 - } 586 - if (!new) 587 - return NULL; 588 - 589 - end = &new->next; 590 - while 
(*end) 591 - end = &(*end)->next; 592 - 593 - next_cache = of_find_next_cache_node(np); 594 - if (!next_cache) 595 - goto out; 596 - 597 - *end = create_cache_index_info(next_cache, parent, ++index, ++level); 598 - 599 - of_node_put(next_cache); 600 - out: 601 - return new; 602 - } 603 - 604 - static void __cpuinit create_cache_info(struct sys_device *sysdev) 605 - { 606 - struct kobject *cache_toplevel; 607 - struct device_node *np = NULL; 608 - int cpu = sysdev->id; 609 - 610 - cache_toplevel = kobject_create_and_add("cache", &sysdev->kobj); 611 - if (!cache_toplevel) 612 - return; 613 - per_cpu(cache_toplevel, cpu) = cache_toplevel; 614 - np = of_get_cpu_node(cpu, NULL); 615 - if (np != NULL) { 616 - per_cpu(cache_desc, cpu) = 617 - create_cache_index_info(np, cache_toplevel, 0, 1); 618 - of_node_put(np); 619 - } 620 - return; 621 - } 622 - 623 346 static void __cpuinit register_cpu_online(unsigned int cpu) 624 347 { 625 348 struct cpu *c = &per_cpu(cpu_devices, cpu); ··· 407 684 sysdev_create_file(s, &attr_dscr); 408 685 #endif /* CONFIG_PPC64 */ 409 686 410 - create_cache_info(s); 687 + cacheinfo_cpu_online(cpu); 411 688 } 412 689 413 690 #ifdef CONFIG_HOTPLUG_CPU 414 - static void remove_cache_info(struct sys_device *sysdev) 415 - { 416 - struct kobject *cache_toplevel; 417 - struct cache_desc *cache_desc; 418 - int cpu = sysdev->id; 419 - 420 - cache_desc = per_cpu(cache_desc, cpu); 421 - if (cache_desc != NULL) 422 - kobject_put(&cache_desc->kobj); 423 - 424 - cache_toplevel = per_cpu(cache_toplevel, cpu); 425 - if (cache_toplevel != NULL) 426 - kobject_put(cache_toplevel); 427 - } 428 - 429 691 static void unregister_cpu_online(unsigned int cpu) 430 692 { 431 693 struct cpu *c = &per_cpu(cpu_devices, cpu); ··· 477 769 sysdev_remove_file(s, &attr_dscr); 478 770 #endif /* CONFIG_PPC64 */ 479 771 480 - remove_cache_info(s); 772 + cacheinfo_cpu_offline(cpu); 481 773 } 482 774 #endif /* CONFIG_HOTPLUG_CPU */ 483 775
+3 -3
arch/powerpc/mm/mmu_decl.h
··· 30 30 #if defined(CONFIG_40x) || defined(CONFIG_8xx) 31 31 static inline void _tlbil_all(void) 32 32 { 33 - asm volatile ("sync; tlbia; isync" : : : "memory") 33 + asm volatile ("sync; tlbia; isync" : : : "memory"); 34 34 } 35 35 static inline void _tlbil_pid(unsigned int pid) 36 36 { 37 - asm volatile ("sync; tlbia; isync" : : : "memory") 37 + asm volatile ("sync; tlbia; isync" : : : "memory"); 38 38 } 39 39 #else /* CONFIG_40x || CONFIG_8xx */ 40 40 extern void _tlbil_all(void); ··· 47 47 #ifdef CONFIG_8xx 48 48 static inline void _tlbil_va(unsigned long address, unsigned int pid) 49 49 { 50 - asm volatile ("tlbie %0; sync" : : "r" (address) : "memory") 50 + asm volatile ("tlbie %0; sync" : : "r" (address) : "memory"); 51 51 } 52 52 #else /* CONFIG_8xx */ 53 53 extern void _tlbil_va(unsigned long address, unsigned int pid);
+34 -28
arch/powerpc/mm/numa.c
··· 822 822 * required. nid is the preferred node and end is the physical address of 823 823 * the highest address in the node. 824 824 * 825 - * Returns the physical address of the memory. 825 + * Returns the virtual address of the memory. 826 826 */ 827 - static void __init *careful_allocation(int nid, unsigned long size, 827 + static void __init *careful_zallocation(int nid, unsigned long size, 828 828 unsigned long align, 829 829 unsigned long end_pfn) 830 830 { 831 + void *ret; 831 832 int new_nid; 832 - unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT); 833 + unsigned long ret_paddr; 834 + 835 + ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT); 833 836 834 837 /* retry over all memory */ 835 - if (!ret) 836 - ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM()); 838 + if (!ret_paddr) 839 + ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM()); 837 840 838 - if (!ret) 839 - panic("numa.c: cannot allocate %lu bytes on node %d", 841 + if (!ret_paddr) 842 + panic("numa.c: cannot allocate %lu bytes for node %d", 840 843 size, nid); 841 844 845 + ret = __va(ret_paddr); 846 + 842 847 /* 843 - * If the memory came from a previously allocated node, we must 844 - * retry with the bootmem allocator. 848 + * We initialize the nodes in numeric order: 0, 1, 2... 849 + * and hand over control from the LMB allocator to the 850 + * bootmem allocator. If this function is called for 851 + * node 5, then we know that all nodes <5 are using the 852 + * bootmem allocator instead of the LMB allocator. 853 + * 854 + * So, check the nid from which this allocation came 855 + * and double check to see if we need to use bootmem 856 + * instead of the LMB. We don't free the LMB memory 857 + * since it would be useless. 
845 858 */ 846 - new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT); 859 + new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT); 847 860 if (new_nid < nid) { 848 - ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid), 861 + ret = __alloc_bootmem_node(NODE_DATA(new_nid), 849 862 size, align, 0); 850 863 851 - if (!ret) 852 - panic("numa.c: cannot allocate %lu bytes on node %d", 853 - size, new_nid); 854 - 855 - ret = __pa(ret); 856 - 857 - dbg("alloc_bootmem %lx %lx\n", ret, size); 864 + dbg("alloc_bootmem %p %lx\n", ret, size); 858 865 } 859 866 860 - return (void *)ret; 867 + memset(ret, 0, size); 868 + return ret; 861 869 } 862 870 863 871 static struct notifier_block __cpuinitdata ppc64_numa_nb = { ··· 960 952 961 953 for_each_online_node(nid) { 962 954 unsigned long start_pfn, end_pfn; 963 - unsigned long bootmem_paddr; 955 + void *bootmem_vaddr; 964 956 unsigned long bootmap_pages; 965 957 966 958 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); ··· 972 964 * previous nodes' bootmem to be initialized and have 973 965 * all reserved areas marked. 
974 966 */ 975 - NODE_DATA(nid) = careful_allocation(nid, 967 + NODE_DATA(nid) = careful_zallocation(nid, 976 968 sizeof(struct pglist_data), 977 969 SMP_CACHE_BYTES, end_pfn); 978 - NODE_DATA(nid) = __va(NODE_DATA(nid)); 979 - memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); 980 970 981 971 dbg("node %d\n", nid); 982 972 dbg("NODE_DATA() = %p\n", NODE_DATA(nid)); ··· 990 984 dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT); 991 985 992 986 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); 993 - bootmem_paddr = (unsigned long)careful_allocation(nid, 987 + bootmem_vaddr = careful_zallocation(nid, 994 988 bootmap_pages << PAGE_SHIFT, 995 989 PAGE_SIZE, end_pfn); 996 - memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT); 997 990 998 - dbg("bootmap_paddr = %lx\n", bootmem_paddr); 991 + dbg("bootmap_vaddr = %p\n", bootmem_vaddr); 999 992 1000 - init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, 993 + init_bootmem_node(NODE_DATA(nid), 994 + __pa(bootmem_vaddr) >> PAGE_SHIFT, 1001 995 start_pfn, end_pfn); 1002 996 1003 997 free_bootmem_with_active_regions(nid, end_pfn); 1004 998 /* 1005 999 * Be very careful about moving this around. Future 1006 - * calls to careful_allocation() depend on this getting 1000 + * calls to careful_zallocation() depend on this getting 1007 1001 * done correctly. 1008 1002 */ 1009 1003 mark_reserved_regions_for_nid(nid);
+2 -1
arch/powerpc/mm/pgtable_32.c
··· 266 266 /* The PTE should never be already set nor present in the 267 267 * hash table 268 268 */ 269 - BUG_ON(pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)); 269 + BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) && 270 + flags); 270 271 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, 271 272 __pgprot(flags))); 272 273 }
+2 -1
arch/powerpc/mm/tlb_nohash.c
··· 189 189 smp_call_function(do_flush_tlb_mm_ipi, NULL, 1); 190 190 _tlbil_pid(0); 191 191 preempt_enable(); 192 - #endif 192 + #else 193 193 _tlbil_pid(0); 194 + #endif 194 195 } 195 196 EXPORT_SYMBOL(flush_tlb_kernel_range); 196 197
+1 -1
arch/powerpc/oprofile/cell/pr_util.h
··· 79 79 * the vma-to-fileoffset map. 80 80 */ 81 81 struct vma_to_fileoffset_map *create_vma_map(const struct spu *spu, 82 - u64 objectid); 82 + unsigned long objectid); 83 83 unsigned int vma_map_lookup(struct vma_to_fileoffset_map *map, 84 84 unsigned int vma, const struct spu *aSpu, 85 85 int *grd_val);
+1 -1
arch/powerpc/platforms/52xx/mpc52xx_common.c
··· 42 42 * from interrupt context while node mapping (which calls ioremap()) 43 43 * cannot be used at such point. 44 44 */ 45 - static spinlock_t mpc52xx_lock = SPIN_LOCK_UNLOCKED; 45 + static DEFINE_SPINLOCK(mpc52xx_lock); 46 46 static struct mpc52xx_gpt __iomem *mpc52xx_wdt; 47 47 static struct mpc52xx_cdm __iomem *mpc52xx_cdm; 48 48
+1 -1
arch/powerpc/platforms/83xx/mpc831x_rdb.c
··· 42 42 mpc831x_usb_cfg(); 43 43 } 44 44 45 - void __init mpc831x_rdb_init_IRQ(void) 45 + static void __init mpc831x_rdb_init_IRQ(void) 46 46 { 47 47 struct device_node *np; 48 48
+4 -5
arch/powerpc/platforms/83xx/mpc832x_mds.c
··· 49 49 #define DBG(fmt...) 50 50 #endif 51 51 52 - static u8 *bcsr_regs = NULL; 53 - 54 52 /* ************************************************************************ 55 53 * 56 54 * Setup the architecture ··· 57 59 static void __init mpc832x_sys_setup_arch(void) 58 60 { 59 61 struct device_node *np; 62 + u8 __iomem *bcsr_regs = NULL; 60 63 61 64 if (ppc_md.progress) 62 65 ppc_md.progress("mpc832x_sys_setup_arch()", 0); 63 66 64 67 /* Map BCSR area */ 65 68 np = of_find_node_by_name(NULL, "bcsr"); 66 - if (np != 0) { 69 + if (np) { 67 70 struct resource res; 68 71 69 72 of_address_to_resource(np, 0, &res); ··· 92 93 != NULL){ 93 94 /* Reset the Ethernet PHYs */ 94 95 #define BCSR8_FETH_RST 0x50 95 - bcsr_regs[8] &= ~BCSR8_FETH_RST; 96 + clrbits8(&bcsr_regs[8], BCSR8_FETH_RST); 96 97 udelay(1000); 97 - bcsr_regs[8] |= BCSR8_FETH_RST; 98 + setbits8(&bcsr_regs[8], BCSR8_FETH_RST); 98 99 iounmap(bcsr_regs); 99 100 of_node_put(np); 100 101 }
+3 -2
arch/powerpc/platforms/83xx/mpc832x_rdb.c
··· 38 38 #define DBG(fmt...) 39 39 #endif 40 40 41 + #ifdef CONFIG_QUICC_ENGINE 41 42 static void mpc83xx_spi_activate_cs(u8 cs, u8 polarity) 42 43 { 43 44 pr_debug("%s %d %d\n", __func__, cs, polarity); ··· 78 77 mpc83xx_spi_activate_cs, 79 78 mpc83xx_spi_deactivate_cs); 80 79 } 81 - 82 80 machine_device_initcall(mpc832x_rdb, mpc832x_spi_init); 81 + #endif /* CONFIG_QUICC_ENGINE */ 83 82 84 83 /* ************************************************************************ 85 84 * ··· 131 130 } 132 131 machine_device_initcall(mpc832x_rdb, mpc832x_declare_of_platform_devices); 133 132 134 - void __init mpc832x_rdb_init_IRQ(void) 133 + static void __init mpc832x_rdb_init_IRQ(void) 135 134 { 136 135 137 136 struct device_node *np;
+78 -3
arch/powerpc/platforms/83xx/mpc836x_mds.c
··· 18 18 19 19 #include <linux/stddef.h> 20 20 #include <linux/kernel.h> 21 + #include <linux/compiler.h> 21 22 #include <linux/init.h> 22 23 #include <linux/errno.h> 23 24 #include <linux/reboot.h> ··· 44 43 #include <asm/udbg.h> 45 44 #include <sysdev/fsl_soc.h> 46 45 #include <sysdev/fsl_pci.h> 46 + #include <sysdev/simple_gpio.h> 47 47 #include <asm/qe.h> 48 48 #include <asm/qe_ic.h> 49 49 ··· 57 55 #define DBG(fmt...) 58 56 #endif 59 57 60 - static u8 *bcsr_regs = NULL; 61 - 62 58 /* ************************************************************************ 63 59 * 64 60 * Setup the architecture ··· 65 65 static void __init mpc836x_mds_setup_arch(void) 66 66 { 67 67 struct device_node *np; 68 + u8 __iomem *bcsr_regs = NULL; 68 69 69 70 if (ppc_md.progress) 70 71 ppc_md.progress("mpc836x_mds_setup_arch()", 0); 71 72 72 73 /* Map BCSR area */ 73 74 np = of_find_node_by_name(NULL, "bcsr"); 74 - if (np != 0) { 75 + if (np) { 75 76 struct resource res; 76 77 77 78 of_address_to_resource(np, 0, &res); ··· 94 93 95 94 for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;) 96 95 par_io_of_config(np); 96 + #ifdef CONFIG_QE_USB 97 + /* Must fixup Par IO before QE GPIO chips are registered. 
*/ 98 + par_io_config_pin(1, 2, 1, 0, 3, 0); /* USBOE */ 99 + par_io_config_pin(1, 3, 1, 0, 3, 0); /* USBTP */ 100 + par_io_config_pin(1, 8, 1, 0, 1, 0); /* USBTN */ 101 + par_io_config_pin(1, 10, 2, 0, 3, 0); /* USBRXD */ 102 + par_io_config_pin(1, 9, 2, 1, 3, 0); /* USBRP */ 103 + par_io_config_pin(1, 11, 2, 1, 3, 0); /* USBRN */ 104 + par_io_config_pin(2, 20, 2, 0, 1, 0); /* CLK21 */ 105 + #endif /* CONFIG_QE_USB */ 97 106 } 98 107 99 108 if ((np = of_find_compatible_node(NULL, "network", "ucc_geth")) ··· 161 150 return 0; 162 151 } 163 152 machine_device_initcall(mpc836x_mds, mpc836x_declare_of_platform_devices); 153 + 154 + #ifdef CONFIG_QE_USB 155 + static int __init mpc836x_usb_cfg(void) 156 + { 157 + u8 __iomem *bcsr; 158 + struct device_node *np; 159 + const char *mode; 160 + int ret = 0; 161 + 162 + np = of_find_compatible_node(NULL, NULL, "fsl,mpc8360mds-bcsr"); 163 + if (!np) 164 + return -ENODEV; 165 + 166 + bcsr = of_iomap(np, 0); 167 + of_node_put(np); 168 + if (!bcsr) 169 + return -ENOMEM; 170 + 171 + np = of_find_compatible_node(NULL, NULL, "fsl,mpc8323-qe-usb"); 172 + if (!np) { 173 + ret = -ENODEV; 174 + goto err; 175 + } 176 + 177 + #define BCSR8_TSEC1M_MASK (0x3 << 6) 178 + #define BCSR8_TSEC1M_RGMII (0x0 << 6) 179 + #define BCSR8_TSEC2M_MASK (0x3 << 4) 180 + #define BCSR8_TSEC2M_RGMII (0x0 << 4) 181 + /* 182 + * Default is GMII (2), but we should set it to RGMII (0) if we use 183 + * USB (Eth PHY is in RGMII mode anyway). 
184 + */ 185 + clrsetbits_8(&bcsr[8], BCSR8_TSEC1M_MASK | BCSR8_TSEC2M_MASK, 186 + BCSR8_TSEC1M_RGMII | BCSR8_TSEC2M_RGMII); 187 + 188 + #define BCSR13_USBMASK 0x0f 189 + #define BCSR13_nUSBEN 0x08 /* 1 - Disable, 0 - Enable */ 190 + #define BCSR13_USBSPEED 0x04 /* 1 - Full, 0 - Low */ 191 + #define BCSR13_USBMODE 0x02 /* 1 - Host, 0 - Function */ 192 + #define BCSR13_nUSBVCC 0x01 /* 1 - gets VBUS, 0 - supplies VBUS */ 193 + 194 + clrsetbits_8(&bcsr[13], BCSR13_USBMASK, BCSR13_USBSPEED); 195 + 196 + mode = of_get_property(np, "mode", NULL); 197 + if (mode && !strcmp(mode, "peripheral")) { 198 + setbits8(&bcsr[13], BCSR13_nUSBVCC); 199 + qe_usb_clock_set(QE_CLK21, 48000000); 200 + } else { 201 + setbits8(&bcsr[13], BCSR13_USBMODE); 202 + /* 203 + * The BCSR GPIOs are used to control power and 204 + * speed of the USB transceiver. This is needed for 205 + * the USB Host only. 206 + */ 207 + simple_gpiochip_init("fsl,mpc8360mds-bcsr-gpio"); 208 + } 209 + 210 + of_node_put(np); 211 + err: 212 + iounmap(bcsr); 213 + return ret; 214 + } 215 + machine_arch_initcall(mpc836x_mds, mpc836x_usb_cfg); 216 + #endif /* CONFIG_QE_USB */ 164 217 165 218 static void __init mpc836x_mds_init_IRQ(void) 166 219 {
+4 -2
arch/powerpc/platforms/83xx/mpc836x_rdk.c
··· 51 51 for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") 52 52 mpc83xx_add_bridge(np); 53 53 #endif 54 - 54 + #ifdef CONFIG_QUICC_ENGINE 55 55 qe_reset(); 56 + #endif 56 57 } 57 58 58 59 static void __init mpc836x_rdk_init_IRQ(void) ··· 72 71 */ 73 72 ipic_set_default_priority(); 74 73 of_node_put(np); 75 - 74 + #ifdef CONFIG_QUICC_ENGINE 76 75 np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); 77 76 if (!np) 78 77 return; 79 78 80 79 qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic); 81 80 of_node_put(np); 81 + #endif 82 82 } 83 83 84 84 /*
-1
arch/powerpc/platforms/83xx/mpc837x_mds.c
··· 26 26 #define BCSR12_USB_SER_MASK 0x8a 27 27 #define BCSR12_USB_SER_PIN 0x80 28 28 #define BCSR12_USB_SER_DEVICE 0x02 29 - extern int mpc837x_usb_cfg(void); 30 29 31 30 static int mpc837xmds_usb_cfg(void) 32 31 {
-2
arch/powerpc/platforms/83xx/mpc837x_rdb.c
··· 21 21 22 22 #include "mpc83xx.h" 23 23 24 - extern int mpc837x_usb_cfg(void); 25 - 26 24 /* ************************************************************************ 27 25 * 28 26 * Setup the architecture
+1
arch/powerpc/platforms/83xx/mpc83xx.h
··· 61 61 62 62 extern void mpc83xx_restart(char *cmd); 63 63 extern long mpc83xx_time_init(void); 64 + extern int mpc837x_usb_cfg(void); 64 65 extern int mpc834x_usb_cfg(void); 65 66 extern int mpc831x_usb_cfg(void); 66 67
+7
arch/powerpc/platforms/85xx/mpc85xx_ds.c
··· 148 148 /* 149 149 * Setup the architecture 150 150 */ 151 + #ifdef CONFIG_SMP 152 + extern void __init mpc85xx_smp_init(void); 153 + #endif 151 154 static void __init mpc85xx_ds_setup_arch(void) 152 155 { 153 156 #ifdef CONFIG_PCI ··· 174 171 } 175 172 176 173 ppc_md.pci_exclude_device = mpc85xx_exclude_device; 174 + #endif 175 + 176 + #ifdef CONFIG_SMP 177 + mpc85xx_smp_init(); 177 178 #endif 178 179 179 180 printk("MPC85xx DS board from Freescale Semiconductor\n");
+1
arch/powerpc/platforms/85xx/smp.c
··· 58 58 59 59 if (cpu_rel_addr == NULL) { 60 60 printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr); 61 + local_irq_restore(flags); 61 62 return; 62 63 } 63 64
+11
arch/powerpc/platforms/Kconfig
··· 312 312 Say Y here if you're going to use hardware that connects to the 313 313 MPC831x/834x/837x/8572/8610 GPIOs. 314 314 315 + config SIMPLE_GPIO 316 + bool "Support for simple, memory-mapped GPIO controllers" 317 + depends on PPC 318 + select GENERIC_GPIO 319 + select ARCH_REQUIRE_GPIOLIB 320 + help 321 + Say Y here to support simple, memory-mapped GPIO controllers. 322 + These are usually BCSRs used to control board's switches, LEDs, 323 + chip-selects, Ethernet/USB PHY's power and various other small 324 + on-board peripherals. 325 + 315 326 endmenu
+1 -1
arch/powerpc/platforms/Kconfig.cputype
··· 231 231 If in doubt, say Y here. 232 232 233 233 config SMP 234 - depends on PPC_STD_MMU 234 + depends on PPC_STD_MMU || FSL_BOOKE 235 235 bool "Symmetric multi-processing support" 236 236 ---help--- 237 237 This enables support for systems with more than one CPU. If you have
+11 -10
arch/powerpc/platforms/cell/beat_htab.c
··· 44 44 45 45 static inline unsigned int beat_read_mask(unsigned hpte_group) 46 46 { 47 - unsigned long hpte_v[5]; 48 47 unsigned long rmask = 0; 48 + u64 hpte_v[5]; 49 49 50 50 beat_read_htab_entries(0, hpte_group + 0, hpte_v); 51 51 if (!(hpte_v[0] & HPTE_V_BOLTED)) ··· 93 93 int psize, int ssize) 94 94 { 95 95 unsigned long lpar_rc; 96 - unsigned long slot; 97 - unsigned long hpte_v, hpte_r; 96 + u64 hpte_v, hpte_r, slot; 98 97 99 98 /* same as iseries */ 100 99 if (vflags & HPTE_V_SECONDARY) ··· 152 153 153 154 static unsigned long beat_lpar_hpte_getword0(unsigned long slot) 154 155 { 155 - unsigned long dword0, dword[5]; 156 + unsigned long dword0; 156 157 unsigned long lpar_rc; 158 + u64 dword[5]; 157 159 158 160 lpar_rc = beat_read_htab_entries(0, slot & ~3UL, dword); 159 161 ··· 170 170 unsigned long size_bytes = 1UL << ppc64_pft_size; 171 171 unsigned long hpte_count = size_bytes >> 4; 172 172 int i; 173 - unsigned long dummy0, dummy1; 173 + u64 dummy0, dummy1; 174 174 175 175 /* TODO: Use bulk call */ 176 176 for (i = 0; i < hpte_count; i++) ··· 189 189 int psize, int ssize, int local) 190 190 { 191 191 unsigned long lpar_rc; 192 - unsigned long dummy0, dummy1, want_v; 192 + u64 dummy0, dummy1; 193 + unsigned long want_v; 193 194 194 195 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 195 196 ··· 256 255 unsigned long ea, 257 256 int psize, int ssize) 258 257 { 259 - unsigned long lpar_rc, slot, vsid, va, dummy0, dummy1; 258 + unsigned long lpar_rc, slot, vsid, va; 259 + u64 dummy0, dummy1; 260 260 261 261 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M); 262 262 va = (vsid << 28) | (ea & 0x0fffffff); ··· 278 276 { 279 277 unsigned long want_v; 280 278 unsigned long lpar_rc; 281 - unsigned long dummy1, dummy2; 279 + u64 dummy1, dummy2; 282 280 unsigned long flags; 283 281 284 282 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", ··· 317 315 int psize, int ssize) 318 316 { 319 317 unsigned long lpar_rc; 320 - unsigned long slot; 321 - 
unsigned long hpte_v, hpte_r; 318 + u64 hpte_v, hpte_r, slot; 322 319 323 320 /* same as iseries */ 324 321 if (vflags & HPTE_V_SECONDARY)
+2 -2
arch/powerpc/platforms/cell/beat_udbg.c
··· 40 40 } 41 41 42 42 /* Buffered chars getc */ 43 - static long inbuflen; 44 - static long inbuf[2]; /* must be 2 longs */ 43 + static u64 inbuflen; 44 + static u64 inbuf[2]; /* must be 2 u64s */ 45 45 46 46 static int udbg_getc_poll_beat(void) 47 47 {
+1 -1
arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
··· 54 54 { 55 55 struct cbe_pmd_regs __iomem *pmd_regs; 56 56 struct cbe_mic_tm_regs __iomem *mic_tm_regs; 57 - u64 flags; 57 + unsigned long flags; 58 58 u64 value; 59 59 #ifdef DEBUG 60 60 long time;
+1 -1
arch/powerpc/platforms/cell/interrupt.c
··· 148 148 149 149 iic = &__get_cpu_var(iic); 150 150 *(unsigned long *) &pending = 151 - in_be64((unsigned long __iomem *) &iic->regs->pending_destr); 151 + in_be64((u64 __iomem *) &iic->regs->pending_destr); 152 152 if (!(pending.flags & CBE_IIC_IRQ_VALID)) 153 153 return NO_IRQ; 154 154 virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
+2 -2
arch/powerpc/platforms/cell/io-workarounds.c
··· 130 130 131 131 }; 132 132 133 - static void __iomem *iowa_ioremap(unsigned long addr, unsigned long size, 133 + static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, 134 134 unsigned long flags) 135 135 { 136 136 struct iowa_bus *bus; 137 137 void __iomem *res = __ioremap(addr, size, flags); 138 138 int busno; 139 139 140 - bus = iowa_pci_find(0, addr); 140 + bus = iowa_pci_find(0, (unsigned long)addr); 141 141 if (bus != NULL) { 142 142 busno = bus - iowa_busses; 143 143 PCI_SET_ADDR_TOKEN(res, busno + 1);
+2 -2
arch/powerpc/platforms/cell/iommu.c
··· 150 150 static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte, 151 151 long n_ptes) 152 152 { 153 - unsigned long __iomem *reg; 154 - unsigned long val; 153 + u64 __iomem *reg; 154 + u64 val; 155 155 long n; 156 156 157 157 reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
+3 -2
arch/powerpc/platforms/iseries/Kconfig
··· 10 10 config VIODASD 11 11 tristate "iSeries Virtual I/O disk support" 12 12 depends on BLOCK 13 + select VIOPATH 13 14 help 14 15 If you are running on an iSeries system and you want to use 15 16 virtual disks created and managed by OS/400, say Y. 16 17 17 18 config VIOCD 18 19 tristate "iSeries Virtual I/O CD support" 20 + select VIOPATH 19 21 help 20 22 If you are running Linux on an IBM iSeries system and you want to 21 23 read a CD drive owned by OS/400, say Y here. 22 24 23 25 config VIOTAPE 24 26 tristate "iSeries Virtual Tape Support" 27 + select VIOPATH 25 28 help 26 29 If you are running Linux on an iSeries system and you want Linux 27 30 to read and/or write a tape drive owned by OS/400, say Y here. ··· 33 30 34 31 config VIOPATH 35 32 bool 36 - depends on VIODASD || VIOCD || VIOTAPE || ISERIES_VETH 37 - default y
+11
arch/powerpc/platforms/iseries/setup.c
··· 23 23 #include <linux/string.h> 24 24 #include <linux/seq_file.h> 25 25 #include <linux/kdev_t.h> 26 + #include <linux/kexec.h> 26 27 #include <linux/major.h> 27 28 #include <linux/root_dev.h> 28 29 #include <linux/kernel.h> ··· 639 638 return 1; 640 639 } 641 640 641 + #ifdef CONFIG_KEXEC 642 + static int iseries_kexec_prepare(struct kimage *image) 643 + { 644 + return -ENOSYS; 645 + } 646 + #endif 647 + 642 648 define_machine(iseries) { 643 649 .name = "iSeries", 644 650 .setup_arch = iSeries_setup_arch, ··· 666 658 .probe = iseries_probe, 667 659 .ioremap = iseries_ioremap, 668 660 .iounmap = iseries_iounmap, 661 + #ifdef CONFIG_KEXEC 662 + .machine_kexec_prepare = iseries_kexec_prepare, 663 + #endif 669 664 /* XXX Implement enable_pmcs for iSeries */ 670 665 }; 671 666
+1 -1
arch/powerpc/platforms/pasemi/cpufreq.c
··· 112 112 113 113 static void set_astate(int cpu, unsigned int astate) 114 114 { 115 - u64 flags; 115 + unsigned long flags; 116 116 117 117 /* Return if called before init has run */ 118 118 if (unlikely(!sdcasr_mapbase))
+1 -1
arch/powerpc/platforms/pasemi/dma_lib.c
··· 509 509 */ 510 510 int pasemi_dma_init(void) 511 511 { 512 - static spinlock_t init_lock = SPIN_LOCK_UNLOCKED; 512 + static DEFINE_SPINLOCK(init_lock); 513 513 struct pci_dev *iob_pdev; 514 514 struct pci_dev *pdev; 515 515 struct resource res;
+2
arch/powerpc/platforms/powermac/pci.c
··· 661 661 pci_find_hose_for_OF_device(np); 662 662 if (!hose) { 663 663 printk(KERN_ERR "Can't find PCI hose for OHare2 !\n"); 664 + of_node_put(np); 664 665 return; 665 666 } 666 667 early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd); ··· 670 669 early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd); 671 670 } 672 671 has_second_ohare = 1; 672 + of_node_put(np); 673 673 } 674 674 675 675 /*
+7 -4
arch/powerpc/platforms/powermac/time.c
··· 265 265 struct resource rsrc; 266 266 267 267 vias = of_find_node_by_name(NULL, "via-cuda"); 268 - if (vias == 0) 268 + if (vias == NULL) 269 269 vias = of_find_node_by_name(NULL, "via-pmu"); 270 - if (vias == 0) 270 + if (vias == NULL) 271 271 vias = of_find_node_by_name(NULL, "via"); 272 - if (vias == 0 || of_address_to_resource(vias, 0, &rsrc)) 272 + if (vias == NULL || of_address_to_resource(vias, 0, &rsrc)) { 273 + of_node_put(vias); 273 274 return 0; 275 + } 276 + of_node_put(vias); 274 277 via = ioremap(rsrc.start, rsrc.end - rsrc.start + 1); 275 278 if (via == NULL) { 276 279 printk(KERN_ERR "Failed to map VIA for timer calibration !\n"); ··· 300 297 ppc_tb_freq = (dstart - dend) * 100 / 6; 301 298 302 299 iounmap(via); 303 - 300 + 304 301 return 1; 305 302 } 306 303 #endif
+37
arch/powerpc/platforms/ps3/device-init.c
··· 518 518 return result; 519 519 } 520 520 521 + static int __init ps3_register_ramdisk_device(void) 522 + { 523 + int result; 524 + struct layout { 525 + struct ps3_system_bus_device dev; 526 + } *p; 527 + 528 + pr_debug(" -> %s:%d\n", __func__, __LINE__); 529 + 530 + p = kzalloc(sizeof(struct layout), GFP_KERNEL); 531 + 532 + if (!p) 533 + return -ENOMEM; 534 + 535 + p->dev.match_id = PS3_MATCH_ID_GPU; 536 + p->dev.match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK; 537 + p->dev.dev_type = PS3_DEVICE_TYPE_IOC0; 538 + 539 + result = ps3_system_bus_device_register(&p->dev); 540 + 541 + if (result) { 542 + pr_debug("%s:%d ps3_system_bus_device_register failed\n", 543 + __func__, __LINE__); 544 + goto fail_device_register; 545 + } 546 + 547 + pr_debug(" <- %s:%d\n", __func__, __LINE__); 548 + return 0; 549 + 550 + fail_device_register: 551 + kfree(p); 552 + pr_debug(" <- %s:%d failed\n", __func__, __LINE__); 553 + return result; 554 + } 555 + 521 556 /** 522 557 * ps3_setup_dynamic_device - Setup a dynamic device from the repository 523 558 */ ··· 980 945 ps3_register_sound_devices(); 981 946 982 947 ps3_register_lpm_devices(); 948 + 949 + ps3_register_ramdisk_device(); 983 950 984 951 pr_debug(" <- %s:%d\n", __func__, __LINE__); 985 952 return 0;
+1
arch/powerpc/sysdev/Makefile
··· 17 17 obj-$(CONFIG_FSL_LBC) += fsl_lbc.o 18 18 obj-$(CONFIG_FSL_GTM) += fsl_gtm.o 19 19 obj-$(CONFIG_MPC8xxx_GPIO) += mpc8xxx_gpio.o 20 + obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o 20 21 obj-$(CONFIG_RAPIDIO) += fsl_rio.o 21 22 obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o 22 23 obj-$(CONFIG_QUICC_ENGINE) += qe_lib/
+4 -3
arch/powerpc/sysdev/fsl_pci.c
··· 29 29 30 30 #if defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_86xx) 31 31 /* atmu setup for fsl pci/pcie controller */ 32 - void __init setup_pci_atmu(struct pci_controller *hose, struct resource *rsrc) 32 + static void __init setup_pci_atmu(struct pci_controller *hose, 33 + struct resource *rsrc) 33 34 { 34 35 struct ccsr_pci __iomem *pci; 35 36 int i; ··· 87 86 out_be32(&pci->piw[2].piwar, PIWAR_2G); 88 87 } 89 88 90 - void __init setup_pci_cmd(struct pci_controller *hose) 89 + static void __init setup_pci_cmd(struct pci_controller *hose) 91 90 { 92 91 u16 cmd; 93 92 int cap_x; ··· 131 130 return ; 132 131 } 133 132 134 - int __init fsl_pcie_check_link(struct pci_controller *hose) 133 + static int __init fsl_pcie_check_link(struct pci_controller *hose) 135 134 { 136 135 u32 val; 137 136 early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
+5
arch/powerpc/sysdev/fsl_soc.h
··· 5 5 #include <asm/mmu.h> 6 6 7 7 extern phys_addr_t get_immrbase(void); 8 + #if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx) 8 9 extern u32 get_brgfreq(void); 9 10 extern u32 get_baudrate(void); 11 + #else 12 + static inline u32 get_brgfreq(void) { return -1; } 13 + static inline u32 get_baudrate(void) { return -1; } 14 + #endif 10 15 extern u32 fsl_get_sys_freq(void); 11 16 12 17 struct spi_board_info;
+2 -1
arch/powerpc/sysdev/qe_lib/Kconfig
··· 22 22 23 23 config QE_USB 24 24 bool 25 + default y if USB_GADGET_FSL_QE 25 26 help 26 - QE USB Host Controller support 27 + QE USB Controller support
+195
arch/powerpc/sysdev/qe_lib/gpio.c
··· 14 14 #include <linux/kernel.h> 15 15 #include <linux/init.h> 16 16 #include <linux/spinlock.h> 17 + #include <linux/err.h> 17 18 #include <linux/io.h> 18 19 #include <linux/of.h> 19 20 #include <linux/of_gpio.h> ··· 25 24 struct of_mm_gpio_chip mm_gc; 26 25 spinlock_t lock; 27 26 27 + unsigned long pin_flags[QE_PIO_PINS]; 28 + #define QE_PIN_REQUESTED 0 29 + 28 30 /* shadowed data register to clear/set bits safely */ 29 31 u32 cpdata; 32 + 33 + /* saved_regs used to restore dedicated functions */ 34 + struct qe_pio_regs saved_regs; 30 35 }; 31 36 32 37 static inline struct qe_gpio_chip * ··· 47 40 struct qe_pio_regs __iomem *regs = mm_gc->regs; 48 41 49 42 qe_gc->cpdata = in_be32(&regs->cpdata); 43 + qe_gc->saved_regs.cpdata = qe_gc->cpdata; 44 + qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1); 45 + qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2); 46 + qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1); 47 + qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2); 48 + qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr); 50 49 } 51 50 52 51 static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio) ··· 115 102 116 103 return 0; 117 104 } 105 + 106 + struct qe_pin { 107 + /* 108 + * The qe_gpio_chip name is unfortunate, we should change that to 109 + * something like qe_pio_controller. Someday. 110 + */ 111 + struct qe_gpio_chip *controller; 112 + int num; 113 + }; 114 + 115 + /** 116 + * qe_pin_request - Request a QE pin 117 + * @np: device node to get a pin from 118 + * @index: index of a pin in the device tree 119 + * Context: non-atomic 120 + * 121 + * This function return qe_pin so that you could use it with the rest of 122 + * the QE Pin Multiplexing API. 
123 + */ 124 + struct qe_pin *qe_pin_request(struct device_node *np, int index) 125 + { 126 + struct qe_pin *qe_pin; 127 + struct device_node *gc; 128 + struct of_gpio_chip *of_gc = NULL; 129 + struct of_mm_gpio_chip *mm_gc; 130 + struct qe_gpio_chip *qe_gc; 131 + int err; 132 + int size; 133 + const void *gpio_spec; 134 + const u32 *gpio_cells; 135 + unsigned long flags; 136 + 137 + qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL); 138 + if (!qe_pin) { 139 + pr_debug("%s: can't allocate memory\n", __func__); 140 + return ERR_PTR(-ENOMEM); 141 + } 142 + 143 + err = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index, 144 + &gc, &gpio_spec); 145 + if (err) { 146 + pr_debug("%s: can't parse gpios property\n", __func__); 147 + goto err0; 148 + } 149 + 150 + if (!of_device_is_compatible(gc, "fsl,mpc8323-qe-pario-bank")) { 151 + pr_debug("%s: tried to get a non-qe pin\n", __func__); 152 + err = -EINVAL; 153 + goto err1; 154 + } 155 + 156 + of_gc = gc->data; 157 + if (!of_gc) { 158 + pr_debug("%s: gpio controller %s isn't registered\n", 159 + np->full_name, gc->full_name); 160 + err = -ENODEV; 161 + goto err1; 162 + } 163 + 164 + gpio_cells = of_get_property(gc, "#gpio-cells", &size); 165 + if (!gpio_cells || size != sizeof(*gpio_cells) || 166 + *gpio_cells != of_gc->gpio_cells) { 167 + pr_debug("%s: wrong #gpio-cells for %s\n", 168 + np->full_name, gc->full_name); 169 + err = -EINVAL; 170 + goto err1; 171 + } 172 + 173 + err = of_gc->xlate(of_gc, np, gpio_spec, NULL); 174 + if (err < 0) 175 + goto err1; 176 + 177 + mm_gc = to_of_mm_gpio_chip(&of_gc->gc); 178 + qe_gc = to_qe_gpio_chip(mm_gc); 179 + 180 + spin_lock_irqsave(&qe_gc->lock, flags); 181 + 182 + if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) { 183 + qe_pin->controller = qe_gc; 184 + qe_pin->num = err; 185 + err = 0; 186 + } else { 187 + err = -EBUSY; 188 + } 189 + 190 + spin_unlock_irqrestore(&qe_gc->lock, flags); 191 + 192 + if (!err) 193 + return qe_pin; 194 + err1: 195 + 
of_node_put(gc); 196 + err0: 197 + kfree(qe_pin); 198 + pr_debug("%s failed with status %d\n", __func__, err); 199 + return ERR_PTR(err); 200 + } 201 + EXPORT_SYMBOL(qe_pin_request); 202 + 203 + /** 204 + * qe_pin_free - Free a pin 205 + * @qe_pin: pointer to the qe_pin structure 206 + * Context: any 207 + * 208 + * This function frees the qe_pin structure and makes a pin available 209 + * for further qe_pin_request() calls. 210 + */ 211 + void qe_pin_free(struct qe_pin *qe_pin) 212 + { 213 + struct qe_gpio_chip *qe_gc = qe_pin->controller; 214 + unsigned long flags; 215 + const int pin = qe_pin->num; 216 + 217 + spin_lock_irqsave(&qe_gc->lock, flags); 218 + test_and_clear_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[pin]); 219 + spin_unlock_irqrestore(&qe_gc->lock, flags); 220 + 221 + kfree(qe_pin); 222 + } 223 + EXPORT_SYMBOL(qe_pin_free); 224 + 225 + /** 226 + * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode 227 + * @qe_pin: pointer to the qe_pin structure 228 + * Context: any 229 + * 230 + * This function resets a pin to a dedicated peripheral function that 231 + * has been set up by the firmware. 
232 + */ 233 + void qe_pin_set_dedicated(struct qe_pin *qe_pin) 234 + { 235 + struct qe_gpio_chip *qe_gc = qe_pin->controller; 236 + struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs; 237 + struct qe_pio_regs *sregs = &qe_gc->saved_regs; 238 + int pin = qe_pin->num; 239 + u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1)); 240 + u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2); 241 + bool second_reg = pin > (QE_PIO_PINS / 2) - 1; 242 + unsigned long flags; 243 + 244 + spin_lock_irqsave(&qe_gc->lock, flags); 245 + 246 + if (second_reg) { 247 + clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2); 248 + clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2); 249 + } else { 250 + clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2); 251 + clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2); 252 + } 253 + 254 + if (sregs->cpdata & mask1) 255 + qe_gc->cpdata |= mask1; 256 + else 257 + qe_gc->cpdata &= ~mask1; 258 + 259 + out_be32(&regs->cpdata, qe_gc->cpdata); 260 + clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1); 261 + 262 + spin_unlock_irqrestore(&qe_gc->lock, flags); 263 + } 264 + EXPORT_SYMBOL(qe_pin_set_dedicated); 265 + 266 + /** 267 + * qe_pin_set_gpio - Set a pin to the GPIO mode 268 + * @qe_pin: pointer to the qe_pin structure 269 + * Context: any 270 + * 271 + * This function sets a pin to the GPIO mode. 272 + */ 273 + void qe_pin_set_gpio(struct qe_pin *qe_pin) 274 + { 275 + struct qe_gpio_chip *qe_gc = qe_pin->controller; 276 + struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs; 277 + unsigned long flags; 278 + 279 + spin_lock_irqsave(&qe_gc->lock, flags); 280 + 281 + /* Let's make it input by default, GPIO API is able to change that. */ 282 + __par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0); 283 + 284 + spin_unlock_irqrestore(&qe_gc->lock, flags); 285 + } 286 + EXPORT_SYMBOL(qe_pin_set_gpio); 118 287 119 288 static int __init qe_add_gpiochips(void) 120 289 {
+155
arch/powerpc/sysdev/simple_gpio.c
··· 1 + /* 2 + * Simple Memory-Mapped GPIOs 3 + * 4 + * Copyright (c) MontaVista Software, Inc. 2008. 5 + * 6 + * Author: Anton Vorontsov <avorontsov@ru.mvista.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms of the GNU General Public License as published by the 10 + * Free Software Foundation; either version 2 of the License, or (at your 11 + * option) any later version. 12 + */ 13 + 14 + #include <linux/init.h> 15 + #include <linux/kernel.h> 16 + #include <linux/module.h> 17 + #include <linux/spinlock.h> 18 + #include <linux/types.h> 19 + #include <linux/ioport.h> 20 + #include <linux/io.h> 21 + #include <linux/of.h> 22 + #include <linux/of_gpio.h> 23 + #include <linux/gpio.h> 24 + #include <asm/prom.h> 25 + #include "simple_gpio.h" 26 + 27 + struct u8_gpio_chip { 28 + struct of_mm_gpio_chip mm_gc; 29 + spinlock_t lock; 30 + 31 + /* shadowed data register to clear/set bits safely */ 32 + u8 data; 33 + }; 34 + 35 + static struct u8_gpio_chip *to_u8_gpio_chip(struct of_mm_gpio_chip *mm_gc) 36 + { 37 + return container_of(mm_gc, struct u8_gpio_chip, mm_gc); 38 + } 39 + 40 + static u8 u8_pin2mask(unsigned int pin) 41 + { 42 + return 1 << (8 - 1 - pin); 43 + } 44 + 45 + static int u8_gpio_get(struct gpio_chip *gc, unsigned int gpio) 46 + { 47 + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); 48 + 49 + return in_8(mm_gc->regs) & u8_pin2mask(gpio); 50 + } 51 + 52 + static void u8_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) 53 + { 54 + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); 55 + struct u8_gpio_chip *u8_gc = to_u8_gpio_chip(mm_gc); 56 + unsigned long flags; 57 + 58 + spin_lock_irqsave(&u8_gc->lock, flags); 59 + 60 + if (val) 61 + u8_gc->data |= u8_pin2mask(gpio); 62 + else 63 + u8_gc->data &= ~u8_pin2mask(gpio); 64 + 65 + out_8(mm_gc->regs, u8_gc->data); 66 + 67 + spin_unlock_irqrestore(&u8_gc->lock, flags); 68 + } 69 + 70 + static int u8_gpio_dir_in(struct gpio_chip *gc, 
unsigned int gpio) 71 + { 72 + return 0; 73 + } 74 + 75 + static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) 76 + { 77 + u8_gpio_set(gc, gpio, val); 78 + return 0; 79 + } 80 + 81 + static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) 82 + { 83 + struct u8_gpio_chip *u8_gc = to_u8_gpio_chip(mm_gc); 84 + 85 + u8_gc->data = in_8(mm_gc->regs); 86 + } 87 + 88 + static int __init u8_simple_gpiochip_add(struct device_node *np) 89 + { 90 + int ret; 91 + struct u8_gpio_chip *u8_gc; 92 + struct of_mm_gpio_chip *mm_gc; 93 + struct of_gpio_chip *of_gc; 94 + struct gpio_chip *gc; 95 + 96 + u8_gc = kzalloc(sizeof(*u8_gc), GFP_KERNEL); 97 + if (!u8_gc) 98 + return -ENOMEM; 99 + 100 + spin_lock_init(&u8_gc->lock); 101 + 102 + mm_gc = &u8_gc->mm_gc; 103 + of_gc = &mm_gc->of_gc; 104 + gc = &of_gc->gc; 105 + 106 + mm_gc->save_regs = u8_gpio_save_regs; 107 + of_gc->gpio_cells = 2; 108 + gc->ngpio = 8; 109 + gc->direction_input = u8_gpio_dir_in; 110 + gc->direction_output = u8_gpio_dir_out; 111 + gc->get = u8_gpio_get; 112 + gc->set = u8_gpio_set; 113 + 114 + ret = of_mm_gpiochip_add(np, mm_gc); 115 + if (ret) 116 + goto err; 117 + return 0; 118 + err: 119 + kfree(u8_gc); 120 + return ret; 121 + } 122 + 123 + void __init simple_gpiochip_init(const char *compatible) 124 + { 125 + struct device_node *np; 126 + 127 + for_each_compatible_node(np, NULL, compatible) { 128 + int ret; 129 + struct resource r; 130 + 131 + ret = of_address_to_resource(np, 0, &r); 132 + if (ret) 133 + goto err; 134 + 135 + switch (resource_size(&r)) { 136 + case 1: 137 + ret = u8_simple_gpiochip_add(np); 138 + if (ret) 139 + goto err; 140 + break; 141 + default: 142 + /* 143 + * Whenever you need support for GPIO bank width > 1, 144 + * please just turn u8_ code into huge macros, and 145 + * construct needed uX_ code with it. 
146 + */ 147 + ret = -ENOSYS; 148 + goto err; 149 + } 150 + continue; 151 + err: 152 + pr_err("%s: registration failed, status %d\n", 153 + np->full_name, ret); 154 + } 155 + }
+12
arch/powerpc/sysdev/simple_gpio.h
··· 1 + #ifndef __SYSDEV_SIMPLE_GPIO_H 2 + #define __SYSDEV_SIMPLE_GPIO_H 3 + 4 + #include <linux/errno.h> 5 + 6 + #ifdef CONFIG_SIMPLE_GPIO 7 + extern void simple_gpiochip_init(const char *compatible); 8 + #else 9 + static inline void simple_gpiochip_init(const char *compatible) {} 10 + #endif /* CONFIG_SIMPLE_GPIO */ 11 + 12 + #endif /* __SYSDEV_SIMPLE_GPIO_H */
+1
drivers/char/Kconfig
··· 616 616 default y 617 617 select HVC_DRIVER 618 618 select HVC_IRQ 619 + select VIOPATH 619 620 help 620 621 iSeries machines support a hypervisor virtual console. 621 622
+2 -2
drivers/char/hvc_beat.c
··· 44 44 static unsigned char q[sizeof(unsigned long) * 2] 45 45 __attribute__((aligned(sizeof(unsigned long)))); 46 46 static int qlen = 0; 47 - unsigned long got; 47 + u64 got; 48 48 49 49 again: 50 50 if (qlen) { ··· 63 63 } 64 64 } 65 65 if (beat_get_term_char(vtermno, &got, 66 - ((unsigned long *)q), ((unsigned long *)q) + 1) == 0) { 66 + ((u64 *)q), ((u64 *)q) + 1) == 0) { 67 67 qlen = got; 68 68 goto again; 69 69 }
+7
drivers/mtd/devices/Kconfig
··· 120 120 doesn't have access to, memory beyond the mem=xxx limit, nvram, 121 121 memory on the video card, etc... 122 122 123 + config MTD_PS3VRAM 124 + tristate "PS3 video RAM" 125 + depends on FB_PS3 126 + help 127 + This driver allows you to use excess PS3 video RAM as volatile 128 + storage or system swap. 129 + 123 130 config MTD_LART 124 131 tristate "28F160xx flash driver for LART" 125 132 depends on SA1100_LART
+1
drivers/mtd/devices/Makefile
··· 16 16 obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o 17 17 obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o 18 18 obj-$(CONFIG_MTD_M25P80) += m25p80.o 19 + obj-$(CONFIG_MTD_PS3VRAM) += ps3vram.o
+768
drivers/mtd/devices/ps3vram.c
··· 1 + /** 2 + * ps3vram - Use extra PS3 video ram as MTD block device. 3 + * 4 + * Copyright (c) 2007-2008 Jim Paris <jim@jtan.com> 5 + * Added support RSX DMA Vivien Chappelier <vivien.chappelier@free.fr> 6 + */ 7 + 8 + #include <linux/io.h> 9 + #include <linux/mm.h> 10 + #include <linux/init.h> 11 + #include <linux/kernel.h> 12 + #include <linux/list.h> 13 + #include <linux/module.h> 14 + #include <linux/moduleparam.h> 15 + #include <linux/slab.h> 16 + #include <linux/version.h> 17 + #include <linux/gfp.h> 18 + #include <linux/delay.h> 19 + #include <linux/mtd/mtd.h> 20 + 21 + #include <asm/lv1call.h> 22 + #include <asm/ps3.h> 23 + 24 + #define DEVICE_NAME "ps3vram" 25 + 26 + #define XDR_BUF_SIZE (2 * 1024 * 1024) /* XDR buffer (must be 1MiB aligned) */ 27 + #define XDR_IOIF 0x0c000000 28 + 29 + #define FIFO_BASE XDR_IOIF 30 + #define FIFO_SIZE (64 * 1024) 31 + 32 + #define DMA_PAGE_SIZE (4 * 1024) 33 + 34 + #define CACHE_PAGE_SIZE (256 * 1024) 35 + #define CACHE_PAGE_COUNT ((XDR_BUF_SIZE - FIFO_SIZE) / CACHE_PAGE_SIZE) 36 + 37 + #define CACHE_OFFSET CACHE_PAGE_SIZE 38 + #define FIFO_OFFSET 0 39 + 40 + #define CTRL_PUT 0x10 41 + #define CTRL_GET 0x11 42 + #define CTRL_TOP 0x15 43 + 44 + #define UPLOAD_SUBCH 1 45 + #define DOWNLOAD_SUBCH 2 46 + 47 + #define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c 48 + #define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104 49 + 50 + #define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601 51 + 52 + struct mtd_info ps3vram_mtd; 53 + 54 + #define CACHE_PAGE_PRESENT 1 55 + #define CACHE_PAGE_DIRTY 2 56 + 57 + struct ps3vram_tag { 58 + unsigned int address; 59 + unsigned int flags; 60 + }; 61 + 62 + struct ps3vram_cache { 63 + unsigned int page_count; 64 + unsigned int page_size; 65 + struct ps3vram_tag *tags; 66 + }; 67 + 68 + struct ps3vram_priv { 69 + u64 memory_handle; 70 + u64 context_handle; 71 + u32 *ctrl; 72 + u32 *reports; 73 + u8 __iomem *ddr_base; 74 + u8 *xdr_buf; 75 + 76 + u32 *fifo_base; 77 + u32 *fifo_ptr; 78 + 79 + struct 
device *dev; 80 + struct ps3vram_cache cache; 81 + 82 + /* Used to serialize cache/DMA operations */ 83 + struct mutex lock; 84 + }; 85 + 86 + #define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */ 87 + #define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */ 88 + #define DMA_NOTIFIER_SIZE 0x40 89 + #define NOTIFIER 7 /* notifier used for completion report */ 90 + 91 + /* A trailing '-' means to subtract off ps3fb_videomemory.size */ 92 + char *size = "256M-"; 93 + module_param(size, charp, 0); 94 + MODULE_PARM_DESC(size, "memory size"); 95 + 96 + static u32 *ps3vram_get_notifier(u32 *reports, int notifier) 97 + { 98 + return (void *) reports + 99 + DMA_NOTIFIER_OFFSET_BASE + 100 + DMA_NOTIFIER_SIZE * notifier; 101 + } 102 + 103 + static void ps3vram_notifier_reset(struct mtd_info *mtd) 104 + { 105 + int i; 106 + 107 + struct ps3vram_priv *priv = mtd->priv; 108 + u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER); 109 + for (i = 0; i < 4; i++) 110 + notify[i] = 0xffffffff; 111 + } 112 + 113 + static int ps3vram_notifier_wait(struct mtd_info *mtd, unsigned int timeout_ms) 114 + { 115 + struct ps3vram_priv *priv = mtd->priv; 116 + u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER); 117 + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); 118 + 119 + do { 120 + if (!notify[3]) 121 + return 0; 122 + msleep(1); 123 + } while (time_before(jiffies, timeout)); 124 + 125 + return -ETIMEDOUT; 126 + } 127 + 128 + static void ps3vram_init_ring(struct mtd_info *mtd) 129 + { 130 + struct ps3vram_priv *priv = mtd->priv; 131 + 132 + priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET; 133 + priv->ctrl[CTRL_GET] = FIFO_BASE + FIFO_OFFSET; 134 + } 135 + 136 + static int ps3vram_wait_ring(struct mtd_info *mtd, unsigned int timeout_ms) 137 + { 138 + struct ps3vram_priv *priv = mtd->priv; 139 + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); 140 + 141 + do { 142 + if (priv->ctrl[CTRL_PUT] == 
priv->ctrl[CTRL_GET]) 143 + return 0; 144 + msleep(1); 145 + } while (time_before(jiffies, timeout)); 146 + 147 + dev_dbg(priv->dev, "%s:%d: FIFO timeout (%08x/%08x/%08x)\n", __func__, 148 + __LINE__, priv->ctrl[CTRL_PUT], priv->ctrl[CTRL_GET], 149 + priv->ctrl[CTRL_TOP]); 150 + 151 + return -ETIMEDOUT; 152 + } 153 + 154 + static void ps3vram_out_ring(struct ps3vram_priv *priv, u32 data) 155 + { 156 + *(priv->fifo_ptr)++ = data; 157 + } 158 + 159 + static void ps3vram_begin_ring(struct ps3vram_priv *priv, u32 chan, 160 + u32 tag, u32 size) 161 + { 162 + ps3vram_out_ring(priv, (size << 18) | (chan << 13) | tag); 163 + } 164 + 165 + static void ps3vram_rewind_ring(struct mtd_info *mtd) 166 + { 167 + struct ps3vram_priv *priv = mtd->priv; 168 + u64 status; 169 + 170 + ps3vram_out_ring(priv, 0x20000000 | (FIFO_BASE + FIFO_OFFSET)); 171 + 172 + priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET; 173 + 174 + /* asking the HV for a blit will kick the fifo */ 175 + status = lv1_gpu_context_attribute(priv->context_handle, 176 + L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, 177 + 0, 0, 0, 0); 178 + if (status) 179 + dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n", 180 + __func__, __LINE__); 181 + 182 + priv->fifo_ptr = priv->fifo_base; 183 + } 184 + 185 + static void ps3vram_fire_ring(struct mtd_info *mtd) 186 + { 187 + struct ps3vram_priv *priv = mtd->priv; 188 + u64 status; 189 + 190 + mutex_lock(&ps3_gpu_mutex); 191 + 192 + priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET + 193 + (priv->fifo_ptr - priv->fifo_base) * sizeof(u32); 194 + 195 + /* asking the HV for a blit will kick the fifo */ 196 + status = lv1_gpu_context_attribute(priv->context_handle, 197 + L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, 198 + 0, 0, 0, 0); 199 + if (status) 200 + dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n", 201 + __func__, __LINE__); 202 + 203 + if ((priv->fifo_ptr - priv->fifo_base) * sizeof(u32) > 204 + FIFO_SIZE - 1024) { 205 + dev_dbg(priv->dev, "%s:%d: fifo full, rewinding\n", 
__func__, 206 + __LINE__); 207 + ps3vram_wait_ring(mtd, 200); 208 + ps3vram_rewind_ring(mtd); 209 + } 210 + 211 + mutex_unlock(&ps3_gpu_mutex); 212 + } 213 + 214 + static void ps3vram_bind(struct mtd_info *mtd) 215 + { 216 + struct ps3vram_priv *priv = mtd->priv; 217 + 218 + ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0, 1); 219 + ps3vram_out_ring(priv, 0x31337303); 220 + ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x180, 3); 221 + ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER); 222 + ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */ 223 + ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */ 224 + 225 + ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0, 1); 226 + ps3vram_out_ring(priv, 0x3137c0de); 227 + ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x180, 3); 228 + ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER); 229 + ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */ 230 + ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */ 231 + 232 + ps3vram_fire_ring(mtd); 233 + } 234 + 235 + static int ps3vram_upload(struct mtd_info *mtd, unsigned int src_offset, 236 + unsigned int dst_offset, int len, int count) 237 + { 238 + struct ps3vram_priv *priv = mtd->priv; 239 + 240 + ps3vram_begin_ring(priv, UPLOAD_SUBCH, 241 + NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); 242 + ps3vram_out_ring(priv, XDR_IOIF + src_offset); 243 + ps3vram_out_ring(priv, dst_offset); 244 + ps3vram_out_ring(priv, len); 245 + ps3vram_out_ring(priv, len); 246 + ps3vram_out_ring(priv, len); 247 + ps3vram_out_ring(priv, count); 248 + ps3vram_out_ring(priv, (1 << 8) | 1); 249 + ps3vram_out_ring(priv, 0); 250 + 251 + ps3vram_notifier_reset(mtd); 252 + ps3vram_begin_ring(priv, UPLOAD_SUBCH, 253 + NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1); 254 + ps3vram_out_ring(priv, 0); 255 + ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x100, 1); 256 + ps3vram_out_ring(priv, 0); 257 + ps3vram_fire_ring(mtd); 258 + if (ps3vram_notifier_wait(mtd, 200) < 0) { 259 + 
dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__, 260 + __LINE__); 261 + return -1; 262 + } 263 + 264 + return 0; 265 + } 266 + 267 + static int ps3vram_download(struct mtd_info *mtd, unsigned int src_offset, 268 + unsigned int dst_offset, int len, int count) 269 + { 270 + struct ps3vram_priv *priv = mtd->priv; 271 + 272 + ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 273 + NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); 274 + ps3vram_out_ring(priv, src_offset); 275 + ps3vram_out_ring(priv, XDR_IOIF + dst_offset); 276 + ps3vram_out_ring(priv, len); 277 + ps3vram_out_ring(priv, len); 278 + ps3vram_out_ring(priv, len); 279 + ps3vram_out_ring(priv, count); 280 + ps3vram_out_ring(priv, (1 << 8) | 1); 281 + ps3vram_out_ring(priv, 0); 282 + 283 + ps3vram_notifier_reset(mtd); 284 + ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 285 + NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1); 286 + ps3vram_out_ring(priv, 0); 287 + ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x100, 1); 288 + ps3vram_out_ring(priv, 0); 289 + ps3vram_fire_ring(mtd); 290 + if (ps3vram_notifier_wait(mtd, 200) < 0) { 291 + dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__, 292 + __LINE__); 293 + return -1; 294 + } 295 + 296 + return 0; 297 + } 298 + 299 + static void ps3vram_cache_evict(struct mtd_info *mtd, int entry) 300 + { 301 + struct ps3vram_priv *priv = mtd->priv; 302 + struct ps3vram_cache *cache = &priv->cache; 303 + 304 + if (cache->tags[entry].flags & CACHE_PAGE_DIRTY) { 305 + dev_dbg(priv->dev, "%s:%d: flushing %d : 0x%08x\n", __func__, 306 + __LINE__, entry, cache->tags[entry].address); 307 + if (ps3vram_upload(mtd, 308 + CACHE_OFFSET + entry * cache->page_size, 309 + cache->tags[entry].address, 310 + DMA_PAGE_SIZE, 311 + cache->page_size / DMA_PAGE_SIZE) < 0) { 312 + dev_dbg(priv->dev, "%s:%d: failed to upload from " 313 + "0x%x to 0x%x size 0x%x\n", __func__, __LINE__, 314 + entry * cache->page_size, 315 + cache->tags[entry].address, cache->page_size); 316 + } 317 + cache->tags[entry].flags &= 
~CACHE_PAGE_DIRTY; 318 + } 319 + } 320 + 321 + static void ps3vram_cache_load(struct mtd_info *mtd, int entry, 322 + unsigned int address) 323 + { 324 + struct ps3vram_priv *priv = mtd->priv; 325 + struct ps3vram_cache *cache = &priv->cache; 326 + 327 + dev_dbg(priv->dev, "%s:%d: fetching %d : 0x%08x\n", __func__, __LINE__, 328 + entry, address); 329 + if (ps3vram_download(mtd, 330 + address, 331 + CACHE_OFFSET + entry * cache->page_size, 332 + DMA_PAGE_SIZE, 333 + cache->page_size / DMA_PAGE_SIZE) < 0) { 334 + dev_err(priv->dev, "%s:%d: failed to download from " 335 + "0x%x to 0x%x size 0x%x\n", __func__, __LINE__, address, 336 + entry * cache->page_size, cache->page_size); 337 + } 338 + 339 + cache->tags[entry].address = address; 340 + cache->tags[entry].flags |= CACHE_PAGE_PRESENT; 341 + } 342 + 343 + 344 + static void ps3vram_cache_flush(struct mtd_info *mtd) 345 + { 346 + struct ps3vram_priv *priv = mtd->priv; 347 + struct ps3vram_cache *cache = &priv->cache; 348 + int i; 349 + 350 + dev_dbg(priv->dev, "%s:%d: FLUSH\n", __func__, __LINE__); 351 + for (i = 0; i < cache->page_count; i++) { 352 + ps3vram_cache_evict(mtd, i); 353 + cache->tags[i].flags = 0; 354 + } 355 + } 356 + 357 + static unsigned int ps3vram_cache_match(struct mtd_info *mtd, loff_t address) 358 + { 359 + struct ps3vram_priv *priv = mtd->priv; 360 + struct ps3vram_cache *cache = &priv->cache; 361 + unsigned int base; 362 + unsigned int offset; 363 + int i; 364 + static int counter; 365 + 366 + offset = (unsigned int) (address & (cache->page_size - 1)); 367 + base = (unsigned int) (address - offset); 368 + 369 + /* fully associative check */ 370 + for (i = 0; i < cache->page_count; i++) { 371 + if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) && 372 + cache->tags[i].address == base) { 373 + dev_dbg(priv->dev, "%s:%d: found entry %d : 0x%08x\n", 374 + __func__, __LINE__, i, cache->tags[i].address); 375 + return i; 376 + } 377 + } 378 + 379 + /* choose a random entry */ 380 + i = (jiffies + 
(counter++)) % cache->page_count; 381 + dev_dbg(priv->dev, "%s:%d: using entry %d\n", __func__, __LINE__, i); 382 + 383 + ps3vram_cache_evict(mtd, i); 384 + ps3vram_cache_load(mtd, i, base); 385 + 386 + return i; 387 + } 388 + 389 + static int ps3vram_cache_init(struct mtd_info *mtd) 390 + { 391 + struct ps3vram_priv *priv = mtd->priv; 392 + 393 + priv->cache.page_count = CACHE_PAGE_COUNT; 394 + priv->cache.page_size = CACHE_PAGE_SIZE; 395 + priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) * 396 + CACHE_PAGE_COUNT, GFP_KERNEL); 397 + if (priv->cache.tags == NULL) { 398 + dev_err(priv->dev, "%s:%d: could not allocate cache tags\n", 399 + __func__, __LINE__); 400 + return -ENOMEM; 401 + } 402 + 403 + dev_info(priv->dev, "created ram cache: %d entries, %d KiB each\n", 404 + CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024); 405 + 406 + return 0; 407 + } 408 + 409 + static void ps3vram_cache_cleanup(struct mtd_info *mtd) 410 + { 411 + struct ps3vram_priv *priv = mtd->priv; 412 + 413 + ps3vram_cache_flush(mtd); 414 + kfree(priv->cache.tags); 415 + } 416 + 417 + static int ps3vram_erase(struct mtd_info *mtd, struct erase_info *instr) 418 + { 419 + struct ps3vram_priv *priv = mtd->priv; 420 + 421 + if (instr->addr + instr->len > mtd->size) 422 + return -EINVAL; 423 + 424 + mutex_lock(&priv->lock); 425 + 426 + ps3vram_cache_flush(mtd); 427 + 428 + /* Set bytes to 0xFF */ 429 + memset_io(priv->ddr_base + instr->addr, 0xFF, instr->len); 430 + 431 + mutex_unlock(&priv->lock); 432 + 433 + instr->state = MTD_ERASE_DONE; 434 + mtd_erase_callback(instr); 435 + 436 + return 0; 437 + } 438 + 439 + static int ps3vram_read(struct mtd_info *mtd, loff_t from, size_t len, 440 + size_t *retlen, u_char *buf) 441 + { 442 + struct ps3vram_priv *priv = mtd->priv; 443 + unsigned int cached, count; 444 + 445 + dev_dbg(priv->dev, "%s:%d: from=0x%08x len=0x%zx\n", __func__, __LINE__, 446 + (unsigned int)from, len); 447 + 448 + if (from >= mtd->size) 449 + return -EINVAL; 450 + 451 + if (len > 
mtd->size - from) 452 + len = mtd->size - from; 453 + 454 + /* Copy from vram to buf */ 455 + count = len; 456 + while (count) { 457 + unsigned int offset, avail; 458 + unsigned int entry; 459 + 460 + offset = (unsigned int) (from & (priv->cache.page_size - 1)); 461 + avail = priv->cache.page_size - offset; 462 + 463 + mutex_lock(&priv->lock); 464 + 465 + entry = ps3vram_cache_match(mtd, from); 466 + cached = CACHE_OFFSET + entry * priv->cache.page_size + offset; 467 + 468 + dev_dbg(priv->dev, "%s:%d: from=%08x cached=%08x offset=%08x " 469 + "avail=%08x count=%08x\n", __func__, __LINE__, 470 + (unsigned int)from, cached, offset, avail, count); 471 + 472 + if (avail > count) 473 + avail = count; 474 + memcpy(buf, priv->xdr_buf + cached, avail); 475 + 476 + mutex_unlock(&priv->lock); 477 + 478 + buf += avail; 479 + count -= avail; 480 + from += avail; 481 + } 482 + 483 + *retlen = len; 484 + return 0; 485 + } 486 + 487 + static int ps3vram_write(struct mtd_info *mtd, loff_t to, size_t len, 488 + size_t *retlen, const u_char *buf) 489 + { 490 + struct ps3vram_priv *priv = mtd->priv; 491 + unsigned int cached, count; 492 + 493 + if (to >= mtd->size) 494 + return -EINVAL; 495 + 496 + if (len > mtd->size - to) 497 + len = mtd->size - to; 498 + 499 + /* Copy from buf to vram */ 500 + count = len; 501 + while (count) { 502 + unsigned int offset, avail; 503 + unsigned int entry; 504 + 505 + offset = (unsigned int) (to & (priv->cache.page_size - 1)); 506 + avail = priv->cache.page_size - offset; 507 + 508 + mutex_lock(&priv->lock); 509 + 510 + entry = ps3vram_cache_match(mtd, to); 511 + cached = CACHE_OFFSET + entry * priv->cache.page_size + offset; 512 + 513 + dev_dbg(priv->dev, "%s:%d: to=%08x cached=%08x offset=%08x " 514 + "avail=%08x count=%08x\n", __func__, __LINE__, 515 + (unsigned int)to, cached, offset, avail, count); 516 + 517 + if (avail > count) 518 + avail = count; 519 + memcpy(priv->xdr_buf + cached, buf, avail); 520 + 521 + priv->cache.tags[entry].flags |= 
CACHE_PAGE_DIRTY; 522 + 523 + mutex_unlock(&priv->lock); 524 + 525 + buf += avail; 526 + count -= avail; 527 + to += avail; 528 + } 529 + 530 + *retlen = len; 531 + return 0; 532 + } 533 + 534 + static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev) 535 + { 536 + struct ps3vram_priv *priv; 537 + int status; 538 + u64 ddr_lpar; 539 + u64 ctrl_lpar; 540 + u64 info_lpar; 541 + u64 reports_lpar; 542 + u64 ddr_size; 543 + u64 reports_size; 544 + int ret = -ENOMEM; 545 + char *rest; 546 + 547 + ret = -EIO; 548 + ps3vram_mtd.priv = kzalloc(sizeof(struct ps3vram_priv), GFP_KERNEL); 549 + if (!ps3vram_mtd.priv) 550 + goto out; 551 + priv = ps3vram_mtd.priv; 552 + 553 + mutex_init(&priv->lock); 554 + priv->dev = &dev->core; 555 + 556 + /* Allocate XDR buffer (1MiB aligned) */ 557 + priv->xdr_buf = (void *)__get_free_pages(GFP_KERNEL, 558 + get_order(XDR_BUF_SIZE)); 559 + if (priv->xdr_buf == NULL) { 560 + dev_dbg(&dev->core, "%s:%d: could not allocate XDR buffer\n", 561 + __func__, __LINE__); 562 + ret = -ENOMEM; 563 + goto out_free_priv; 564 + } 565 + 566 + /* Put FIFO at begginning of XDR buffer */ 567 + priv->fifo_base = (u32 *) (priv->xdr_buf + FIFO_OFFSET); 568 + priv->fifo_ptr = priv->fifo_base; 569 + 570 + /* XXX: Need to open GPU, in case ps3fb or snd_ps3 aren't loaded */ 571 + if (ps3_open_hv_device(dev)) { 572 + dev_err(&dev->core, "%s:%d: ps3_open_hv_device failed\n", 573 + __func__, __LINE__); 574 + ret = -EAGAIN; 575 + goto out_close_gpu; 576 + } 577 + 578 + /* Request memory */ 579 + status = -1; 580 + ddr_size = memparse(size, &rest); 581 + if (*rest == '-') 582 + ddr_size -= ps3fb_videomemory.size; 583 + ddr_size = ALIGN(ddr_size, 1024*1024); 584 + if (ddr_size <= 0) { 585 + dev_err(&dev->core, "%s:%d: specified size is too small\n", 586 + __func__, __LINE__); 587 + ret = -EINVAL; 588 + goto out_close_gpu; 589 + } 590 + 591 + while (ddr_size > 0) { 592 + status = lv1_gpu_memory_allocate(ddr_size, 0, 0, 0, 0, 593 + &priv->memory_handle, 594 + 
&ddr_lpar); 595 + if (!status) 596 + break; 597 + ddr_size -= 1024*1024; 598 + } 599 + if (status || ddr_size <= 0) { 600 + dev_err(&dev->core, "%s:%d: lv1_gpu_memory_allocate failed\n", 601 + __func__, __LINE__); 602 + ret = -ENOMEM; 603 + goto out_free_xdr_buf; 604 + } 605 + 606 + /* Request context */ 607 + status = lv1_gpu_context_allocate(priv->memory_handle, 608 + 0, 609 + &priv->context_handle, 610 + &ctrl_lpar, 611 + &info_lpar, 612 + &reports_lpar, 613 + &reports_size); 614 + if (status) { 615 + dev_err(&dev->core, "%s:%d: lv1_gpu_context_allocate failed\n", 616 + __func__, __LINE__); 617 + ret = -ENOMEM; 618 + goto out_free_memory; 619 + } 620 + 621 + /* Map XDR buffer to RSX */ 622 + status = lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF, 623 + ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)), 624 + XDR_BUF_SIZE, 0); 625 + if (status) { 626 + dev_err(&dev->core, "%s:%d: lv1_gpu_context_iomap failed\n", 627 + __func__, __LINE__); 628 + ret = -ENOMEM; 629 + goto out_free_context; 630 + } 631 + 632 + priv->ddr_base = ioremap_flags(ddr_lpar, ddr_size, _PAGE_NO_CACHE); 633 + 634 + if (!priv->ddr_base) { 635 + dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__, 636 + __LINE__); 637 + ret = -ENOMEM; 638 + goto out_free_context; 639 + } 640 + 641 + priv->ctrl = ioremap(ctrl_lpar, 64 * 1024); 642 + if (!priv->ctrl) { 643 + dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__, 644 + __LINE__); 645 + ret = -ENOMEM; 646 + goto out_unmap_vram; 647 + } 648 + 649 + priv->reports = ioremap(reports_lpar, reports_size); 650 + if (!priv->reports) { 651 + dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__, 652 + __LINE__); 653 + ret = -ENOMEM; 654 + goto out_unmap_ctrl; 655 + } 656 + 657 + mutex_lock(&ps3_gpu_mutex); 658 + ps3vram_init_ring(&ps3vram_mtd); 659 + mutex_unlock(&ps3_gpu_mutex); 660 + 661 + ps3vram_mtd.name = "ps3vram"; 662 + ps3vram_mtd.size = ddr_size; 663 + ps3vram_mtd.flags = MTD_CAP_RAM; 664 + ps3vram_mtd.erase = ps3vram_erase; 665 + 
ps3vram_mtd.point = NULL; 666 + ps3vram_mtd.unpoint = NULL; 667 + ps3vram_mtd.read = ps3vram_read; 668 + ps3vram_mtd.write = ps3vram_write; 669 + ps3vram_mtd.owner = THIS_MODULE; 670 + ps3vram_mtd.type = MTD_RAM; 671 + ps3vram_mtd.erasesize = CACHE_PAGE_SIZE; 672 + ps3vram_mtd.writesize = 1; 673 + 674 + ps3vram_bind(&ps3vram_mtd); 675 + 676 + mutex_lock(&ps3_gpu_mutex); 677 + ret = ps3vram_wait_ring(&ps3vram_mtd, 100); 678 + mutex_unlock(&ps3_gpu_mutex); 679 + if (ret < 0) { 680 + dev_err(&dev->core, "%s:%d: failed to initialize channels\n", 681 + __func__, __LINE__); 682 + ret = -ETIMEDOUT; 683 + goto out_unmap_reports; 684 + } 685 + 686 + ps3vram_cache_init(&ps3vram_mtd); 687 + 688 + if (add_mtd_device(&ps3vram_mtd)) { 689 + dev_err(&dev->core, "%s:%d: add_mtd_device failed\n", 690 + __func__, __LINE__); 691 + ret = -EAGAIN; 692 + goto out_cache_cleanup; 693 + } 694 + 695 + dev_info(&dev->core, "reserved %u MiB of gpu memory\n", 696 + (unsigned int)(ddr_size / 1024 / 1024)); 697 + 698 + return 0; 699 + 700 + out_cache_cleanup: 701 + ps3vram_cache_cleanup(&ps3vram_mtd); 702 + out_unmap_reports: 703 + iounmap(priv->reports); 704 + out_unmap_ctrl: 705 + iounmap(priv->ctrl); 706 + out_unmap_vram: 707 + iounmap(priv->ddr_base); 708 + out_free_context: 709 + lv1_gpu_context_free(priv->context_handle); 710 + out_free_memory: 711 + lv1_gpu_memory_free(priv->memory_handle); 712 + out_close_gpu: 713 + ps3_close_hv_device(dev); 714 + out_free_xdr_buf: 715 + free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE)); 716 + out_free_priv: 717 + kfree(ps3vram_mtd.priv); 718 + ps3vram_mtd.priv = NULL; 719 + out: 720 + return ret; 721 + } 722 + 723 + static int ps3vram_shutdown(struct ps3_system_bus_device *dev) 724 + { 725 + struct ps3vram_priv *priv; 726 + 727 + priv = ps3vram_mtd.priv; 728 + 729 + del_mtd_device(&ps3vram_mtd); 730 + ps3vram_cache_cleanup(&ps3vram_mtd); 731 + iounmap(priv->reports); 732 + iounmap(priv->ctrl); 733 + iounmap(priv->ddr_base); 734 + 
lv1_gpu_context_free(priv->context_handle); 735 + lv1_gpu_memory_free(priv->memory_handle); 736 + ps3_close_hv_device(dev); 737 + free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE)); 738 + kfree(priv); 739 + return 0; 740 + } 741 + 742 + static struct ps3_system_bus_driver ps3vram_driver = { 743 + .match_id = PS3_MATCH_ID_GPU, 744 + .match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK, 745 + .core.name = DEVICE_NAME, 746 + .core.owner = THIS_MODULE, 747 + .probe = ps3vram_probe, 748 + .remove = ps3vram_shutdown, 749 + .shutdown = ps3vram_shutdown, 750 + }; 751 + 752 + static int __init ps3vram_init(void) 753 + { 754 + return ps3_system_bus_driver_register(&ps3vram_driver); 755 + } 756 + 757 + static void __exit ps3vram_exit(void) 758 + { 759 + ps3_system_bus_driver_unregister(&ps3vram_driver); 760 + } 761 + 762 + module_init(ps3vram_init); 763 + module_exit(ps3vram_exit); 764 + 765 + MODULE_LICENSE("GPL"); 766 + MODULE_AUTHOR("Jim Paris <jim@jtan.com>"); 767 + MODULE_DESCRIPTION("MTD driver for PS3 video RAM"); 768 + MODULE_ALIAS(PS3_MODULE_ALIAS_GPU_RAMDISK);
+1
drivers/scsi/Kconfig
··· 884 884 tristate "IBM Virtual SCSI support" 885 885 depends on PPC_PSERIES || PPC_ISERIES 886 886 select SCSI_SRP_ATTRS 887 + select VIOPATH if PPC_ISERIES 887 888 help 888 889 This is the IBM POWER Virtual SCSI Client 889 890
+18 -1
drivers/serial/Kconfig
··· 1320 1320 config SERIAL_OF_PLATFORM 1321 1321 tristate "Serial port on Open Firmware platform bus" 1322 1322 depends on PPC_OF 1323 - depends on SERIAL_8250 1323 + depends on SERIAL_8250 || SERIAL_OF_PLATFORM_NWPSERIAL 1324 1324 help 1325 1325 If you have a PowerPC based system that has serial ports 1326 1326 on a platform specific bus, you should enable this option. 1327 1327 Currently, only 8250 compatible ports are supported, but 1328 1328 others can easily be added. 1329 + 1330 + config SERIAL_OF_PLATFORM_NWPSERIAL 1331 + tristate "NWP serial port driver" 1332 + depends on PPC_OF && PPC_DCR 1333 + select SERIAL_OF_PLATFORM 1334 + select SERIAL_CORE_CONSOLE 1335 + select SERIAL_CORE 1336 + help 1337 + This driver supports the cell network processor nwp serial 1338 + device. 1339 + 1340 + config SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE 1341 + bool "Console on NWP serial port" 1342 + depends on SERIAL_OF_PLATFORM_NWPSERIAL=y 1343 + select SERIAL_CORE_CONSOLE 1344 + help 1345 + Support for Console on the NWP serial ports. 1329 1346 1330 1347 config SERIAL_QE 1331 1348 tristate "Freescale QUICC Engine serial port support"
+1
drivers/serial/Makefile
··· 72 72 obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o 73 73 obj-$(CONFIG_SERIAL_NETX) += netx-serial.o 74 74 obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o 75 + obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o 75 76 obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o 76 77 obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o 77 78 obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
+475
drivers/serial/nwpserial.c
··· 1 + /* 2 + * Serial Port driver for a NWP uart device 3 + * 4 + * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org> 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License 8 + * as published by the Free Software Foundation; either version 9 + * 2 of the License, or (at your option) any later version. 10 + * 11 + */ 12 + #include <linux/init.h> 13 + #include <linux/console.h> 14 + #include <linux/serial.h> 15 + #include <linux/serial_reg.h> 16 + #include <linux/serial_core.h> 17 + #include <linux/tty.h> 18 + #include <linux/irqreturn.h> 19 + #include <linux/mutex.h> 20 + #include <linux/of_platform.h> 21 + #include <linux/of_device.h> 22 + #include <linux/nwpserial.h> 23 + #include <asm/prom.h> 24 + #include <asm/dcr.h> 25 + 26 + #define NWPSERIAL_NR 2 27 + 28 + #define NWPSERIAL_STATUS_RXVALID 0x1 29 + #define NWPSERIAL_STATUS_TXFULL 0x2 30 + 31 + struct nwpserial_port { 32 + struct uart_port port; 33 + dcr_host_t dcr_host; 34 + unsigned int ier; 35 + unsigned int mcr; 36 + }; 37 + 38 + static DEFINE_MUTEX(nwpserial_mutex); 39 + static struct nwpserial_port nwpserial_ports[NWPSERIAL_NR]; 40 + 41 + static void wait_for_bits(struct nwpserial_port *up, int bits) 42 + { 43 + unsigned int status, tmout = 10000; 44 + 45 + /* Wait up to 10ms for the character(s) to be sent. 
*/ 46 + do { 47 + status = dcr_read(up->dcr_host, UART_LSR); 48 + 49 + if (--tmout == 0) 50 + break; 51 + udelay(1); 52 + } while ((status & bits) != bits); 53 + } 54 + 55 + #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE 56 + static void nwpserial_console_putchar(struct uart_port *port, int c) 57 + { 58 + struct nwpserial_port *up; 59 + up = container_of(port, struct nwpserial_port, port); 60 + /* check if tx buffer is full */ 61 + wait_for_bits(up, UART_LSR_THRE); 62 + dcr_write(up->dcr_host, UART_TX, c); 63 + up->port.icount.tx++; 64 + } 65 + 66 + static void 67 + nwpserial_console_write(struct console *co, const char *s, unsigned int count) 68 + { 69 + struct nwpserial_port *up = &nwpserial_ports[co->index]; 70 + unsigned long flags; 71 + int locked = 1; 72 + 73 + if (oops_in_progress) 74 + locked = spin_trylock_irqsave(&up->port.lock, flags); 75 + else 76 + spin_lock_irqsave(&up->port.lock, flags); 77 + 78 + /* save and disable interrupt */ 79 + up->ier = dcr_read(up->dcr_host, UART_IER); 80 + dcr_write(up->dcr_host, UART_IER, up->ier & ~UART_IER_RDI); 81 + 82 + uart_console_write(&up->port, s, count, nwpserial_console_putchar); 83 + 84 + /* wait for transmitter to become emtpy */ 85 + while ((dcr_read(up->dcr_host, UART_LSR) & UART_LSR_THRE) == 0) 86 + cpu_relax(); 87 + 88 + /* restore interrupt state */ 89 + dcr_write(up->dcr_host, UART_IER, up->ier); 90 + 91 + if (locked) 92 + spin_unlock_irqrestore(&up->port.lock, flags); 93 + } 94 + 95 + static struct uart_driver nwpserial_reg; 96 + static struct console nwpserial_console = { 97 + .name = "ttySQ", 98 + .write = nwpserial_console_write, 99 + .device = uart_console_device, 100 + .flags = CON_PRINTBUFFER, 101 + .index = -1, 102 + .data = &nwpserial_reg, 103 + }; 104 + #define NWPSERIAL_CONSOLE (&nwpserial_console) 105 + #else 106 + #define NWPSERIAL_CONSOLE NULL 107 + #endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */ 108 + 109 + 
/**************************************************************************/ 110 + 111 + static int nwpserial_request_port(struct uart_port *port) 112 + { 113 + return 0; 114 + } 115 + 116 + static void nwpserial_release_port(struct uart_port *port) 117 + { 118 + /* N/A */ 119 + } 120 + 121 + static void nwpserial_config_port(struct uart_port *port, int flags) 122 + { 123 + port->type = PORT_NWPSERIAL; 124 + } 125 + 126 + static irqreturn_t nwpserial_interrupt(int irq, void *dev_id) 127 + { 128 + struct nwpserial_port *up = dev_id; 129 + struct tty_struct *tty = up->port.info->port.tty; 130 + irqreturn_t ret; 131 + unsigned int iir; 132 + unsigned char ch; 133 + 134 + spin_lock(&up->port.lock); 135 + 136 + /* check if the uart was the interrupt source. */ 137 + iir = dcr_read(up->dcr_host, UART_IIR); 138 + if (!iir) { 139 + ret = IRQ_NONE; 140 + goto out; 141 + } 142 + 143 + do { 144 + up->port.icount.rx++; 145 + ch = dcr_read(up->dcr_host, UART_RX); 146 + if (up->port.ignore_status_mask != NWPSERIAL_STATUS_RXVALID) 147 + tty_insert_flip_char(tty, ch, TTY_NORMAL); 148 + } while (dcr_read(up->dcr_host, UART_RX) & UART_LSR_DR); 149 + 150 + tty_flip_buffer_push(tty); 151 + ret = IRQ_HANDLED; 152 + 153 + out: 154 + spin_unlock(&up->port.lock); 155 + return ret; 156 + } 157 + 158 + static int nwpserial_startup(struct uart_port *port) 159 + { 160 + struct nwpserial_port *up; 161 + int err; 162 + 163 + up = container_of(port, struct nwpserial_port, port); 164 + 165 + /* disable flow control by default */ 166 + up->mcr = dcr_read(up->dcr_host, UART_MCR) & ~UART_MCR_AFE; 167 + dcr_write(up->dcr_host, UART_MCR, up->mcr); 168 + 169 + /* register interrupt handler */ 170 + err = request_irq(up->port.irq, nwpserial_interrupt, 171 + IRQF_SHARED, "nwpserial", up); 172 + if (err) 173 + return err; 174 + 175 + /* enable interrupts */ 176 + up->ier = UART_IER_RDI; 177 + dcr_write(up->dcr_host, UART_IER, up->ier); 178 + 179 + /* enable receiving */ 180 + up->port.ignore_status_mask 
&= ~NWPSERIAL_STATUS_RXVALID; 181 + 182 + return 0; 183 + } 184 + 185 + static void nwpserial_shutdown(struct uart_port *port) 186 + { 187 + struct nwpserial_port *up; 188 + up = container_of(port, struct nwpserial_port, port); 189 + 190 + /* disable receiving */ 191 + up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID; 192 + 193 + /* disable interrupts from this port */ 194 + up->ier = 0; 195 + dcr_write(up->dcr_host, UART_IER, up->ier); 196 + 197 + /* free irq */ 198 + free_irq(up->port.irq, port); 199 + } 200 + 201 + static int nwpserial_verify_port(struct uart_port *port, 202 + struct serial_struct *ser) 203 + { 204 + return -EINVAL; 205 + } 206 + 207 + static const char *nwpserial_type(struct uart_port *port) 208 + { 209 + return port->type == PORT_NWPSERIAL ? "nwpserial" : NULL; 210 + } 211 + 212 + static void nwpserial_set_termios(struct uart_port *port, 213 + struct ktermios *termios, struct ktermios *old) 214 + { 215 + struct nwpserial_port *up; 216 + up = container_of(port, struct nwpserial_port, port); 217 + 218 + up->port.read_status_mask = NWPSERIAL_STATUS_RXVALID 219 + | NWPSERIAL_STATUS_TXFULL; 220 + 221 + up->port.ignore_status_mask = 0; 222 + /* ignore all characters if CREAD is not set */ 223 + if ((termios->c_cflag & CREAD) == 0) 224 + up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID; 225 + 226 + /* Copy back the old hardware settings */ 227 + if (old) 228 + tty_termios_copy_hw(termios, old); 229 + } 230 + 231 + static void nwpserial_break_ctl(struct uart_port *port, int ctl) 232 + { 233 + /* N/A */ 234 + } 235 + 236 + static void nwpserial_enable_ms(struct uart_port *port) 237 + { 238 + /* N/A */ 239 + } 240 + 241 + static void nwpserial_stop_rx(struct uart_port *port) 242 + { 243 + struct nwpserial_port *up; 244 + up = container_of(port, struct nwpserial_port, port); 245 + /* don't forward any more data (like !CREAD) */ 246 + up->port.ignore_status_mask = NWPSERIAL_STATUS_RXVALID; 247 + } 248 + 249 + static void 
nwpserial_putchar(struct nwpserial_port *up, unsigned char c) 250 + { 251 + /* check if tx buffer is full */ 252 + wait_for_bits(up, UART_LSR_THRE); 253 + dcr_write(up->dcr_host, UART_TX, c); 254 + up->port.icount.tx++; 255 + } 256 + 257 + static void nwpserial_start_tx(struct uart_port *port) 258 + { 259 + struct nwpserial_port *up; 260 + struct circ_buf *xmit; 261 + up = container_of(port, struct nwpserial_port, port); 262 + xmit = &up->port.info->xmit; 263 + 264 + if (port->x_char) { 265 + nwpserial_putchar(up, up->port.x_char); 266 + port->x_char = 0; 267 + } 268 + 269 + while (!(uart_circ_empty(xmit) || uart_tx_stopped(&up->port))) { 270 + nwpserial_putchar(up, xmit->buf[xmit->tail]); 271 + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1); 272 + } 273 + } 274 + 275 + static unsigned int nwpserial_get_mctrl(struct uart_port *port) 276 + { 277 + return 0; 278 + } 279 + 280 + static void nwpserial_set_mctrl(struct uart_port *port, unsigned int mctrl) 281 + { 282 + /* N/A */ 283 + } 284 + 285 + static void nwpserial_stop_tx(struct uart_port *port) 286 + { 287 + /* N/A */ 288 + } 289 + 290 + static unsigned int nwpserial_tx_empty(struct uart_port *port) 291 + { 292 + struct nwpserial_port *up; 293 + unsigned long flags; 294 + int ret; 295 + up = container_of(port, struct nwpserial_port, port); 296 + 297 + spin_lock_irqsave(&up->port.lock, flags); 298 + ret = dcr_read(up->dcr_host, UART_LSR); 299 + spin_unlock_irqrestore(&up->port.lock, flags); 300 + 301 + return ret & UART_LSR_TEMT ? 
TIOCSER_TEMT : 0; 302 + } 303 + 304 + static struct uart_ops nwpserial_pops = { 305 + .tx_empty = nwpserial_tx_empty, 306 + .set_mctrl = nwpserial_set_mctrl, 307 + .get_mctrl = nwpserial_get_mctrl, 308 + .stop_tx = nwpserial_stop_tx, 309 + .start_tx = nwpserial_start_tx, 310 + .stop_rx = nwpserial_stop_rx, 311 + .enable_ms = nwpserial_enable_ms, 312 + .break_ctl = nwpserial_break_ctl, 313 + .startup = nwpserial_startup, 314 + .shutdown = nwpserial_shutdown, 315 + .set_termios = nwpserial_set_termios, 316 + .type = nwpserial_type, 317 + .release_port = nwpserial_release_port, 318 + .request_port = nwpserial_request_port, 319 + .config_port = nwpserial_config_port, 320 + .verify_port = nwpserial_verify_port, 321 + }; 322 + 323 + static struct uart_driver nwpserial_reg = { 324 + .owner = THIS_MODULE, 325 + .driver_name = "nwpserial", 326 + .dev_name = "ttySQ", 327 + .major = TTY_MAJOR, 328 + .minor = 68, 329 + .nr = NWPSERIAL_NR, 330 + .cons = NWPSERIAL_CONSOLE, 331 + }; 332 + 333 + int nwpserial_register_port(struct uart_port *port) 334 + { 335 + struct nwpserial_port *up = NULL; 336 + int ret = -1; 337 + int i; 338 + static int first = 1; 339 + int dcr_len; 340 + int dcr_base; 341 + struct device_node *dn; 342 + 343 + mutex_lock(&nwpserial_mutex); 344 + 345 + dn = to_of_device(port->dev)->node; 346 + if (dn == NULL) 347 + goto out; 348 + 349 + /* get dcr base. 
*/ 350 + dcr_base = dcr_resource_start(dn, 0); 351 + 352 + /* find matching entry */ 353 + for (i = 0; i < NWPSERIAL_NR; i++) 354 + if (nwpserial_ports[i].port.iobase == dcr_base) { 355 + up = &nwpserial_ports[i]; 356 + break; 357 + } 358 + 359 + /* we didn't find a mtching entry, search for a free port */ 360 + if (up == NULL) 361 + for (i = 0; i < NWPSERIAL_NR; i++) 362 + if (nwpserial_ports[i].port.type == PORT_UNKNOWN && 363 + nwpserial_ports[i].port.iobase == 0) { 364 + up = &nwpserial_ports[i]; 365 + break; 366 + } 367 + 368 + if (up == NULL) { 369 + ret = -EBUSY; 370 + goto out; 371 + } 372 + 373 + if (first) 374 + uart_register_driver(&nwpserial_reg); 375 + first = 0; 376 + 377 + up->port.membase = port->membase; 378 + up->port.irq = port->irq; 379 + up->port.uartclk = port->uartclk; 380 + up->port.fifosize = port->fifosize; 381 + up->port.regshift = port->regshift; 382 + up->port.iotype = port->iotype; 383 + up->port.flags = port->flags; 384 + up->port.mapbase = port->mapbase; 385 + up->port.private_data = port->private_data; 386 + 387 + if (port->dev) 388 + up->port.dev = port->dev; 389 + 390 + if (up->port.iobase != dcr_base) { 391 + up->port.ops = &nwpserial_pops; 392 + up->port.fifosize = 16; 393 + 394 + spin_lock_init(&up->port.lock); 395 + 396 + up->port.iobase = dcr_base; 397 + dcr_len = dcr_resource_len(dn, 0); 398 + 399 + up->dcr_host = dcr_map(dn, dcr_base, dcr_len); 400 + if (!DCR_MAP_OK(up->dcr_host)) { 401 + printk(KERN_ERR "Cannot map DCR resources for NWPSERIAL"); 402 + goto out; 403 + } 404 + } 405 + 406 + ret = uart_add_one_port(&nwpserial_reg, &up->port); 407 + if (ret == 0) 408 + ret = up->port.line; 409 + 410 + out: 411 + mutex_unlock(&nwpserial_mutex); 412 + 413 + return ret; 414 + } 415 + EXPORT_SYMBOL(nwpserial_register_port); 416 + 417 + void nwpserial_unregister_port(int line) 418 + { 419 + struct nwpserial_port *up = &nwpserial_ports[line]; 420 + mutex_lock(&nwpserial_mutex); 421 + uart_remove_one_port(&nwpserial_reg, &up->port); 
422 + 423 + up->port.type = PORT_UNKNOWN; 424 + 425 + mutex_unlock(&nwpserial_mutex); 426 + } 427 + EXPORT_SYMBOL(nwpserial_unregister_port); 428 + 429 + #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE 430 + static int __init nwpserial_console_init(void) 431 + { 432 + struct nwpserial_port *up = NULL; 433 + struct device_node *dn; 434 + const char *name; 435 + int dcr_base; 436 + int dcr_len; 437 + int i; 438 + 439 + /* search for a free port */ 440 + for (i = 0; i < NWPSERIAL_NR; i++) 441 + if (nwpserial_ports[i].port.type == PORT_UNKNOWN) { 442 + up = &nwpserial_ports[i]; 443 + break; 444 + } 445 + 446 + if (up == NULL) 447 + return -1; 448 + 449 + name = of_get_property(of_chosen, "linux,stdout-path", NULL); 450 + if (name == NULL) 451 + return -1; 452 + 453 + dn = of_find_node_by_path(name); 454 + if (!dn) 455 + return -1; 456 + 457 + spin_lock_init(&up->port.lock); 458 + up->port.ops = &nwpserial_pops; 459 + up->port.type = PORT_NWPSERIAL; 460 + up->port.fifosize = 16; 461 + 462 + dcr_base = dcr_resource_start(dn, 0); 463 + dcr_len = dcr_resource_len(dn, 0); 464 + up->port.iobase = dcr_base; 465 + 466 + up->dcr_host = dcr_map(dn, dcr_base, dcr_len); 467 + if (!DCR_MAP_OK(up->dcr_host)) { 468 + printk("Cannot map DCR resources for SERIAL"); 469 + return -1; 470 + } 471 + register_console(&nwpserial_console); 472 + return 0; 473 + } 474 + console_initcall(nwpserial_console_init); 475 + #endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */
+19
drivers/serial/of_serial.c
··· 14 14 #include <linux/serial_core.h> 15 15 #include <linux/serial_8250.h> 16 16 #include <linux/of_platform.h> 17 + #include <linux/nwpserial.h> 17 18 18 19 #include <asm/prom.h> 19 20 ··· 100 99 goto out; 101 100 102 101 switch (port_type) { 102 + #ifdef CONFIG_SERIAL_8250 103 103 case PORT_8250 ... PORT_MAX_8250: 104 104 ret = serial8250_register_port(&port); 105 105 break; 106 + #endif 107 + #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL 108 + case PORT_NWPSERIAL: 109 + ret = nwpserial_register_port(&port); 110 + break; 111 + #endif 106 112 default: 107 113 /* need to add code for these */ 108 114 case PORT_UNKNOWN: ··· 137 129 { 138 130 struct of_serial_info *info = ofdev->dev.driver_data; 139 131 switch (info->type) { 132 + #ifdef CONFIG_SERIAL_8250 140 133 case PORT_8250 ... PORT_MAX_8250: 141 134 serial8250_unregister_port(info->line); 142 135 break; 136 + #endif 137 + #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL 138 + case PORT_NWPSERIAL: 139 + nwpserial_unregister_port(info->line); 140 + break; 141 + #endif 143 142 default: 144 143 /* need to add code for these */ 145 144 break; ··· 163 148 { .type = "serial", .compatible = "ns16450", .data = (void *)PORT_16450, }, 164 149 { .type = "serial", .compatible = "ns16550", .data = (void *)PORT_16550, }, 165 150 { .type = "serial", .compatible = "ns16750", .data = (void *)PORT_16750, }, 151 + #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL 152 + { .type = "serial", .compatible = "ibm,qpace-nwp-serial", 153 + .data = (void *)PORT_NWPSERIAL, }, 154 + #endif 166 155 { .type = "serial", .data = (void *)PORT_UNKNOWN, }, 167 156 { /* end of list */ }, 168 157 };
+18
include/linux/nwpserial.h
/*
 * Serial Port driver for a NWP uart device
 *
 * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#ifndef _NWPSERIAL_H
#define _NWPSERIAL_H

/*
 * Forward declaration so this header is self-contained: users only
 * pass pointers, so the full struct definition is not required here.
 */
struct uart_port;

/* Register the OF-probed port; returns the line number or an error. */
int nwpserial_register_port(struct uart_port *port);
/* Remove a previously registered line. */
void nwpserial_unregister_port(int line);

#endif /* _NWPSERIAL_H */
+3
include/linux/serial_core.h
··· 161 161 162 162 #define PORT_S3C6400 84 163 163 164 + /* NWPSERIAL */ 165 + #define PORT_NWPSERIAL 85 166 + 164 167 #ifdef __KERNEL__ 165 168 166 169 #include <linux/compiler.h>