Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
sparc64: Fix IRQ ->set_affinity() methods.
sparc: cpumask_of_node() should handle -1 as a node
sparc64: Update defconfig.
sparc: Add missing SW perf fault events.
sparc64: Fully support both performance counters.
sparc64: Add perf callchain support.
sparc: convert to arch_gettimeoffset()
sparc: leds_resource.end assigned to itself in clock_board_probe()
sparc32: Fix page_to_phys().
sparc: Simplify param.h by simply including <asm-generic/param.h>
sparc32: Update defconfig.
SPARC: use helpers for rlimits
sparc: copy_from_user() should not return -EFAULT

+744 -419
+4 -1
arch/sparc/Kconfig
···
  64   64 	default 64 if SPARC64
  65   65 
  66   66 config GENERIC_TIME
       67 +	def_bool y
       68 +
       69 +config ARCH_USES_GETTIMEOFFSET
  67   70 	bool
  68      -	default y if SPARC64
       71 +	default y if SPARC32
  69   72 
  70   73 config GENERIC_CMOS_UPDATE
  71   74 	bool
+69 -18
arch/sparc/configs/sparc32_defconfig
··· 1 1 # 2 2 # Automatically generated make config: don't edit 3 - # Linux kernel version: 2.6.31 4 - # Wed Sep 16 00:03:43 2009 3 + # Linux kernel version: 2.6.33-rc2 4 + # Mon Jan 11 23:20:31 2010 5 5 # 6 6 # CONFIG_64BIT is not set 7 7 CONFIG_SPARC=y ··· 41 41 # 42 42 CONFIG_TREE_RCU=y 43 43 # CONFIG_TREE_PREEMPT_RCU is not set 44 + # CONFIG_TINY_RCU is not set 44 45 # CONFIG_RCU_TRACE is not set 45 46 CONFIG_RCU_FANOUT=32 46 47 # CONFIG_RCU_FANOUT_EXACT is not set ··· 89 88 CONFIG_EVENTFD=y 90 89 CONFIG_SHMEM=y 91 90 CONFIG_AIO=y 92 - CONFIG_HAVE_PERF_COUNTERS=y 91 + CONFIG_HAVE_PERF_EVENTS=y 92 + CONFIG_PERF_USE_VMALLOC=y 93 93 94 94 # 95 - # Performance Counters 95 + # Kernel Performance Events And Counters 96 96 # 97 + # CONFIG_PERF_EVENTS is not set 97 98 # CONFIG_PERF_COUNTERS is not set 98 99 CONFIG_VM_EVENT_COUNTERS=y 99 100 CONFIG_PCI_QUIRKS=y 100 - # CONFIG_STRIP_ASM_SYMS is not set 101 101 CONFIG_COMPAT_BRK=y 102 102 CONFIG_SLAB=y 103 103 # CONFIG_SLUB is not set 104 104 # CONFIG_SLOB is not set 105 105 # CONFIG_PROFILING is not set 106 - # CONFIG_MARKERS is not set 107 106 CONFIG_HAVE_OPROFILE=y 108 107 CONFIG_HAVE_ARCH_TRACEHOOK=y 109 108 CONFIG_HAVE_DMA_ATTRS=y ··· 132 131 # IO Schedulers 133 132 # 134 133 CONFIG_IOSCHED_NOOP=y 135 - CONFIG_IOSCHED_AS=y 136 134 CONFIG_IOSCHED_DEADLINE=y 137 135 CONFIG_IOSCHED_CFQ=y 138 - # CONFIG_DEFAULT_AS is not set 139 136 # CONFIG_DEFAULT_DEADLINE is not set 140 137 CONFIG_DEFAULT_CFQ=y 141 138 # CONFIG_DEFAULT_NOOP is not set 142 139 CONFIG_DEFAULT_IOSCHED="cfq" 140 + # CONFIG_INLINE_SPIN_TRYLOCK is not set 141 + # CONFIG_INLINE_SPIN_TRYLOCK_BH is not set 142 + # CONFIG_INLINE_SPIN_LOCK is not set 143 + # CONFIG_INLINE_SPIN_LOCK_BH is not set 144 + # CONFIG_INLINE_SPIN_LOCK_IRQ is not set 145 + # CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set 146 + CONFIG_INLINE_SPIN_UNLOCK=y 147 + # CONFIG_INLINE_SPIN_UNLOCK_BH is not set 148 + CONFIG_INLINE_SPIN_UNLOCK_IRQ=y 149 + # CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set 150 + # CONFIG_INLINE_READ_TRYLOCK is not set 151 + # CONFIG_INLINE_READ_LOCK is not set 152 + # CONFIG_INLINE_READ_LOCK_BH is not set 153 + # CONFIG_INLINE_READ_LOCK_IRQ is not set 154 + # CONFIG_INLINE_READ_LOCK_IRQSAVE is not set 155 + CONFIG_INLINE_READ_UNLOCK=y 156 + # CONFIG_INLINE_READ_UNLOCK_BH is not set 157 + CONFIG_INLINE_READ_UNLOCK_IRQ=y 158 + # CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set 159 + # CONFIG_INLINE_WRITE_TRYLOCK is not set 160 + # CONFIG_INLINE_WRITE_LOCK is not set 161 + # CONFIG_INLINE_WRITE_LOCK_BH is not set 162 + # CONFIG_INLINE_WRITE_LOCK_IRQ is not set 163 + # CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set 164 + CONFIG_INLINE_WRITE_UNLOCK=y 165 + # CONFIG_INLINE_WRITE_UNLOCK_BH is not set 166 + CONFIG_INLINE_WRITE_UNLOCK_IRQ=y 167 + # CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set 168 + # CONFIG_MUTEX_SPIN_ON_OWNER is not set 143 169 # CONFIG_FREEZER is not set 144 170 145 171 # ··· 196 168 # CONFIG_PHYS_ADDR_T_64BIT is not set 197 169 CONFIG_ZONE_DMA_FLAG=1 198 170 CONFIG_BOUNCE=y 199 - CONFIG_HAVE_MLOCK=y 200 - CONFIG_HAVE_MLOCKED_PAGE_BIT=y 171 + # CONFIG_KSM is not set 201 172 CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 202 173 CONFIG_SUN_PM=y 203 174 # CONFIG_SPARC_LED is not set ··· 284 257 CONFIG_INET6_XFRM_MODE_BEET=m 285 258 # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set 286 259 CONFIG_IPV6_SIT=m 260 + # CONFIG_IPV6_SIT_6RD is not set 287 261 CONFIG_IPV6_NDISC_NODETYPE=y 288 262 CONFIG_IPV6_TUNNEL=m 289 263 # CONFIG_IPV6_MULTIPLE_TABLES is not set ··· 323 295 # CONFIG_AF_RXRPC is not 
set 324 296 CONFIG_WIRELESS=y 325 297 # CONFIG_CFG80211 is not set 326 - CONFIG_CFG80211_DEFAULT_PS_VALUE=0 327 - CONFIG_WIRELESS_OLD_REGULATORY=y 328 - # CONFIG_WIRELESS_EXT is not set 329 298 # CONFIG_LIB80211 is not set 330 299 331 300 # ··· 360 335 # CONFIG_BLK_DEV_COW_COMMON is not set 361 336 CONFIG_BLK_DEV_LOOP=m 362 337 CONFIG_BLK_DEV_CRYPTOLOOP=m 338 + 339 + # 340 + # DRBD disabled because PROC_FS, INET or CONNECTOR not selected 341 + # 363 342 # CONFIG_BLK_DEV_NBD is not set 364 343 # CONFIG_BLK_DEV_SX8 is not set 365 344 CONFIG_BLK_DEV_RAM=y ··· 427 398 # CONFIG_ISCSI_TCP is not set 428 399 # CONFIG_SCSI_CXGB3_ISCSI is not set 429 400 # CONFIG_SCSI_BNX2_ISCSI is not set 401 + # CONFIG_BE2ISCSI is not set 430 402 # CONFIG_BLK_DEV_3W_XXXX_RAID is not set 403 + # CONFIG_SCSI_HPSA is not set 431 404 # CONFIG_SCSI_3W_9XXX is not set 405 + # CONFIG_SCSI_3W_SAS is not set 432 406 # CONFIG_SCSI_ACARD is not set 433 407 # CONFIG_SCSI_AACRAID is not set 434 408 # CONFIG_SCSI_AIC7XXX is not set ··· 466 434 # CONFIG_SCSI_DEBUG is not set 467 435 CONFIG_SCSI_SUNESP=y 468 436 # CONFIG_SCSI_PMCRAID is not set 437 + # CONFIG_SCSI_PM8001 is not set 469 438 # CONFIG_SCSI_SRP is not set 439 + # CONFIG_SCSI_BFA_FC is not set 470 440 # CONFIG_SCSI_DH is not set 471 441 # CONFIG_SCSI_OSD_INITIATOR is not set 472 442 # CONFIG_ATA is not set ··· 484 450 # 485 451 486 452 # 487 - # See the help texts for more information. 453 + # The newer stack is recommended. 488 454 # 489 455 # CONFIG_FIREWIRE is not set 490 456 # CONFIG_IEEE1394 is not set ··· 521 487 # CONFIG_NET_PCI is not set 522 488 # CONFIG_B44 is not set 523 489 # CONFIG_KS8842 is not set 490 + # CONFIG_KS8851_MLL is not set 524 491 # CONFIG_ATL2 is not set 525 492 CONFIG_NETDEV_1000=y 526 493 # CONFIG_ACENIC is not set ··· 581 546 # CONFIG_NETCONSOLE is not set 582 547 # CONFIG_NETPOLL is not set 583 548 # CONFIG_NET_POLL_CONTROLLER is not set 549 + # CONFIG_VMXNET3 is not set 584 550 # CONFIG_ISDN is not set 585 551 # CONFIG_PHONE is not set 586 552 ··· 591 555 CONFIG_INPUT=y 592 556 # CONFIG_INPUT_FF_MEMLESS is not set 593 557 # CONFIG_INPUT_POLLDEV is not set 558 + # CONFIG_INPUT_SPARSEKMAP is not set 594 559 595 560 # 596 561 # Userland interfaces ··· 611 574 CONFIG_KEYBOARD_ATKBD=m 612 575 # CONFIG_KEYBOARD_LKKBD is not set 613 576 # CONFIG_KEYBOARD_NEWTON is not set 577 + # CONFIG_KEYBOARD_OPENCORES is not set 614 578 # CONFIG_KEYBOARD_STOWAWAY is not set 615 579 CONFIG_KEYBOARD_SUNKBD=m 616 580 # CONFIG_KEYBOARD_XTKBD is not set ··· 642 604 # CONFIG_SERIO_PCIPS2 is not set 643 605 CONFIG_SERIO_LIBPS2=m 644 606 # CONFIG_SERIO_RAW is not set 607 + # CONFIG_SERIO_ALTERA_PS2 is not set 645 608 # CONFIG_GAMEPORT is not set 646 609 647 610 # ··· 675 636 CONFIG_SERIAL_CORE_CONSOLE=y 676 637 CONFIG_CONSOLE_POLL=y 677 638 # CONFIG_SERIAL_JSM is not set 639 + # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set 678 640 CONFIG_UNIX98_PTYS=y 679 641 # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 680 642 CONFIG_LEGACY_PTYS=y ··· 701 661 # CONFIG_POWER_SUPPLY is not set 702 662 CONFIG_HWMON=y 703 663 # CONFIG_HWMON_VID is not set 664 + # CONFIG_HWMON_DEBUG_CHIP is not set 665 + 666 + # 667 + # Native drivers 668 + # 704 669 # CONFIG_SENSORS_I5K_AMB is not set 705 670 # CONFIG_SENSORS_F71805F is not set 706 671 # CONFIG_SENSORS_F71882FG is not set ··· 720 675 # CONFIG_SENSORS_VT8231 is not set 721 676 # CONFIG_SENSORS_W83627HF is not set 722 677 # CONFIG_SENSORS_W83627EHF is not set 723 - # CONFIG_HWMON_DEBUG_CHIP is not set 724 678 # CONFIG_THERMAL is 
not set 725 - # CONFIG_THERMAL_HWMON is not set 726 679 # CONFIG_WATCHDOG is not set 727 680 CONFIG_SSB_POSSIBLE=y 728 681 ··· 742 699 # 743 700 # Graphics support 744 701 # 702 + CONFIG_VGA_ARB=y 745 703 # CONFIG_VGASTATE is not set 746 704 # CONFIG_VIDEO_OUTPUT_CONTROL is not set 747 705 # CONFIG_FB is not set ··· 820 776 # CONFIG_RTC_DRV_M48T86 is not set 821 777 # CONFIG_RTC_DRV_M48T35 is not set 822 778 CONFIG_RTC_DRV_M48T59=y 779 + # CONFIG_RTC_DRV_MSM6242 is not set 823 780 # CONFIG_RTC_DRV_BQ4802 is not set 781 + # CONFIG_RTC_DRV_RP5C01 is not set 824 782 # CONFIG_RTC_DRV_V3020 is not set 825 783 826 784 # ··· 1001 955 CONFIG_ENABLE_MUST_CHECK=y 1002 956 CONFIG_FRAME_WARN=1024 1003 957 CONFIG_MAGIC_SYSRQ=y 958 + # CONFIG_STRIP_ASM_SYMS is not set 1004 959 # CONFIG_UNUSED_SYMBOLS is not set 1005 960 # CONFIG_DEBUG_FS is not set 1006 961 # CONFIG_HEADERS_CHECK is not set ··· 1050 1003 CONFIG_KGDB_SERIAL_CONSOLE=y 1051 1004 CONFIG_KGDB_TESTS=y 1052 1005 # CONFIG_KGDB_TESTS_ON_BOOT is not set 1053 - # CONFIG_KMEMCHECK is not set 1054 1006 # CONFIG_DEBUG_STACK_USAGE is not set 1055 1007 # CONFIG_STACK_DEBUG is not set 1008 + # CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set 1056 1009 1057 1010 # 1058 1011 # Security options ··· 1060 1013 # CONFIG_KEYS is not set 1061 1014 # CONFIG_SECURITY is not set 1062 1015 # CONFIG_SECURITYFS is not set 1063 - # CONFIG_SECURITY_FILE_CAPABILITIES is not set 1016 + # CONFIG_DEFAULT_SECURITY_SELINUX is not set 1017 + # CONFIG_DEFAULT_SECURITY_SMACK is not set 1018 + # CONFIG_DEFAULT_SECURITY_TOMOYO is not set 1019 + CONFIG_DEFAULT_SECURITY_DAC=y 1020 + CONFIG_DEFAULT_SECURITY="" 1064 1021 CONFIG_CRYPTO=y 1065 1022 1066 1023 #
+100 -31
arch/sparc/configs/sparc64_defconfig
··· 1 1 # 2 2 # Automatically generated make config: don't edit 3 - # Linux kernel version: 2.6.31 4 - # Tue Sep 15 17:06:03 2009 3 + # Linux kernel version: 2.6.33-rc2 4 + # Wed Jan 20 16:31:47 2010 5 5 # 6 6 CONFIG_64BIT=y 7 7 CONFIG_SPARC=y ··· 20 20 CONFIG_AUDIT_ARCH=y 21 21 CONFIG_HAVE_SETUP_PER_CPU_AREA=y 22 22 CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y 23 + CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y 23 24 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y 24 25 CONFIG_MMU=y 25 26 CONFIG_ARCH_NO_VIRT_TO_BUS=y ··· 51 50 # 52 51 CONFIG_TREE_RCU=y 53 52 # CONFIG_TREE_PREEMPT_RCU is not set 53 + # CONFIG_TINY_RCU is not set 54 54 # CONFIG_RCU_TRACE is not set 55 55 CONFIG_RCU_FANOUT=64 56 56 # CONFIG_RCU_FANOUT_EXACT is not set ··· 64 62 CONFIG_USER_SCHED=y 65 63 # CONFIG_CGROUP_SCHED is not set 66 64 # CONFIG_CGROUPS is not set 67 - CONFIG_SYSFS_DEPRECATED=y 68 - CONFIG_SYSFS_DEPRECATED_V2=y 65 + # CONFIG_SYSFS_DEPRECATED_V2 is not set 69 66 CONFIG_RELAY=y 70 67 CONFIG_NAMESPACES=y 71 68 # CONFIG_UTS_NS is not set ··· 98 97 CONFIG_EVENTFD=y 99 98 CONFIG_SHMEM=y 100 99 CONFIG_AIO=y 101 - CONFIG_HAVE_PERF_COUNTERS=y 100 + CONFIG_HAVE_PERF_EVENTS=y 101 + CONFIG_PERF_USE_VMALLOC=y 102 102 103 103 # 104 - # Performance Counters 104 + # Kernel Performance Events And Counters 105 105 # 106 - CONFIG_PERF_COUNTERS=y 106 + CONFIG_PERF_EVENTS=y 107 107 CONFIG_EVENT_PROFILE=y 108 + CONFIG_PERF_COUNTERS=y 109 + # CONFIG_DEBUG_PERF_USE_VMALLOC is not set 108 110 CONFIG_VM_EVENT_COUNTERS=y 109 111 CONFIG_PCI_QUIRKS=y 110 112 CONFIG_SLUB_DEBUG=y 111 - # CONFIG_STRIP_ASM_SYMS is not set 112 113 # CONFIG_COMPAT_BRK is not set 113 114 # CONFIG_SLAB is not set 114 115 CONFIG_SLUB=y 115 116 # CONFIG_SLOB is not set 116 117 CONFIG_PROFILING=y 117 118 CONFIG_TRACEPOINTS=y 118 - CONFIG_MARKERS=y 119 119 CONFIG_OPROFILE=m 120 120 CONFIG_HAVE_OPROFILE=y 121 121 CONFIG_KPROBES=y ··· 154 152 # IO Schedulers 155 153 # 156 154 CONFIG_IOSCHED_NOOP=y 157 - CONFIG_IOSCHED_AS=y 158 155 CONFIG_IOSCHED_DEADLINE=y 159 156 CONFIG_IOSCHED_CFQ=y 160 - CONFIG_DEFAULT_AS=y 161 157 # CONFIG_DEFAULT_DEADLINE is not set 162 - # CONFIG_DEFAULT_CFQ is not set 158 + CONFIG_DEFAULT_CFQ=y 163 159 # CONFIG_DEFAULT_NOOP is not set 164 - CONFIG_DEFAULT_IOSCHED="anticipatory" 160 + CONFIG_DEFAULT_IOSCHED="cfq" 161 + # CONFIG_INLINE_SPIN_TRYLOCK is not set 162 + # CONFIG_INLINE_SPIN_TRYLOCK_BH is not set 163 + # CONFIG_INLINE_SPIN_LOCK is not set 164 + # CONFIG_INLINE_SPIN_LOCK_BH is not set 165 + # CONFIG_INLINE_SPIN_LOCK_IRQ is not set 166 + # CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set 167 + CONFIG_INLINE_SPIN_UNLOCK=y 168 + # CONFIG_INLINE_SPIN_UNLOCK_BH is not set 169 + CONFIG_INLINE_SPIN_UNLOCK_IRQ=y 170 + # CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set 171 + # CONFIG_INLINE_READ_TRYLOCK is not set 172 + # CONFIG_INLINE_READ_LOCK is not set 173 + # CONFIG_INLINE_READ_LOCK_BH is not set 174 + # CONFIG_INLINE_READ_LOCK_IRQ is not set 175 + # CONFIG_INLINE_READ_LOCK_IRQSAVE is not set 176 + CONFIG_INLINE_READ_UNLOCK=y 177 + # CONFIG_INLINE_READ_UNLOCK_BH is not set 178 + CONFIG_INLINE_READ_UNLOCK_IRQ=y 179 + # CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set 180 + # CONFIG_INLINE_WRITE_TRYLOCK is not set 181 + # CONFIG_INLINE_WRITE_LOCK is not set 182 + # CONFIG_INLINE_WRITE_LOCK_BH is not set 183 + # CONFIG_INLINE_WRITE_LOCK_IRQ is not set 184 + # CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set 185 + CONFIG_INLINE_WRITE_UNLOCK=y 186 + # CONFIG_INLINE_WRITE_UNLOCK_BH is not set 187 + CONFIG_INLINE_WRITE_UNLOCK_IRQ=y 188 + # CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE 
is not set 189 + CONFIG_MUTEX_SPIN_ON_OWNER=y 165 190 # CONFIG_FREEZER is not set 166 191 167 192 # ··· 208 179 CONFIG_GENERIC_CALIBRATE_DELAY=y 209 180 CONFIG_ARCH_MAY_HAVE_PC_FDC=y 210 181 CONFIG_SPARC64_SMP=y 182 + CONFIG_EARLYFB=y 211 183 CONFIG_SPARC64_PAGE_SIZE_8KB=y 212 184 # CONFIG_SPARC64_PAGE_SIZE_64KB is not set 213 185 CONFIG_SECCOMP=y ··· 246 216 CONFIG_PHYS_ADDR_T_64BIT=y 247 217 CONFIG_ZONE_DMA_FLAG=0 248 218 CONFIG_NR_QUICK=1 249 - CONFIG_HAVE_MLOCK=y 250 - CONFIG_HAVE_MLOCKED_PAGE_BIT=y 219 + # CONFIG_KSM is not set 251 220 CONFIG_DEFAULT_MMAP_MIN_ADDR=8192 252 221 CONFIG_SCHED_SMT=y 253 222 CONFIG_SCHED_MC=y ··· 344 315 CONFIG_INET6_XFRM_MODE_BEET=m 345 316 # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set 346 317 CONFIG_IPV6_SIT=m 318 + # CONFIG_IPV6_SIT_6RD is not set 347 319 CONFIG_IPV6_NDISC_NODETYPE=y 348 320 CONFIG_IPV6_TUNNEL=m 349 321 # CONFIG_IPV6_MULTIPLE_TABLES is not set ··· 386 356 # CONFIG_AF_RXRPC is not set 387 357 CONFIG_WIRELESS=y 388 358 # CONFIG_CFG80211 is not set 389 - CONFIG_CFG80211_DEFAULT_PS_VALUE=0 390 - CONFIG_WIRELESS_OLD_REGULATORY=y 391 - # CONFIG_WIRELESS_EXT is not set 392 359 # CONFIG_LIB80211 is not set 393 360 394 361 # ··· 403 376 # Generic Driver Options 404 377 # 405 378 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 379 + # CONFIG_DEVTMPFS is not set 406 380 CONFIG_STANDALONE=y 407 381 # CONFIG_PREVENT_FIRMWARE_BUILD is not set 408 382 CONFIG_FW_LOADER=y ··· 425 397 # CONFIG_BLK_DEV_COW_COMMON is not set 426 398 CONFIG_BLK_DEV_LOOP=m 427 399 CONFIG_BLK_DEV_CRYPTOLOOP=m 400 + 401 + # 402 + # DRBD disabled because PROC_FS, INET or CONNECTOR not selected 403 + # 404 + # CONFIG_BLK_DEV_DRBD is not set 428 405 CONFIG_BLK_DEV_NBD=m 429 406 # CONFIG_BLK_DEV_SX8 is not set 430 407 # CONFIG_BLK_DEV_UB is not set ··· 441 408 CONFIG_SUNVDC=m 442 409 # CONFIG_BLK_DEV_HD is not set 443 410 CONFIG_MISC_DEVICES=y 411 + # CONFIG_AD525X_DPOT is not set 444 412 # CONFIG_PHANTOM is not set 445 413 # CONFIG_SGI_IOC4 is not set 446 414 # CONFIG_TIFM_CORE is not set ··· 449 415 # CONFIG_ENCLOSURE_SERVICES is not set 450 416 # CONFIG_HP_ILO is not set 451 417 # CONFIG_ISL29003 is not set 418 + # CONFIG_DS1682 is not set 452 419 # CONFIG_C2PORT is not set 453 420 454 421 # ··· 557 522 # CONFIG_ISCSI_TCP is not set 558 523 # CONFIG_SCSI_CXGB3_ISCSI is not set 559 524 # CONFIG_SCSI_BNX2_ISCSI is not set 525 + # CONFIG_BE2ISCSI is not set 560 526 # CONFIG_BLK_DEV_3W_XXXX_RAID is not set 527 + # CONFIG_SCSI_HPSA is not set 561 528 # CONFIG_SCSI_3W_9XXX is not set 529 + # CONFIG_SCSI_3W_SAS is not set 562 530 # CONFIG_SCSI_ACARD is not set 563 531 # CONFIG_SCSI_AACRAID is not set 564 532 # CONFIG_SCSI_AIC7XXX is not set ··· 595 557 # CONFIG_SCSI_DEBUG is not set 596 558 # CONFIG_SCSI_SUNESP is not set 597 559 # CONFIG_SCSI_PMCRAID is not set 560 + # CONFIG_SCSI_PM8001 is not set 598 561 # CONFIG_SCSI_SRP is not set 562 + # CONFIG_SCSI_BFA_FC is not set 599 563 # CONFIG_SCSI_DH is not set 600 564 # CONFIG_SCSI_OSD_INITIATOR is not set 601 565 # CONFIG_ATA is not set ··· 608 568 CONFIG_MD_RAID1=m 609 569 CONFIG_MD_RAID10=m 610 570 CONFIG_MD_RAID456=m 571 + # CONFIG_MULTICORE_RAID456 is not set 611 572 CONFIG_MD_RAID6_PQ=m 573 + # CONFIG_ASYNC_RAID6_TEST is not set 612 574 CONFIG_MD_MULTIPATH=m 613 575 # CONFIG_MD_FAULTY is not set 614 576 CONFIG_BLK_DEV_DM=m ··· 634 592 # 635 593 636 594 # 637 - # See the help texts for more information. 595 + # The newer stack is recommended. 
638 596 # 639 597 # CONFIG_FIREWIRE is not set 640 598 # CONFIG_IEEE1394 is not set ··· 706 664 # CONFIG_SUNDANCE is not set 707 665 # CONFIG_TLAN is not set 708 666 # CONFIG_KS8842 is not set 667 + # CONFIG_KS8851_MLL is not set 709 668 # CONFIG_VIA_RHINE is not set 710 669 # CONFIG_SC92031 is not set 711 670 # CONFIG_ATL2 is not set ··· 788 745 # CONFIG_NETCONSOLE is not set 789 746 # CONFIG_NETPOLL is not set 790 747 # CONFIG_NET_POLL_CONTROLLER is not set 748 + # CONFIG_VMXNET3 is not set 791 749 # CONFIG_ISDN is not set 792 750 # CONFIG_PHONE is not set 793 751 ··· 798 754 CONFIG_INPUT=y 799 755 # CONFIG_INPUT_FF_MEMLESS is not set 800 756 # CONFIG_INPUT_POLLDEV is not set 757 + # CONFIG_INPUT_SPARSEKMAP is not set 801 758 802 759 # 803 760 # Userland interfaces ··· 815 770 # Input Device Drivers 816 771 # 817 772 CONFIG_INPUT_KEYBOARD=y 773 + # CONFIG_KEYBOARD_ADP5588 is not set 818 774 CONFIG_KEYBOARD_ATKBD=y 775 + # CONFIG_QT2160 is not set 819 776 CONFIG_KEYBOARD_LKKBD=m 777 + # CONFIG_KEYBOARD_MAX7359 is not set 820 778 # CONFIG_KEYBOARD_NEWTON is not set 779 + # CONFIG_KEYBOARD_OPENCORES is not set 821 780 # CONFIG_KEYBOARD_STOWAWAY is not set 822 781 CONFIG_KEYBOARD_SUNKBD=y 823 782 # CONFIG_KEYBOARD_XTKBD is not set ··· 861 812 CONFIG_SERIO_PCIPS2=m 862 813 CONFIG_SERIO_LIBPS2=y 863 814 CONFIG_SERIO_RAW=m 815 + # CONFIG_SERIO_ALTERA_PS2 is not set 864 816 # CONFIG_GAMEPORT is not set 865 817 866 818 # ··· 894 844 CONFIG_SERIAL_CORE=y 895 845 CONFIG_SERIAL_CORE_CONSOLE=y 896 846 # CONFIG_SERIAL_JSM is not set 847 + # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set 897 848 CONFIG_UNIX98_PTYS=y 898 849 # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 899 850 # CONFIG_LEGACY_PTYS is not set ··· 909 858 CONFIG_DEVPORT=y 910 859 CONFIG_I2C=y 911 860 CONFIG_I2C_BOARDINFO=y 861 + CONFIG_I2C_COMPAT=y 912 862 # CONFIG_I2C_CHARDEV is not set 913 863 CONFIG_I2C_HELPER_AUTO=y 914 864 CONFIG_I2C_ALGOBIT=y ··· 950 898 # CONFIG_I2C_TINY_USB is not set 951 899 952 900 # 953 - # Graphics adapter I2C/DDC channel drivers 954 - # 955 - # CONFIG_I2C_VOODOO3 is not set 956 - 957 - # 958 901 # Other I2C/SMBus bus drivers 959 902 # 960 903 # CONFIG_I2C_PCA_PLATFORM is not set ··· 958 911 # 959 912 # Miscellaneous I2C Chip support 960 913 # 961 - # CONFIG_DS1682 is not set 962 - # CONFIG_SENSORS_PCF8574 is not set 963 - # CONFIG_PCF8575 is not set 964 - # CONFIG_SENSORS_PCA9539 is not set 965 914 # CONFIG_SENSORS_TSL2550 is not set 966 915 # CONFIG_I2C_DEBUG_CORE is not set 967 916 # CONFIG_I2C_DEBUG_ALGO is not set ··· 975 932 # CONFIG_POWER_SUPPLY is not set 976 933 CONFIG_HWMON=y 977 934 # CONFIG_HWMON_VID is not set 935 + # CONFIG_HWMON_DEBUG_CHIP is not set 936 + 937 + # 938 + # Native drivers 939 + # 978 940 # CONFIG_SENSORS_AD7414 is not set 979 941 # CONFIG_SENSORS_AD7418 is not set 980 942 # CONFIG_SENSORS_ADM1021 is not set ··· 1003 955 # CONFIG_SENSORS_GL520SM is not set 1004 956 # CONFIG_SENSORS_IT87 is not set 1005 957 # CONFIG_SENSORS_LM63 is not set 958 + # CONFIG_SENSORS_LM73 is not set 1006 959 # CONFIG_SENSORS_LM75 is not set 1007 960 # CONFIG_SENSORS_LM77 is not set 1008 961 # CONFIG_SENSORS_LM78 is not set ··· 1030 981 # CONFIG_SENSORS_ADS7828 is not set 1031 982 # CONFIG_SENSORS_THMC50 is not set 1032 983 # CONFIG_SENSORS_TMP401 is not set 984 + # CONFIG_SENSORS_TMP421 is not set 1033 985 # CONFIG_SENSORS_VIA686A is not set 1034 986 # CONFIG_SENSORS_VT1211 is not set 1035 987 # CONFIG_SENSORS_VT8231 is not set ··· 1043 993 # CONFIG_SENSORS_W83627HF is not set 1044 994 # 
CONFIG_SENSORS_W83627EHF is not set 1045 995 # CONFIG_SENSORS_ULTRA45 is not set 1046 - # CONFIG_HWMON_DEBUG_CHIP is not set 996 + # CONFIG_SENSORS_LIS3_I2C is not set 1047 997 # CONFIG_THERMAL is not set 1048 - # CONFIG_THERMAL_HWMON is not set 1049 998 # CONFIG_WATCHDOG is not set 1050 999 CONFIG_SSB_POSSIBLE=y 1051 1000 ··· 1062 1013 # CONFIG_TWL4030_CORE is not set 1063 1014 # CONFIG_MFD_TMIO is not set 1064 1015 # CONFIG_PMIC_DA903X is not set 1016 + # CONFIG_PMIC_ADP5520 is not set 1065 1017 # CONFIG_MFD_WM8400 is not set 1018 + # CONFIG_MFD_WM831X is not set 1066 1019 # CONFIG_MFD_WM8350_I2C is not set 1067 1020 # CONFIG_MFD_PCF50633 is not set 1068 1021 # CONFIG_AB3100_CORE is not set 1022 + # CONFIG_MFD_88PM8607 is not set 1069 1023 # CONFIG_REGULATOR is not set 1070 1024 # CONFIG_MEDIA_SUPPORT is not set 1071 1025 1072 1026 # 1073 1027 # Graphics support 1074 1028 # 1029 + CONFIG_VGA_ARB=y 1075 1030 # CONFIG_DRM is not set 1076 1031 # CONFIG_VGASTATE is not set 1077 1032 # CONFIG_VIDEO_OUTPUT_CONTROL is not set ··· 1229 1176 # CONFIG_SND_OXYGEN is not set 1230 1177 # CONFIG_SND_CS4281 is not set 1231 1178 # CONFIG_SND_CS46XX is not set 1179 + # CONFIG_SND_CS5535AUDIO is not set 1232 1180 # CONFIG_SND_CTXFI is not set 1233 1181 # CONFIG_SND_DARLA20 is not set 1234 1182 # CONFIG_SND_GINA20 is not set ··· 1365 1311 # CONFIG_USB_OXU210HP_HCD is not set 1366 1312 # CONFIG_USB_ISP116X_HCD is not set 1367 1313 # CONFIG_USB_ISP1760_HCD is not set 1314 + # CONFIG_USB_ISP1362_HCD is not set 1368 1315 CONFIG_USB_OHCI_HCD=y 1369 1316 # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set 1370 1317 # CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set ··· 1481 1426 # CONFIG_RTC_DRV_PCF8563 is not set 1482 1427 # CONFIG_RTC_DRV_PCF8583 is not set 1483 1428 # CONFIG_RTC_DRV_M41T80 is not set 1429 + # CONFIG_RTC_DRV_BQ32K is not set 1484 1430 # CONFIG_RTC_DRV_S35390A is not set 1485 1431 # CONFIG_RTC_DRV_FM3130 is not set 1486 1432 # CONFIG_RTC_DRV_RX8581 is not set ··· 1503 1447 # CONFIG_RTC_DRV_M48T86 is not set 1504 1448 # CONFIG_RTC_DRV_M48T35 is not set 1505 1449 CONFIG_RTC_DRV_M48T59=y 1450 + # CONFIG_RTC_DRV_MSM6242 is not set 1506 1451 CONFIG_RTC_DRV_BQ4802=y 1452 + # CONFIG_RTC_DRV_RP5C01 is not set 1507 1453 # CONFIG_RTC_DRV_V3020 is not set 1508 1454 1509 1455 # ··· 1683 1625 CONFIG_ENABLE_MUST_CHECK=y 1684 1626 CONFIG_FRAME_WARN=2048 1685 1627 CONFIG_MAGIC_SYSRQ=y 1628 + # CONFIG_STRIP_ASM_SYMS is not set 1686 1629 # CONFIG_UNUSED_SYMBOLS is not set 1687 1630 CONFIG_DEBUG_FS=y 1688 1631 # CONFIG_HEADERS_CHECK is not set ··· 1737 1678 CONFIG_HAVE_FUNCTION_TRACER=y 1738 1679 CONFIG_HAVE_DYNAMIC_FTRACE=y 1739 1680 CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y 1681 + CONFIG_HAVE_SYSCALL_TRACEPOINTS=y 1740 1682 CONFIG_RING_BUFFER=y 1741 1683 CONFIG_EVENT_TRACING=y 1742 1684 CONFIG_CONTEXT_SWITCH_TRACER=y 1685 + CONFIG_RING_BUFFER_ALLOW_SWAP=y 1743 1686 CONFIG_TRACING=y 1744 1687 CONFIG_GENERIC_TRACER=y 1745 1688 CONFIG_TRACING_SUPPORT=y ··· 1749 1688 # CONFIG_FUNCTION_TRACER is not set 1750 1689 # CONFIG_IRQSOFF_TRACER is not set 1751 1690 # CONFIG_SCHED_TRACER is not set 1691 + # CONFIG_FTRACE_SYSCALLS is not set 1752 1692 # CONFIG_BOOT_TRACER is not set 1753 1693 CONFIG_BRANCH_PROFILE_NONE=y 1754 1694 # CONFIG_PROFILE_ANNOTATED_BRANCHES is not set ··· 1768 1706 # CONFIG_DEBUG_STACK_USAGE is not set 1769 1707 # CONFIG_DEBUG_DCFLUSH is not set 1770 1708 # CONFIG_STACK_DEBUG is not set 1709 + # CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set 1771 1710 1772 1711 # 1773 1712 # Security options ··· 1777 1714 # 
CONFIG_KEYS_DEBUG_PROC_KEYS is not set 1778 1715 # CONFIG_SECURITY is not set 1779 1716 # CONFIG_SECURITYFS is not set 1780 - # CONFIG_SECURITY_FILE_CAPABILITIES is not set 1717 + # CONFIG_DEFAULT_SECURITY_SELINUX is not set 1718 + # CONFIG_DEFAULT_SECURITY_SMACK is not set 1719 + # CONFIG_DEFAULT_SECURITY_TOMOYO is not set 1720 + CONFIG_DEFAULT_SECURITY_DAC=y 1721 + CONFIG_DEFAULT_SECURITY="" 1781 1722 CONFIG_XOR_BLOCKS=m 1782 1723 CONFIG_ASYNC_CORE=m 1783 1724 CONFIG_ASYNC_MEMCPY=m 1784 1725 CONFIG_ASYNC_XOR=m 1726 + CONFIG_ASYNC_PQ=m 1727 + CONFIG_ASYNC_RAID6_RECOV=m 1785 1728 CONFIG_CRYPTO=y 1786 1729 1787 1730 #
+1 -1
arch/sparc/include/asm/io_32.h
···
   8    8 #include <asm/page.h>      /* IO address mapping routines need this */
   9    9 #include <asm/system.h>
  10   10 
  11      -#define page_to_phys(page)	(((page) - mem_map) << PAGE_SHIFT)
       11 +#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
  12   12 
  13   13 static inline u32 flip_dword (u32 l)
  14   14 {
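The old macro turned a bare mem_map offset into a physical address, which is only right when physical memory starts at pfn 0. On sparc32 it does not (ARCH_PFN_OFFSET is pfn_base, see the page_32.h hunk below), and page_to_pfn() accounts for that. Roughly, as a sketch of the generic FLATMEM definition rather than part of this patch:

	/* Sketch of asm-generic/memory_model.h, FLATMEM case:
	 * the mem_map index alone is not a physical frame number,
	 * the arch's pfn offset has to be added back in.
	 */
	#define __page_to_pfn(page) \
		((unsigned long)((page) - mem_map) + ARCH_PFN_OFFSET)

	#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)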
+1 -1
arch/sparc/include/asm/page_32.h
···
 143  143 #define phys_to_virt		__va
 144  144 
 145  145 #define ARCH_PFN_OFFSET	(pfn_base)
 146      -#define virt_to_page(kaddr)	(mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT)))
      146 +#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 147  147 
 148  148 #define pfn_valid(pfn)		(((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr))
 149  149 #define virt_addr_valid(kaddr)	((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr)
+2 -17
arch/sparc/include/asm/param.h
···
   1    1 #ifndef _ASMSPARC_PARAM_H
   2    2 #define _ASMSPARC_PARAM_H
   3    3 
   4      -#ifdef __KERNEL__
   5      -# define HZ		CONFIG_HZ	/* Internal kernel timer frequency */
   6      -# define USER_HZ	100	/* .. some user interfaces are in "ticks" */
   7      -# define CLOCKS_PER_SEC (USER_HZ)
   8      -#endif
   9      -
  10      -#ifndef HZ
  11      -#define HZ 100
  12      -#endif
  13      -
  14    4 #define EXEC_PAGESIZE	8192    /* Thanks for sun4's we carry baggage... */
        5 +#include <asm-generic/param.h>
  15    6 
  16      -#ifndef NOGROUP
  17      -#define NOGROUP (-1)
  18      -#endif
  19      -
  20      -#define MAXHOSTNAMELEN	64	/* max length of hostname */
  21      -
  22      -#endif
        7 +#endif /* _ASMSPARC_PARAM_H */
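Everything the removed lines defined comes back from the generic header. Roughly, a reference sketch of <asm-generic/param.h> from this era (not part of the patch); note that EXEC_PAGESIZE is #ifndef-guarded there, which is why the sparc-specific 8192 definition has to stay ahead of the #include:

	#ifdef __KERNEL__
	# define HZ		CONFIG_HZ	/* Internal kernel timer frequency */
	# define USER_HZ	100		/* some user interfaces are in "ticks" */
	# define CLOCKS_PER_SEC	(USER_HZ)	/* like times() */
	#endif

	#ifndef HZ
	#define HZ 100
	#endif

	#ifndef EXEC_PAGESIZE
	#define EXEC_PAGESIZE	4096
	#endif

	#ifndef NOGROUP
	#define NOGROUP		(-1)
	#endif

	#define MAXHOSTNAMELEN	64	/* max length of hostname */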
+1
arch/sparc/include/asm/timex_32.h
···
  12   12 typedef unsigned long cycles_t;
  13   13 #define get_cycles()	(0)
  14   14 
       15 +extern u32 (*do_arch_gettimeoffset)(void);
  15   16 #endif
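With CONFIG_ARCH_USES_GETTIMEOFFSET (selected for sparc32 in the Kconfig hunk above), the generic timekeeping code asks the architecture for the nanoseconds elapsed since the last timer tick instead of sparc32 carrying its own do_gettimeofday(). This function pointer lets each sparc32 platform install its implementation at boot; the real ones are sbus_do_gettimeoffset() and pci_gettimeoffset() later in this merge. A minimal sketch of the hookup, with my_counter_read_usec() as a placeholder for the platform counter read:

	/* Sketch only: return nanoseconds since the last tick. */
	static u32 my_gettimeoffset(void)
	{
		return my_counter_read_usec() * 1000;
	}

	void __init my_platform_time_init(void)
	{
		do_arch_gettimeoffset = my_gettimeoffset;
	}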
+3 -1
arch/sparc/include/asm/topology_64.h
···
  12   12 
  13   13 #define parent_node(node)	(node)
  14   14 
  15      -#define cpumask_of_node(node)	(&numa_cpumask_lookup_table[node])
       15 +#define cpumask_of_node(node) ((node) == -1 ?				\
       16 +			       cpu_all_mask :				\
       17 +			       &numa_cpumask_lookup_table[node])
  16   18 
  17   19 struct pci_bus;
  18   20 #ifdef CONFIG_PCI
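Node -1 is NUMA_NO_NODE, the value reported for devices and memory with no node affinity, so the old macro indexed numa_cpumask_lookup_table[-1] out of bounds. A typical caller pattern that trips this, as an illustration only (setup_queue_on_cpu() is a hypothetical per-cpu helper):

	static void init_queues(struct device *dev)
	{
		/* dev_to_node() returns -1 for devices with no NUMA affinity;
		 * cpumask_of_node() now hands back cpu_all_mask in that case.
		 */
		const struct cpumask *mask = cpumask_of_node(dev_to_node(dev));
		int cpu;

		for_each_cpu(cpu, mask)
			setup_queue_on_cpu(dev, cpu);
	}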
+1 -1
arch/sparc/include/asm/uaccess_32.h
···
 274  274 
 275  275 	if (unlikely(sz != -1 && sz < n)) {
 276  276 		copy_from_user_overflow();
 277      -		return -EFAULT;
      277 +		return n;
 278  278 	}
 279  279 
 280  280 	if (n && __access_ok((unsigned long) from, n))
+1 -1
arch/sparc/include/asm/uaccess_64.h
···
 221  221 static inline unsigned long __must_check
 222  222 copy_from_user(void *to, const void __user *from, unsigned long size)
 223  223 {
 224      -	unsigned long ret = (unsigned long) -EFAULT;
 225  224 	int sz = __compiletime_object_size(to);
      225 +	unsigned long ret = size;
 226  226 
 227  227 	if (likely(sz == -1 || sz >= size)) {
 228  228 		ret = ___copy_from_user(to, from, size);
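copy_from_user() is defined to return the number of bytes it could not copy, with 0 meaning success; it never returns an errno. Returning -EFAULT from these paths handed callers a huge unsigned "bytes remaining" value and broke the usual calling convention, shown here as an illustrative helper (struct my_args is a placeholder type):

	static int fetch_args(struct my_args *dst, const void __user *src)
	{
		/* non-zero return == bytes left uncopied; the caller makes the errno */
		if (copy_from_user(dst, src, sizeof(*dst)))
			return -EFAULT;
		return 0;
	}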
+2 -2
arch/sparc/kernel/central.c
···
  99   99 
 100  100 	p->leds_resource.start = (unsigned long)
 101  101 		(p->clock_regs + CLOCK_CTRL);
 102      -	p->leds_resource.end = p->leds_resource.end;
      102 +	p->leds_resource.end = p->leds_resource.start;
 103  103 	p->leds_resource.name = "leds";
 104  104 
 105  105 	p->leds_pdev.name = "sunfire-clockboard-leds";
···
 194  194 	if (!p->central) {
 195  195 		p->leds_resource.start = (unsigned long)
 196  196 			(p->pregs + FHC_PREGS_CTRL);
 197      -		p->leds_resource.end = p->leds_resource.end;
      197 +		p->leds_resource.end = p->leds_resource.start;
 198  198 		p->leds_resource.name = "leds";
 199  199 
 200  200 		p->leds_pdev.name = "sunfire-fhc-leds";
+28 -9
arch/sparc/kernel/irq_64.c
···
 250  250 };
 251  251 
 252  252 #ifdef CONFIG_SMP
 253      -static int irq_choose_cpu(unsigned int virt_irq)
      253 +static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
 254  254 {
 255  255 	cpumask_t mask;
 256  256 	int cpuid;
 257  257 
 258      -	cpumask_copy(&mask, irq_desc[virt_irq].affinity);
      258 +	cpumask_copy(&mask, affinity);
 259  259 	if (cpus_equal(mask, cpu_online_map)) {
 260  260 		cpuid = map_to_cpu(virt_irq);
 261  261 	} else {
···
 268  268 	return cpuid;
 269  269 }
 270  270 #else
 271      -static int irq_choose_cpu(unsigned int virt_irq)
      271 +static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
 272  272 {
 273  273 	return real_hard_smp_processor_id();
 274  274 }
···
 282  282 	unsigned long cpuid, imap, val;
 283  283 	unsigned int tid;
 284  284 
 285      -	cpuid = irq_choose_cpu(virt_irq);
      285 +	cpuid = irq_choose_cpu(virt_irq,
      286 +			       irq_desc[virt_irq].affinity);
 286  287 	imap = data->imap;
 287  288 
 288  289 	tid = sun4u_compute_tid(imap, cpuid);
···
 300  299 static int sun4u_set_affinity(unsigned int virt_irq,
 301  300 			       const struct cpumask *mask)
 302  301 {
 303      -	sun4u_irq_enable(virt_irq);
      302 +	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
      303 +
      304 +	if (likely(data)) {
      305 +		unsigned long cpuid, imap, val;
      306 +		unsigned int tid;
      307 +
      308 +		cpuid = irq_choose_cpu(virt_irq, mask);
      309 +		imap = data->imap;
      310 +
      311 +		tid = sun4u_compute_tid(imap, cpuid);
      312 +
      313 +		val = upa_readq(imap);
      314 +		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
      315 +			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
      316 +		val |= tid | IMAP_VALID;
      317 +		upa_writeq(val, imap);
      318 +		upa_writeq(ICLR_IDLE, data->iclr);
      319 +	}
 304  320 
 305  321 	return 0;
 306  322 }
···
 358  340 static void sun4v_irq_enable(unsigned int virt_irq)
 359  341 {
 360  342 	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
 361      -	unsigned long cpuid = irq_choose_cpu(virt_irq);
      343 +	unsigned long cpuid = irq_choose_cpu(virt_irq,
      344 +					     irq_desc[virt_irq].affinity);
 362  345 	int err;
 363  346 
 364  347 	err = sun4v_intr_settarget(ino, cpuid);
···
 380  361 				 const struct cpumask *mask)
 381  362 {
 382  363 	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
 383      -	unsigned long cpuid = irq_choose_cpu(virt_irq);
      364 +	unsigned long cpuid = irq_choose_cpu(virt_irq, mask);
 384  365 	int err;
 385  366 
 386  367 	err = sun4v_intr_settarget(ino, cpuid);
···
 422  403 	unsigned long cpuid, dev_handle, dev_ino;
 423  404 	int err;
 424  405 
 425      -	cpuid = irq_choose_cpu(virt_irq);
      406 +	cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity);
 426  407 
 427  408 	dev_handle = virt_irq_table[virt_irq].dev_handle;
 428  409 	dev_ino = virt_irq_table[virt_irq].dev_ino;
···
 452  433 	unsigned long cpuid, dev_handle, dev_ino;
 453  434 	int err;
 454  435 
 455      -	cpuid = irq_choose_cpu(virt_irq);
      436 +	cpuid = irq_choose_cpu(virt_irq, mask);
 456  437 
 457  438 	dev_handle = virt_irq_table[virt_irq].dev_handle;
 458  439 	dev_ino = virt_irq_table[virt_irq].dev_ino;
+16 -87
arch/sparc/kernel/pcic.c
··· 30 30 #include <asm/oplib.h> 31 31 #include <asm/prom.h> 32 32 #include <asm/pcic.h> 33 + #include <asm/timex.h> 33 34 #include <asm/timer.h> 34 35 #include <asm/uaccess.h> 35 36 #include <asm/irq_regs.h> ··· 164 163 volatile int pcic_speculative; 165 164 volatile int pcic_trapped; 166 165 167 - static void pci_do_gettimeofday(struct timeval *tv); 168 - static int pci_do_settimeofday(struct timespec *tv); 169 166 170 167 #define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3)) 171 168 ··· 715 716 #define USECS_PER_JIFFY 10000 /* We have 100HZ "standard" timer for sparc */ 716 717 #define TICK_TIMER_LIMIT ((100*1000000/4)/100) 717 718 719 + u32 pci_gettimeoffset(void) 720 + { 721 + /* 722 + * We divide all by 100 723 + * to have microsecond resolution and to avoid overflow 724 + */ 725 + unsigned long count = 726 + readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW; 727 + count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100); 728 + return count * 1000; 729 + } 730 + 731 + 718 732 void __init pci_time_init(void) 719 733 { 720 734 struct linux_pcic *pcic = &pcic0; 721 735 unsigned long v; 722 736 int timer_irq, irq; 723 737 724 - /* A hack until do_gettimeofday prototype is moved to arch specific headers 725 - and btfixupped. Patch do_gettimeofday with ba pci_do_gettimeofday; nop */ 726 - ((unsigned int *)do_gettimeofday)[0] = 727 - 0x10800000 | ((((unsigned long)pci_do_gettimeofday - 728 - (unsigned long)do_gettimeofday) >> 2) & 0x003fffff); 729 - ((unsigned int *)do_gettimeofday)[1] = 0x01000000; 730 - BTFIXUPSET_CALL(bus_do_settimeofday, pci_do_settimeofday, BTFIXUPCALL_NORM); 738 + do_arch_gettimeoffset = pci_gettimeoffset; 739 + 731 740 btfixup(); 732 741 733 742 writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT); ··· 753 746 local_irq_enable(); 754 747 } 755 748 756 - static inline unsigned long do_gettimeoffset(void) 757 - { 758 - /* 759 - * We divide all by 100 760 - * to have microsecond resolution and to avoid overflow 761 - */ 762 - unsigned long count = 763 - readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW; 764 - count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100); 765 - return count; 766 - } 767 - 768 - static void pci_do_gettimeofday(struct timeval *tv) 769 - { 770 - unsigned long flags; 771 - unsigned long seq; 772 - unsigned long usec, sec; 773 - unsigned long max_ntp_tick = tick_usec - tickadj; 774 - 775 - do { 776 - seq = read_seqbegin_irqsave(&xtime_lock, flags); 777 - usec = do_gettimeoffset(); 778 - 779 - /* 780 - * If time_adjust is negative then NTP is slowing the clock 781 - * so make sure not to go into next possible interval. 782 - * Better to lose some accuracy than have time go backwards.. 783 - */ 784 - if (unlikely(time_adjust < 0)) 785 - usec = min(usec, max_ntp_tick); 786 - 787 - sec = xtime.tv_sec; 788 - usec += (xtime.tv_nsec / 1000); 789 - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); 790 - 791 - while (usec >= 1000000) { 792 - usec -= 1000000; 793 - sec++; 794 - } 795 - 796 - tv->tv_sec = sec; 797 - tv->tv_usec = usec; 798 - } 799 - 800 - static int pci_do_settimeofday(struct timespec *tv) 801 - { 802 - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) 803 - return -EINVAL; 804 - 805 - /* 806 - * This is revolting. We need to set "xtime" correctly. However, the 807 - * value in this location is the value at the most recent update of 808 - * wall time. 
Discover what correction gettimeofday() would have 809 - * made, and then undo it! 810 - */ 811 - tv->tv_nsec -= 1000 * do_gettimeoffset(); 812 - while (tv->tv_nsec < 0) { 813 - tv->tv_nsec += NSEC_PER_SEC; 814 - tv->tv_sec--; 815 - } 816 - 817 - wall_to_monotonic.tv_sec += xtime.tv_sec - tv->tv_sec; 818 - wall_to_monotonic.tv_nsec += xtime.tv_nsec - tv->tv_nsec; 819 - 820 - if (wall_to_monotonic.tv_nsec > NSEC_PER_SEC) { 821 - wall_to_monotonic.tv_nsec -= NSEC_PER_SEC; 822 - wall_to_monotonic.tv_sec++; 823 - } 824 - if (wall_to_monotonic.tv_nsec < 0) { 825 - wall_to_monotonic.tv_nsec += NSEC_PER_SEC; 826 - wall_to_monotonic.tv_sec--; 827 - } 828 - 829 - xtime.tv_sec = tv->tv_sec; 830 - xtime.tv_nsec = tv->tv_nsec; 831 - ntp_clear(); 832 - return 0; 833 - } 834 749 835 750 #if 0 836 751 static void watchdog_reset() {
+467 -150
arch/sparc/kernel/perf_event.c
··· 1 1 /* Performance event support for sparc64. 2 2 * 3 - * Copyright (C) 2009 David S. Miller <davem@davemloft.net> 3 + * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net> 4 4 * 5 5 * This code is based almost entirely upon the x86 perf event 6 6 * code, which is: ··· 18 18 #include <linux/kdebug.h> 19 19 #include <linux/mutex.h> 20 20 21 + #include <asm/stacktrace.h> 21 22 #include <asm/cpudata.h> 23 + #include <asm/uaccess.h> 22 24 #include <asm/atomic.h> 23 25 #include <asm/nmi.h> 24 26 #include <asm/pcr.h> 27 + 28 + #include "kstack.h" 25 29 26 30 /* Sparc64 chips have two performance counters, 32-bits each, with 27 31 * overflow interrupts generated on transition from 0xffffffff to 0. ··· 55 51 56 52 #define PIC_UPPER_INDEX 0 57 53 #define PIC_LOWER_INDEX 1 54 + #define PIC_NO_INDEX -1 58 55 59 56 struct cpu_hw_events { 60 - struct perf_event *events[MAX_HWEVENTS]; 61 - unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; 62 - unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; 57 + /* Number of events currently scheduled onto this cpu. 58 + * This tells how many entries in the arrays below 59 + * are valid. 60 + */ 61 + int n_events; 62 + 63 + /* Number of new events added since the last hw_perf_disable(). 64 + * This works because the perf event layer always adds new 65 + * events inside of a perf_{disable,enable}() sequence. 66 + */ 67 + int n_added; 68 + 69 + /* Array of events current scheduled on this cpu. */ 70 + struct perf_event *event[MAX_HWEVENTS]; 71 + 72 + /* Array of encoded longs, specifying the %pcr register 73 + * encoding and the mask of PIC counters this even can 74 + * be scheduled on. See perf_event_encode() et al. 75 + */ 76 + unsigned long events[MAX_HWEVENTS]; 77 + 78 + /* The current counter index assigned to an event. When the 79 + * event hasn't been programmed into the cpu yet, this will 80 + * hold PIC_NO_INDEX. The event->hw.idx value tells us where 81 + * we ought to schedule the event. 82 + */ 83 + int current_idx[MAX_HWEVENTS]; 84 + 85 + /* Software copy of %pcr register on this cpu. */ 63 86 u64 pcr; 87 + 88 + /* Enabled/disable state. */ 64 89 int enabled; 65 90 }; 66 91 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; 67 92 93 + /* An event map describes the characteristics of a performance 94 + * counter event. In particular it gives the encoding as well as 95 + * a mask telling which counters the event can be measured on. 96 + */ 68 97 struct perf_event_map { 69 98 u16 encoding; 70 99 u8 pic_mask; ··· 106 69 #define PIC_LOWER 0x02 107 70 }; 108 71 72 + /* Encode a perf_event_map entry into a long. 
*/ 109 73 static unsigned long perf_event_encode(const struct perf_event_map *pmap) 110 74 { 111 75 return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; 112 76 } 113 77 114 - static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk) 78 + static u8 perf_event_get_msk(unsigned long val) 115 79 { 116 - *msk = val & 0xff; 117 - *enc = val >> 16; 80 + return val & 0xff; 81 + } 82 + 83 + static u64 perf_event_get_enc(unsigned long val) 84 + { 85 + return val >> 16; 118 86 } 119 87 120 88 #define C(x) PERF_COUNT_HW_CACHE_##x ··· 533 491 pcr_ops->write(cpuc->pcr); 534 492 } 535 493 536 - void hw_perf_enable(void) 537 - { 538 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 539 - u64 val; 540 - int i; 541 - 542 - if (cpuc->enabled) 543 - return; 544 - 545 - cpuc->enabled = 1; 546 - barrier(); 547 - 548 - val = cpuc->pcr; 549 - 550 - for (i = 0; i < MAX_HWEVENTS; i++) { 551 - struct perf_event *cp = cpuc->events[i]; 552 - struct hw_perf_event *hwc; 553 - 554 - if (!cp) 555 - continue; 556 - hwc = &cp->hw; 557 - val |= hwc->config_base; 558 - } 559 - 560 - cpuc->pcr = val; 561 - 562 - pcr_ops->write(cpuc->pcr); 563 - } 564 - 565 - void hw_perf_disable(void) 566 - { 567 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 568 - u64 val; 569 - 570 - if (!cpuc->enabled) 571 - return; 572 - 573 - cpuc->enabled = 0; 574 - 575 - val = cpuc->pcr; 576 - val &= ~(PCR_UTRACE | PCR_STRACE | 577 - sparc_pmu->hv_bit | sparc_pmu->irq_bit); 578 - cpuc->pcr = val; 579 - 580 - pcr_ops->write(cpuc->pcr); 581 - } 582 - 583 494 static u32 read_pmc(int idx) 584 495 { 585 496 u64 val; ··· 559 564 pic &= ~mask; 560 565 pic |= val; 561 566 write_pic(pic); 567 + } 568 + 569 + static u64 sparc_perf_event_update(struct perf_event *event, 570 + struct hw_perf_event *hwc, int idx) 571 + { 572 + int shift = 64 - 32; 573 + u64 prev_raw_count, new_raw_count; 574 + s64 delta; 575 + 576 + again: 577 + prev_raw_count = atomic64_read(&hwc->prev_count); 578 + new_raw_count = read_pmc(idx); 579 + 580 + if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, 581 + new_raw_count) != prev_raw_count) 582 + goto again; 583 + 584 + delta = (new_raw_count << shift) - (prev_raw_count << shift); 585 + delta >>= shift; 586 + 587 + atomic64_add(delta, &event->count); 588 + atomic64_sub(delta, &hwc->period_left); 589 + 590 + return new_raw_count; 562 591 } 563 592 564 593 static int sparc_perf_event_set_period(struct perf_event *event, ··· 617 598 return ret; 618 599 } 619 600 620 - static int sparc_pmu_enable(struct perf_event *event) 601 + /* If performance event entries have been added, move existing 602 + * events around (if necessary) and then assign new entries to 603 + * counters. 604 + */ 605 + static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr) 621 606 { 622 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 623 - struct hw_perf_event *hwc = &event->hw; 624 - int idx = hwc->idx; 607 + int i; 625 608 626 - if (test_and_set_bit(idx, cpuc->used_mask)) 627 - return -EAGAIN; 609 + if (!cpuc->n_added) 610 + goto out; 628 611 629 - sparc_pmu_disable_event(cpuc, hwc, idx); 612 + /* Read in the counters which are moving. 
*/ 613 + for (i = 0; i < cpuc->n_events; i++) { 614 + struct perf_event *cp = cpuc->event[i]; 630 615 631 - cpuc->events[idx] = event; 632 - set_bit(idx, cpuc->active_mask); 616 + if (cpuc->current_idx[i] != PIC_NO_INDEX && 617 + cpuc->current_idx[i] != cp->hw.idx) { 618 + sparc_perf_event_update(cp, &cp->hw, 619 + cpuc->current_idx[i]); 620 + cpuc->current_idx[i] = PIC_NO_INDEX; 621 + } 622 + } 633 623 634 - sparc_perf_event_set_period(event, hwc, idx); 635 - sparc_pmu_enable_event(cpuc, hwc, idx); 636 - perf_event_update_userpage(event); 637 - return 0; 624 + /* Assign to counters all unassigned events. */ 625 + for (i = 0; i < cpuc->n_events; i++) { 626 + struct perf_event *cp = cpuc->event[i]; 627 + struct hw_perf_event *hwc = &cp->hw; 628 + int idx = hwc->idx; 629 + u64 enc; 630 + 631 + if (cpuc->current_idx[i] != PIC_NO_INDEX) 632 + continue; 633 + 634 + sparc_perf_event_set_period(cp, hwc, idx); 635 + cpuc->current_idx[i] = idx; 636 + 637 + enc = perf_event_get_enc(cpuc->events[i]); 638 + pcr |= event_encoding(enc, idx); 639 + } 640 + out: 641 + return pcr; 638 642 } 639 643 640 - static u64 sparc_perf_event_update(struct perf_event *event, 641 - struct hw_perf_event *hwc, int idx) 644 + void hw_perf_enable(void) 642 645 { 643 - int shift = 64 - 32; 644 - u64 prev_raw_count, new_raw_count; 645 - s64 delta; 646 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 647 + u64 pcr; 646 648 647 - again: 648 - prev_raw_count = atomic64_read(&hwc->prev_count); 649 - new_raw_count = read_pmc(idx); 649 + if (cpuc->enabled) 650 + return; 650 651 651 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, 652 - new_raw_count) != prev_raw_count) 653 - goto again; 652 + cpuc->enabled = 1; 653 + barrier(); 654 654 655 - delta = (new_raw_count << shift) - (prev_raw_count << shift); 656 - delta >>= shift; 655 + pcr = cpuc->pcr; 656 + if (!cpuc->n_events) { 657 + pcr = 0; 658 + } else { 659 + pcr = maybe_change_configuration(cpuc, pcr); 657 660 658 - atomic64_add(delta, &event->count); 659 - atomic64_sub(delta, &hwc->period_left); 661 + /* We require that all of the events have the same 662 + * configuration, so just fetch the settings from the 663 + * first entry. 664 + */ 665 + cpuc->pcr = pcr | cpuc->event[0]->hw.config_base; 666 + } 660 667 661 - return new_raw_count; 668 + pcr_ops->write(cpuc->pcr); 669 + } 670 + 671 + void hw_perf_disable(void) 672 + { 673 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 674 + u64 val; 675 + 676 + if (!cpuc->enabled) 677 + return; 678 + 679 + cpuc->enabled = 0; 680 + cpuc->n_added = 0; 681 + 682 + val = cpuc->pcr; 683 + val &= ~(PCR_UTRACE | PCR_STRACE | 684 + sparc_pmu->hv_bit | sparc_pmu->irq_bit); 685 + cpuc->pcr = val; 686 + 687 + pcr_ops->write(cpuc->pcr); 662 688 } 663 689 664 690 static void sparc_pmu_disable(struct perf_event *event) 665 691 { 666 692 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 667 693 struct hw_perf_event *hwc = &event->hw; 668 - int idx = hwc->idx; 694 + unsigned long flags; 695 + int i; 669 696 670 - clear_bit(idx, cpuc->active_mask); 671 - sparc_pmu_disable_event(cpuc, hwc, idx); 697 + local_irq_save(flags); 698 + perf_disable(); 672 699 673 - barrier(); 700 + for (i = 0; i < cpuc->n_events; i++) { 701 + if (event == cpuc->event[i]) { 702 + int idx = cpuc->current_idx[i]; 674 703 675 - sparc_perf_event_update(event, hwc, idx); 676 - cpuc->events[idx] = NULL; 677 - clear_bit(idx, cpuc->used_mask); 704 + /* Shift remaining entries down into 705 + * the existing slot. 
706 + */ 707 + while (++i < cpuc->n_events) { 708 + cpuc->event[i - 1] = cpuc->event[i]; 709 + cpuc->events[i - 1] = cpuc->events[i]; 710 + cpuc->current_idx[i - 1] = 711 + cpuc->current_idx[i]; 712 + } 678 713 679 - perf_event_update_userpage(event); 714 + /* Absorb the final count and turn off the 715 + * event. 716 + */ 717 + sparc_pmu_disable_event(cpuc, hwc, idx); 718 + barrier(); 719 + sparc_perf_event_update(event, hwc, idx); 720 + 721 + perf_event_update_userpage(event); 722 + 723 + cpuc->n_events--; 724 + break; 725 + } 726 + } 727 + 728 + perf_enable(); 729 + local_irq_restore(flags); 730 + } 731 + 732 + static int active_event_index(struct cpu_hw_events *cpuc, 733 + struct perf_event *event) 734 + { 735 + int i; 736 + 737 + for (i = 0; i < cpuc->n_events; i++) { 738 + if (cpuc->event[i] == event) 739 + break; 740 + } 741 + BUG_ON(i == cpuc->n_events); 742 + return cpuc->current_idx[i]; 680 743 } 681 744 682 745 static void sparc_pmu_read(struct perf_event *event) 683 746 { 747 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 748 + int idx = active_event_index(cpuc, event); 684 749 struct hw_perf_event *hwc = &event->hw; 685 750 686 - sparc_perf_event_update(event, hwc, hwc->idx); 751 + sparc_perf_event_update(event, hwc, idx); 687 752 } 688 753 689 754 static void sparc_pmu_unthrottle(struct perf_event *event) 690 755 { 691 756 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 757 + int idx = active_event_index(cpuc, event); 692 758 struct hw_perf_event *hwc = &event->hw; 693 759 694 - sparc_pmu_enable_event(cpuc, hwc, hwc->idx); 760 + sparc_pmu_enable_event(cpuc, hwc, idx); 695 761 } 696 762 697 763 static atomic_t active_events = ATOMIC_INIT(0); ··· 854 750 /* Make sure all events can be scheduled into the hardware at 855 751 * the same time. This is simplified by the fact that we only 856 752 * need to support 2 simultaneous HW events. 753 + * 754 + * As a side effect, the evts[]->hw.idx values will be assigned 755 + * on success. These are pending indexes. When the events are 756 + * actually programmed into the chip, these values will propagate 757 + * to the per-cpu cpuc->current_idx[] slots, see the code in 758 + * maybe_change_configuration() for details. 857 759 */ 858 - static int sparc_check_constraints(unsigned long *events, int n_ev) 760 + static int sparc_check_constraints(struct perf_event **evts, 761 + unsigned long *events, int n_ev) 859 762 { 860 - if (n_ev <= perf_max_events) { 861 - u8 msk1, msk2; 862 - u16 dummy; 763 + u8 msk0 = 0, msk1 = 0; 764 + int idx0 = 0; 863 765 864 - if (n_ev == 1) 865 - return 0; 866 - BUG_ON(n_ev != 2); 867 - perf_event_decode(events[0], &dummy, &msk1); 868 - perf_event_decode(events[1], &dummy, &msk2); 766 + /* This case is possible when we are invoked from 767 + * hw_perf_group_sched_in(). 768 + */ 769 + if (!n_ev) 770 + return 0; 869 771 870 - /* If both events can go on any counter, OK. */ 871 - if (msk1 == (PIC_UPPER | PIC_LOWER) && 872 - msk2 == (PIC_UPPER | PIC_LOWER)) 873 - return 0; 772 + if (n_ev > perf_max_events) 773 + return -1; 874 774 875 - /* If one event is limited to a specific counter, 876 - * and the other can go on both, OK. 
877 - */ 878 - if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) && 879 - msk2 == (PIC_UPPER | PIC_LOWER)) 880 - return 0; 881 - if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) && 882 - msk1 == (PIC_UPPER | PIC_LOWER)) 883 - return 0; 775 + msk0 = perf_event_get_msk(events[0]); 776 + if (n_ev == 1) { 777 + if (msk0 & PIC_LOWER) 778 + idx0 = 1; 779 + goto success; 780 + } 781 + BUG_ON(n_ev != 2); 782 + msk1 = perf_event_get_msk(events[1]); 884 783 885 - /* If the events are fixed to different counters, OK. */ 886 - if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) || 887 - (msk1 == PIC_LOWER && msk2 == PIC_UPPER)) 888 - return 0; 784 + /* If both events can go on any counter, OK. */ 785 + if (msk0 == (PIC_UPPER | PIC_LOWER) && 786 + msk1 == (PIC_UPPER | PIC_LOWER)) 787 + goto success; 889 788 890 - /* Otherwise, there is a conflict. */ 789 + /* If one event is limited to a specific counter, 790 + * and the other can go on both, OK. 791 + */ 792 + if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) && 793 + msk1 == (PIC_UPPER | PIC_LOWER)) { 794 + if (msk0 & PIC_LOWER) 795 + idx0 = 1; 796 + goto success; 891 797 } 892 798 799 + if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) && 800 + msk0 == (PIC_UPPER | PIC_LOWER)) { 801 + if (msk1 & PIC_UPPER) 802 + idx0 = 1; 803 + goto success; 804 + } 805 + 806 + /* If the events are fixed to different counters, OK. */ 807 + if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) || 808 + (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) { 809 + if (msk0 & PIC_LOWER) 810 + idx0 = 1; 811 + goto success; 812 + } 813 + 814 + /* Otherwise, there is a conflict. */ 893 815 return -1; 816 + 817 + success: 818 + evts[0]->hw.idx = idx0; 819 + if (n_ev == 2) 820 + evts[1]->hw.idx = idx0 ^ 1; 821 + return 0; 894 822 } 895 823 896 824 static int check_excludes(struct perf_event **evts, int n_prev, int n_new) ··· 954 818 } 955 819 956 820 static int collect_events(struct perf_event *group, int max_count, 957 - struct perf_event *evts[], unsigned long *events) 821 + struct perf_event *evts[], unsigned long *events, 822 + int *current_idx) 958 823 { 959 824 struct perf_event *event; 960 825 int n = 0; ··· 964 827 if (n >= max_count) 965 828 return -1; 966 829 evts[n] = group; 967 - events[n++] = group->hw.event_base; 830 + events[n] = group->hw.event_base; 831 + current_idx[n++] = PIC_NO_INDEX; 968 832 } 969 833 list_for_each_entry(event, &group->sibling_list, group_entry) { 970 834 if (!is_software_event(event) && ··· 973 835 if (n >= max_count) 974 836 return -1; 975 837 evts[n] = event; 976 - events[n++] = event->hw.event_base; 838 + events[n] = event->hw.event_base; 839 + current_idx[n++] = PIC_NO_INDEX; 977 840 } 978 841 } 979 842 return n; 843 + } 844 + 845 + static void event_sched_in(struct perf_event *event, int cpu) 846 + { 847 + event->state = PERF_EVENT_STATE_ACTIVE; 848 + event->oncpu = cpu; 849 + event->tstamp_running += event->ctx->time - event->tstamp_stopped; 850 + if (is_software_event(event)) 851 + event->pmu->enable(event); 852 + } 853 + 854 + int hw_perf_group_sched_in(struct perf_event *group_leader, 855 + struct perf_cpu_context *cpuctx, 856 + struct perf_event_context *ctx, int cpu) 857 + { 858 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 859 + struct perf_event *sub; 860 + int n0, n; 861 + 862 + if (!sparc_pmu) 863 + return 0; 864 + 865 + n0 = cpuc->n_events; 866 + n = collect_events(group_leader, perf_max_events - n0, 867 + &cpuc->event[n0], &cpuc->events[n0], 868 + &cpuc->current_idx[n0]); 869 + if (n < 0) 870 + return -EAGAIN; 871 + if (check_excludes(cpuc->event, 
n0, n)) 872 + return -EINVAL; 873 + if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0)) 874 + return -EAGAIN; 875 + cpuc->n_events = n0 + n; 876 + cpuc->n_added += n; 877 + 878 + cpuctx->active_oncpu += n; 879 + n = 1; 880 + event_sched_in(group_leader, cpu); 881 + list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { 882 + if (sub->state != PERF_EVENT_STATE_OFF) { 883 + event_sched_in(sub, cpu); 884 + n++; 885 + } 886 + } 887 + ctx->nr_active += n; 888 + 889 + return 1; 890 + } 891 + 892 + static int sparc_pmu_enable(struct perf_event *event) 893 + { 894 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 895 + int n0, ret = -EAGAIN; 896 + unsigned long flags; 897 + 898 + local_irq_save(flags); 899 + perf_disable(); 900 + 901 + n0 = cpuc->n_events; 902 + if (n0 >= perf_max_events) 903 + goto out; 904 + 905 + cpuc->event[n0] = event; 906 + cpuc->events[n0] = event->hw.event_base; 907 + cpuc->current_idx[n0] = PIC_NO_INDEX; 908 + 909 + if (check_excludes(cpuc->event, n0, 1)) 910 + goto out; 911 + if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1)) 912 + goto out; 913 + 914 + cpuc->n_events++; 915 + cpuc->n_added++; 916 + 917 + ret = 0; 918 + out: 919 + perf_enable(); 920 + local_irq_restore(flags); 921 + return ret; 980 922 } 981 923 982 924 static int __hw_perf_event_init(struct perf_event *event) ··· 1065 847 struct perf_event *evts[MAX_HWEVENTS]; 1066 848 struct hw_perf_event *hwc = &event->hw; 1067 849 unsigned long events[MAX_HWEVENTS]; 850 + int current_idx_dmy[MAX_HWEVENTS]; 1068 851 const struct perf_event_map *pmap; 1069 - u64 enc; 1070 852 int n; 1071 853 1072 854 if (atomic_read(&nmi_active) < 0) ··· 1083 865 } else 1084 866 return -EOPNOTSUPP; 1085 867 1086 - /* We save the enable bits in the config_base. So to 1087 - * turn off sampling just write 'config', and to enable 1088 - * things write 'config | config_base'. 1089 - */ 868 + /* We save the enable bits in the config_base. */ 1090 869 hwc->config_base = sparc_pmu->irq_bit; 1091 870 if (!attr->exclude_user) 1092 871 hwc->config_base |= PCR_UTRACE; ··· 1094 879 1095 880 hwc->event_base = perf_event_encode(pmap); 1096 881 1097 - enc = pmap->encoding; 1098 - 1099 882 n = 0; 1100 883 if (event->group_leader != event) { 1101 884 n = collect_events(event->group_leader, 1102 885 perf_max_events - 1, 1103 - evts, events); 886 + evts, events, current_idx_dmy); 1104 887 if (n < 0) 1105 888 return -EINVAL; 1106 889 } ··· 1108 895 if (check_excludes(evts, n, 1)) 1109 896 return -EINVAL; 1110 897 1111 - if (sparc_check_constraints(events, n + 1)) 898 + if (sparc_check_constraints(evts, events, n + 1)) 1112 899 return -EINVAL; 900 + 901 + hwc->idx = PIC_NO_INDEX; 1113 902 1114 903 /* Try to do all error checking before this point, as unwinding 1115 904 * state after grabbing the PMC is difficult. 
··· 1125 910 atomic64_set(&hwc->period_left, hwc->sample_period); 1126 911 } 1127 912 1128 - if (pmap->pic_mask & PIC_UPPER) { 1129 - hwc->idx = PIC_UPPER_INDEX; 1130 - enc <<= sparc_pmu->upper_shift; 1131 - } else { 1132 - hwc->idx = PIC_LOWER_INDEX; 1133 - enc <<= sparc_pmu->lower_shift; 1134 - } 1135 - 1136 - hwc->config |= enc; 1137 913 return 0; 1138 914 } 1139 915 ··· 1174 968 struct perf_sample_data data; 1175 969 struct cpu_hw_events *cpuc; 1176 970 struct pt_regs *regs; 1177 - int idx; 971 + int i; 1178 972 1179 973 if (!atomic_read(&active_events)) 1180 974 return NOTIFY_DONE; ··· 1203 997 if (sparc_pmu->irq_bit) 1204 998 pcr_ops->write(cpuc->pcr); 1205 999 1206 - for (idx = 0; idx < MAX_HWEVENTS; idx++) { 1207 - struct perf_event *event = cpuc->events[idx]; 1000 + for (i = 0; i < cpuc->n_events; i++) { 1001 + struct perf_event *event = cpuc->event[i]; 1002 + int idx = cpuc->current_idx[i]; 1208 1003 struct hw_perf_event *hwc; 1209 1004 u64 val; 1210 1005 1211 - if (!test_bit(idx, cpuc->active_mask)) 1212 - continue; 1213 1006 hwc = &event->hw; 1214 1007 val = sparc_perf_event_update(event, hwc, idx); 1215 1008 if (val & (1ULL << 31)) ··· 1260 1055 1261 1056 pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); 1262 1057 1263 - /* All sparc64 PMUs currently have 2 events. But this simple 1264 - * driver only supports one active event at a time. 1265 - */ 1266 - perf_max_events = 1; 1058 + /* All sparc64 PMUs currently have 2 events. */ 1059 + perf_max_events = 2; 1267 1060 1268 1061 register_die_notifier(&perf_event_nmi_notifier); 1062 + } 1063 + 1064 + static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) 1065 + { 1066 + if (entry->nr < PERF_MAX_STACK_DEPTH) 1067 + entry->ip[entry->nr++] = ip; 1068 + } 1069 + 1070 + static void perf_callchain_kernel(struct pt_regs *regs, 1071 + struct perf_callchain_entry *entry) 1072 + { 1073 + unsigned long ksp, fp; 1074 + 1075 + callchain_store(entry, PERF_CONTEXT_KERNEL); 1076 + callchain_store(entry, regs->tpc); 1077 + 1078 + ksp = regs->u_regs[UREG_I6]; 1079 + fp = ksp + STACK_BIAS; 1080 + do { 1081 + struct sparc_stackf *sf; 1082 + struct pt_regs *regs; 1083 + unsigned long pc; 1084 + 1085 + if (!kstack_valid(current_thread_info(), fp)) 1086 + break; 1087 + 1088 + sf = (struct sparc_stackf *) fp; 1089 + regs = (struct pt_regs *) (sf + 1); 1090 + 1091 + if (kstack_is_trap_frame(current_thread_info(), regs)) { 1092 + if (user_mode(regs)) 1093 + break; 1094 + pc = regs->tpc; 1095 + fp = regs->u_regs[UREG_I6] + STACK_BIAS; 1096 + } else { 1097 + pc = sf->callers_pc; 1098 + fp = (unsigned long)sf->fp + STACK_BIAS; 1099 + } 1100 + callchain_store(entry, pc); 1101 + } while (entry->nr < PERF_MAX_STACK_DEPTH); 1102 + } 1103 + 1104 + static void perf_callchain_user_64(struct pt_regs *regs, 1105 + struct perf_callchain_entry *entry) 1106 + { 1107 + unsigned long ufp; 1108 + 1109 + callchain_store(entry, PERF_CONTEXT_USER); 1110 + callchain_store(entry, regs->tpc); 1111 + 1112 + ufp = regs->u_regs[UREG_I6] + STACK_BIAS; 1113 + do { 1114 + struct sparc_stackf *usf, sf; 1115 + unsigned long pc; 1116 + 1117 + usf = (struct sparc_stackf *) ufp; 1118 + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) 1119 + break; 1120 + 1121 + pc = sf.callers_pc; 1122 + ufp = (unsigned long)sf.fp + STACK_BIAS; 1123 + callchain_store(entry, pc); 1124 + } while (entry->nr < PERF_MAX_STACK_DEPTH); 1125 + } 1126 + 1127 + static void perf_callchain_user_32(struct pt_regs *regs, 1128 + struct perf_callchain_entry *entry) 1129 + { 1130 + 
1131 +
1132 + callchain_store(entry, PERF_CONTEXT_USER);
1133 + callchain_store(entry, regs->tpc);
1134 +
1135 + ufp = regs->u_regs[UREG_I6];
1136 + do {
1137 + struct sparc_stackf32 *usf, sf;
1138 + unsigned long pc;
1139 +
1140 + usf = (struct sparc_stackf32 *) ufp;
1141 + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1142 + break;
1143 +
1144 + pc = sf.callers_pc;
1145 + ufp = (unsigned long)sf.fp;
1146 + callchain_store(entry, pc);
1147 + } while (entry->nr < PERF_MAX_STACK_DEPTH);
1148 + }
1149 +
1150 + /* Like powerpc we can't get PMU interrupts within the PMU handler,
1151 + * so no need for separate NMI and IRQ chains as on x86.
1152 + */
1153 + static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
1154 +
1155 + struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1156 + {
1157 + struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
1158 +
1159 + entry->nr = 0;
1160 + if (!user_mode(regs)) {
1161 + stack_trace_flush();
1162 + perf_callchain_kernel(regs, entry);
1163 + if (current->mm)
1164 + regs = task_pt_regs(current);
1165 + else
1166 + regs = NULL;
1167 + }
1168 + if (regs) {
1169 + flushw_user();
1170 + if (test_thread_flag(TIF_32BIT))
1171 + perf_callchain_user_32(regs, entry);
1172 + else
1173 + perf_callchain_user_64(regs, entry);
1174 + }
1175 + return entry;
1269 1176 }
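Illustrative only, not part of the merged patches: with perf_max_events raised to 2 and the group constraint/exclude checks above, a two-event group can now be scheduled onto both PICs at once. A minimal userspace sketch that exercises this, assuming the standard perf_event_open() syscall and with error handling omitted:

        /* hypothetical test program, not from this merge */
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/perf_event.h>

        static int open_counter(__u64 config, int group_fd)
        {
                struct perf_event_attr attr;

                memset(&attr, 0, sizeof(attr));
                attr.size = sizeof(attr);
                attr.type = PERF_TYPE_HARDWARE;
                attr.config = config;

                /* counters start counting at open; group_fd ties them together */
                return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
        }

        int main(void)
        {
                long long cycles = 0, insns = 0;
                int leader, member, i;
                volatile int sink = 0;

                leader = open_counter(PERF_COUNT_HW_CPU_CYCLES, -1);
                member = open_counter(PERF_COUNT_HW_INSTRUCTIONS, leader);

                for (i = 0; i < 1000000; i++)   /* some work to count */
                        sink += i;

                read(leader, &cycles, sizeof(cycles));
                read(member, &insns, sizeof(insns));
                printf("cycles=%lld insns=%lld\n", cycles, insns);
                return 0;
        }

The perf_callchain() hook added above additionally lets call-graph recording (e.g. "perf record -g") attribute such counts to kernel and user call chains on sparc64.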
+3 -3
arch/sparc/kernel/sys_sparc_64.c
··· 365 365 void arch_pick_mmap_layout(struct mm_struct *mm)
366 366 {
367 367 unsigned long random_factor = 0UL;
368 + unsigned long gap;
368 369
369 370 if (current->flags & PF_RANDOMIZE) {
370 371 random_factor = get_random_int();
··· 380 379 * Fall back to the standard layout if the personality
381 380 * bit is set, or if the expected stack growth is unlimited:
382 381 */
382 + gap = rlimit(RLIMIT_STACK);
383 383 if (!test_thread_flag(TIF_32BIT) ||
384 384 (current->personality & ADDR_COMPAT_LAYOUT) ||
385 - current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
385 + gap == RLIM_INFINITY ||
386 386 sysctl_legacy_va_layout) {
387 387 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
388 388 mm->get_unmapped_area = arch_get_unmapped_area;
··· 391 389 } else {
392 390 /* We know it's 32-bit */
393 391 unsigned long task_size = STACK_TOP32;
394 - unsigned long gap;
395 392
396 - gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
397 393 if (gap < 128 * 1024 * 1024)
398 394 gap = 128 * 1024 * 1024;
399 395 if (gap > (task_size / 6 * 5))
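The rlimit() helper used above is just an accessor for the calling task's soft resource limit; caching it once in 'gap' lets both the layout decision and the later 32-bit clamp work from a single read. Roughly what the helper amounts to (a sketch for reference only; see the real definition in the resource/sched headers):

        /* sketch of the accessor this hunk switches to */
        static inline unsigned long rlimit_sketch(unsigned int limit)
        {
                return current->signal->rlim[limit].rlim_cur;
        }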
+25 -91
arch/sparc/kernel/time_32.c
··· 35 35 #include <linux/platform_device.h>
36 36
37 37 #include <asm/oplib.h>
38 + #include <asm/timex.h>
38 39 #include <asm/timer.h>
39 40 #include <asm/system.h>
40 41 #include <asm/irq.h>
··· 52 51 EXPORT_SYMBOL(rtc_lock);
53 52
54 53 static int set_rtc_mmss(unsigned long);
55 - static int sbus_do_settimeofday(struct timespec *tv);
56 54
57 55 unsigned long profile_pc(struct pt_regs *regs)
58 56 {
··· 75 75 EXPORT_SYMBOL(profile_pc);
76 76
77 77 __volatile__ unsigned int *master_l10_counter;
78 +
79 + u32 (*do_arch_gettimeoffset)(void);
78 80
79 81 /*
80 82 * timer_interrupt() needs to keep up the real-time clock,
··· 198 196 {
199 197 return of_register_driver(&clock_driver, &of_platform_bus_type);
200 198 }
201 -
202 199 /* Must be after subsys_initcall() so that busses are probed. Must
203 200 * be before device_initcall() because things like the RTC driver
204 201 * need to see the clock registers.
205 202 */
206 203 fs_initcall(clock_init);
207 204
205 +
206 + u32 sbus_do_gettimeoffset(void)
207 + {
208 + unsigned long val = *master_l10_counter;
209 + unsigned long usec = (val >> 10) & 0x1fffff;
210 +
211 + /* Limit hit? */
212 + if (val & 0x80000000)
213 + usec += 1000000 / HZ;
214 +
215 + return usec * 1000;
216 + }
217 +
218 +
219 + u32 arch_gettimeoffset(void)
220 + {
221 + if (unlikely(!do_arch_gettimeoffset))
222 + return 0;
223 + return do_arch_gettimeoffset();
224 + }
225 +
208 226 static void __init sbus_time_init(void)
209 227 {
228 + do_arch_gettimeoffset = sbus_do_gettimeoffset;
210 229
211 - BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM);
212 230 btfixup();
213 231
214 232 sparc_init_timers(timer_interrupt);
··· 246 224 sbus_time_init();
247 225 }
248 226
249 - static inline unsigned long do_gettimeoffset(void)
250 - {
251 - unsigned long val = *master_l10_counter;
252 - unsigned long usec = (val >> 10) & 0x1fffff;
253 -
254 - /* Limit hit? */
255 - if (val & 0x80000000)
256 - usec += 1000000 / HZ;
257 -
258 - return usec;
259 - }
260 -
261 - /* Ok, my cute asm atomicity trick doesn't work anymore.
262 - * There are just too many variables that need to be protected
263 - * now (both members of xtime, et al.)
264 - */
265 - void do_gettimeofday(struct timeval *tv)
266 - {
267 - unsigned long flags;
268 - unsigned long seq;
269 - unsigned long usec, sec;
270 - unsigned long max_ntp_tick = tick_usec - tickadj;
271 -
272 - do {
273 - seq = read_seqbegin_irqsave(&xtime_lock, flags);
274 - usec = do_gettimeoffset();
275 -
276 - /*
277 - * If time_adjust is negative then NTP is slowing the clock
278 - * so make sure not to go into next possible interval.
279 - * Better to lose some accuracy than have time go backwards..
280 - */
281 - if (unlikely(time_adjust < 0))
282 - usec = min(usec, max_ntp_tick);
283 -
284 - sec = xtime.tv_sec;
285 - usec += (xtime.tv_nsec / 1000);
286 - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
287 -
288 - while (usec >= 1000000) {
289 - usec -= 1000000;
290 - sec++;
291 - }
292 -
293 - tv->tv_sec = sec;
294 - tv->tv_usec = usec;
295 - }
296 -
297 - EXPORT_SYMBOL(do_gettimeofday);
298 -
299 - int do_settimeofday(struct timespec *tv)
300 - {
301 - int ret;
302 -
303 - write_seqlock_irq(&xtime_lock);
304 - ret = bus_do_settimeofday(tv);
305 - write_sequnlock_irq(&xtime_lock);
306 - clock_was_set();
307 - return ret;
308 - }
309 -
310 - EXPORT_SYMBOL(do_settimeofday);
311 -
312 - static int sbus_do_settimeofday(struct timespec *tv)
313 - {
314 - time_t wtm_sec, sec = tv->tv_sec;
315 - long wtm_nsec, nsec = tv->tv_nsec;
316 -
317 - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
318 - return -EINVAL;
319 -
320 - /*
321 - * This is revolting. We need to set "xtime" correctly. However, the
322 - * value in this location is the value at the most recent update of
323 - * wall time. Discover what correction gettimeofday() would have
324 - * made, and then undo it!
325 - */
326 - nsec -= 1000 * do_gettimeoffset();
327 -
328 - wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
329 - wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
330 -
331 - set_normalized_timespec(&xtime, sec, nsec);
332 - set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
333 -
334 - ntp_clear();
335 - return 0;
336 - }
337 227
338 228 static int set_rtc_mmss(unsigned long secs)
339 229 {
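With CONFIG_ARCH_USES_GETTIMEOFFSET selected in the Kconfig hunk above, the generic timekeeping code now provides do_gettimeofday()/do_settimeofday() itself and only asks the architecture for the intra-tick offset, which is why sbus_do_gettimeoffset() returns nanoseconds (usec * 1000). Roughly how the generic side consumes the hook, as a simplified kernel-side sketch (not the exact kernel/time/timekeeping.c code, and relying on the kernel globals xtime/xtime_lock):

        /* sketch: how a time read folds in arch_gettimeoffset() */
        void getnstimeofday_sketch(struct timespec *ts)
        {
                unsigned long seq;
                u32 nsecs;

                do {
                        seq = read_seqbegin(&xtime_lock);
                        *ts = xtime;
                        /* arch_gettimeoffset() -> sbus_do_gettimeoffset() here */
                        nsecs = arch_gettimeoffset();
                } while (read_seqretry(&xtime_lock, seq));

                timespec_add_ns(ts, nsecs);
        }

The do_arch_gettimeoffset function pointer stays NULL until sbus_time_init() runs, so arch_gettimeoffset() safely returns 0 before the timer is probed.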
+10 -2
arch/sparc/mm/fault_32.c
··· 18 18 #include <linux/signal.h>
19 19 #include <linux/mm.h>
20 20 #include <linux/smp.h>
21 + #include <linux/perf_event.h>
21 22 #include <linux/interrupt.h>
22 23 #include <linux/module.h>
23 24 #include <linux/kdebug.h>
··· 204 203 if (in_atomic() || !mm)
205 204 goto no_context;
206 205
206 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
207 +
207 208 down_read(&mm->mmap_sem);
208 209
209 210 /*
··· 252 249 goto do_sigbus;
253 250 BUG();
254 251 }
255 - if (fault & VM_FAULT_MAJOR)
252 + if (fault & VM_FAULT_MAJOR) {
256 253 current->maj_flt++;
257 - else
254 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
255 + regs, address);
256 + } else {
258 257 current->min_flt++;
258 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
259 + regs, address);
260 + }
259 261 up_read(&mm->mmap_sem);
260 262 return;
261 263
+10 -3
arch/sparc/mm/fault_64.c
··· 16 16 #include <linux/mm.h>
17 17 #include <linux/module.h>
18 18 #include <linux/init.h>
19 + #include <linux/perf_event.h>
19 20 #include <linux/interrupt.h>
20 21 #include <linux/kprobes.h>
21 22 #include <linux/kdebug.h>
··· 297 296 if (in_atomic() || !mm)
298 297 goto intr_or_no_mm;
299 298
299 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
300 +
300 301 if (!down_read_trylock(&mm->mmap_sem)) {
301 302 if ((regs->tstate & TSTATE_PRIV) &&
302 303 !search_exception_tables(regs->tpc)) {
··· 403 400 goto do_sigbus;
404 401 BUG();
405 402 }
406 - if (fault & VM_FAULT_MAJOR)
403 + if (fault & VM_FAULT_MAJOR) {
407 404 current->maj_flt++;
408 - else
405 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
406 + regs, address);
407 + } else {
409 408 current->min_flt++;
410 -
409 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
410 + regs, address);
411 + }
411 412 up_read(&mm->mmap_sem);
412 413
413 414 mm_rss = get_mm_rss(mm);
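The PERF_COUNT_SW_PAGE_FAULTS{,_MAJ,_MIN} hooks added to both fault handlers feed the generic software events, so the counts become visible through the usual interfaces (e.g. "perf stat -e faults,minor-faults,major-faults <cmd>" once the perf tool is built for sparc). A hypothetical userspace check, not part of this merge and with error handling omitted:

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/perf_event.h>

        int main(void)
        {
                struct perf_event_attr attr;
                long long faults = 0;
                char *p;
                int fd, i;

                memset(&attr, 0, sizeof(attr));
                attr.size = sizeof(attr);
                attr.type = PERF_TYPE_SOFTWARE;
                attr.config = PERF_COUNT_SW_PAGE_FAULTS;

                fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

                /* touch fresh pages so the fault paths above fire */
                p = malloc(1 << 20);
                for (i = 0; i < (1 << 20); i += 4096)
                        p[i] = 1;

                read(fd, &faults, sizeof(faults));
                printf("page faults seen: %lld\n", faults);
                return 0;
        }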