Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'misc' into devel

Conflicts:
arch/arm/Kconfig
arch/arm/common/Makefile
arch/arm/kernel/Makefile
arch/arm/kernel/smp.c

+1697 -1339
+2
Documentation/arm/00-INDEX
··· 34 34 - description of the virtual memory layout 35 35 nwfpe/ 36 36 - NWFPE floating point emulator documentation 37 + swp_emulation 38 + - SWP/SWPB emulation handler/logging description
+27
Documentation/arm/swp_emulation
··· 1 + Software emulation of deprecated SWP instruction (CONFIG_SWP_EMULATE) 2 + --------------------------------------------------------------------- 3 + 4 + ARMv6 architecture deprecates use of the SWP/SWPB instructions, and recommends 5 + moving to the load-locked/store-conditional instructions LDREX and STREX. 6 + 7 + ARMv7 multiprocessing extensions introduce the ability to disable these 8 + instructions, triggering an undefined instruction exception when executed. 9 + Trapped instructions are emulated using an LDREX/STREX or LDREXB/STREXB 10 + sequence. If a memory access fault (an abort) occurs, a segmentation fault is 11 + signalled to the triggering process. 12 + 13 + /proc/cpu/swp_emulation holds some statistics/information, including the PID of 14 + the last process to trigger the emulation to be invoked. For example: 15 + --- 16 + Emulated SWP: 12 17 + Emulated SWPB: 0 18 + Aborted SWP{B}: 1 19 + Last process: 314 20 + --- 21 + 22 + NOTE: when accessing uncached shared regions, LDREX/STREX rely on an external 23 + transaction monitoring block called a global monitor to maintain update 24 + atomicity. If your system does not implement a global monitor, this option can 25 + cause programs that perform SWP operations to uncached memory to deadlock, as 26 + the STREX operation will always fail. 27 +
+39 -26
arch/arm/Kconfig
··· 2 2 bool 3 3 default y 4 4 select HAVE_AOUT 5 + select HAVE_DMA_API_DEBUG 5 6 select HAVE_IDE 6 7 select HAVE_MEMBLOCK 7 8 select RTC_LIB ··· 35 34 <http://www.arm.linux.org.uk/>. 36 35 37 36 config HAVE_PWM 37 + bool 38 + 39 + config MIGHT_HAVE_PCI 38 40 bool 39 41 40 42 config SYS_SUPPORTS_APM_EMULATION ··· 230 226 bool "ARM Ltd. Integrator family" 231 227 select ARM_AMBA 232 228 select ARCH_HAS_CPUFREQ 233 - select COMMON_CLKDEV 229 + select CLKDEV_LOOKUP 234 230 select ICST 235 231 select GENERIC_CLOCKEVENTS 236 232 select PLAT_VERSATILE ··· 240 236 config ARCH_REALVIEW 241 237 bool "ARM Ltd. RealView family" 242 238 select ARM_AMBA 243 - select COMMON_CLKDEV 239 + select CLKDEV_LOOKUP 244 240 select HAVE_SCHED_CLOCK 245 241 select ICST 246 242 select GENERIC_CLOCKEVENTS ··· 255 251 bool "ARM Ltd. Versatile family" 256 252 select ARM_AMBA 257 253 select ARM_VIC 258 - select COMMON_CLKDEV 254 + select CLKDEV_LOOKUP 259 255 select HAVE_SCHED_CLOCK 260 256 select ICST 261 257 select GENERIC_CLOCKEVENTS ··· 270 266 select ARCH_WANT_OPTIONAL_GPIOLIB 271 267 select ARM_AMBA 272 268 select ARM_TIMER_SP804 273 - select COMMON_CLKDEV 269 + select CLKDEV_LOOKUP 274 270 select GENERIC_CLOCKEVENTS 275 271 select HAVE_CLK 276 272 select HAVE_SCHED_CLOCK ··· 292 288 depends on MMU 293 289 select CPU_V6 294 290 select ARM_AMBA 295 - select COMMON_CLKDEV 291 + select CLKDEV_LOOKUP 296 292 select GENERIC_CLOCKEVENTS 297 293 select ARCH_WANT_OPTIONAL_GPIOLIB 298 294 help ··· 310 306 select CPU_V6 311 307 select GENERIC_CLOCKEVENTS 312 308 select ARM_GIC 309 + select MIGHT_HAVE_PCI 313 310 select PCI_DOMAINS if PCI 314 311 help 315 312 Support for Cavium Networks CNS3XXX platform. 
··· 340 335 select CPU_ARM920T 341 336 select ARM_AMBA 342 337 select ARM_VIC 343 - select COMMON_CLKDEV 338 + select CLKDEV_LOOKUP 344 339 select ARCH_REQUIRE_GPIOLIB 345 340 select ARCH_HAS_HOLES_MEMORYMODEL 346 341 select ARCH_USES_GETTIMEOFFSET ··· 360 355 bool "Freescale MXC/iMX-based" 361 356 select GENERIC_CLOCKEVENTS 362 357 select ARCH_REQUIRE_GPIOLIB 363 - select COMMON_CLKDEV 358 + select CLKDEV_LOOKUP 364 359 help 365 360 Support for Freescale MXC/iMX-based family of processors 366 361 367 362 config ARCH_STMP3XXX 368 363 bool "Freescale STMP3xxx" 369 364 select CPU_ARM926T 370 - select COMMON_CLKDEV 365 + select CLKDEV_LOOKUP 371 366 select ARCH_REQUIRE_GPIOLIB 372 367 select GENERIC_CLOCKEVENTS 373 368 select USB_ARCH_HAS_EHCI ··· 447 442 select GENERIC_GPIO 448 443 select GENERIC_CLOCKEVENTS 449 444 select HAVE_SCHED_CLOCK 445 + select MIGHT_HAVE_PCI 450 446 select DMABOUNCE if PCI 451 447 help 452 448 Support for Intel's IXP4XX (XScale) family of processors. ··· 487 481 select HAVE_IDE 488 482 select ARM_AMBA 489 483 select USB_ARCH_HAS_OHCI 490 - select COMMON_CLKDEV 484 + select CLKDEV_LOOKUP 491 485 select GENERIC_TIME 492 486 select GENERIC_CLOCKEVENTS 493 487 help ··· 521 515 bool "Marvell PXA168/910/MMP2" 522 516 depends on MMU 523 517 select ARCH_REQUIRE_GPIOLIB 524 - select COMMON_CLKDEV 518 + select CLKDEV_LOOKUP 525 519 select GENERIC_CLOCKEVENTS 526 520 select HAVE_SCHED_CLOCK 527 521 select TICK_ONESHOT ··· 555 549 bool "Nuvoton W90X900 CPU" 556 550 select CPU_ARM926T 557 551 select ARCH_REQUIRE_GPIOLIB 558 - select COMMON_CLKDEV 552 + select CLKDEV_LOOKUP 559 553 select GENERIC_CLOCKEVENTS 560 554 help 561 555 Support for Nuvoton (Winbond logic dept.) ARM9 processor, ··· 569 563 config ARCH_NUC93X 570 564 bool "Nuvoton NUC93X CPU" 571 565 select CPU_ARM926T 572 - select COMMON_CLKDEV 566 + select CLKDEV_LOOKUP 573 567 help 574 568 Support for Nuvoton (Winbond logic dept.) 
NUC93X MCU,The NUC93X is a 575 569 low-power and high performance MPEG-4/JPEG multimedia controller chip. 576 570 577 571 config ARCH_TEGRA 578 572 bool "NVIDIA Tegra" 573 + select CLKDEV_LOOKUP 579 574 select GENERIC_TIME 580 575 select GENERIC_CLOCKEVENTS 581 576 select GENERIC_GPIO 582 577 select HAVE_CLK 583 578 select HAVE_SCHED_CLOCK 584 - select COMMON_CLKDEV 585 579 select ARCH_HAS_BARRIERS if CACHE_L2X0 586 580 select ARCH_HAS_CPUFREQ 587 581 help ··· 591 585 config ARCH_PNX4008 592 586 bool "Philips Nexperia PNX4008 Mobile" 593 587 select CPU_ARM926T 594 - select COMMON_CLKDEV 588 + select CLKDEV_LOOKUP 595 589 select ARCH_USES_GETTIMEOFFSET 596 590 help 597 591 This enables support for Philips PNX4008 mobile platform. ··· 601 595 depends on MMU 602 596 select ARCH_MTD_XIP 603 597 select ARCH_HAS_CPUFREQ 604 - select COMMON_CLKDEV 598 + select CLKDEV_LOOKUP 605 599 select ARCH_REQUIRE_GPIOLIB 606 600 select GENERIC_CLOCKEVENTS 607 601 select HAVE_SCHED_CLOCK ··· 780 774 bool "Telechips TCC ARM926-based systems" 781 775 select CPU_ARM926T 782 776 select HAVE_CLK 783 - select COMMON_CLKDEV 777 + select CLKDEV_LOOKUP 784 778 select GENERIC_CLOCKEVENTS 785 779 help 786 780 Support for Telechips TCC ARM926-based systems. ··· 805 799 select ARM_AMBA 806 800 select ARM_VIC 807 801 select GENERIC_CLOCKEVENTS 808 - select COMMON_CLKDEV 802 + select CLKDEV_LOOKUP 809 803 select GENERIC_GPIO 810 804 help 811 805 Support for ST-Ericsson U300 series mobile platforms. 
··· 815 809 select CPU_V7 816 810 select ARM_AMBA 817 811 select GENERIC_CLOCKEVENTS 818 - select COMMON_CLKDEV 812 + select CLKDEV_LOOKUP 819 813 select ARCH_REQUIRE_GPIOLIB 820 814 help 821 815 Support for ST-Ericsson's Ux500 architecture ··· 825 819 select ARM_AMBA 826 820 select ARM_VIC 827 821 select CPU_ARM926T 828 - select COMMON_CLKDEV 822 + select CLKDEV_LOOKUP 829 823 select GENERIC_CLOCKEVENTS 830 824 select ARCH_REQUIRE_GPIOLIB 831 825 help ··· 837 831 select ARCH_REQUIRE_GPIOLIB 838 832 select ZONE_DMA 839 833 select HAVE_IDE 840 - select COMMON_CLKDEV 834 + select CLKDEV_LOOKUP 841 835 select GENERIC_ALLOCATOR 842 836 select ARCH_HAS_HOLES_MEMORYMODEL 843 837 help ··· 858 852 bool "ST SPEAr" 859 853 select ARM_AMBA 860 854 select ARCH_REQUIRE_GPIOLIB 861 - select COMMON_CLKDEV 855 + select CLKDEV_LOOKUP 862 856 select GENERIC_CLOCKEVENTS 863 857 select HAVE_CLK 864 858 help ··· 1040 1034 default y 1041 1035 bool 1042 1036 1037 + config MULTI_IRQ_HANDLER 1038 + bool 1039 + help 1040 + Allow each machine to specify its own IRQ handler at run time. 1041 + 1043 1042 if !MMU 1044 1043 source "arch/arm/Kconfig-nommu" 1045 1044 endif ··· 1192 1181 bool 1193 1182 1194 1183 config PCI 1195 - bool "PCI support" if ARCH_INTEGRATOR_AP || ARCH_VERSATILE_PB || ARCH_IXP4XX || ARCH_KS8695 || MACH_ARMCORE || ARCH_CNS3XXX || SA1100_NANOENGINE 1184 + bool "PCI support" if MIGHT_HAVE_PCI 1196 1185 help 1197 1186 Find out whether you have a PCI motherboard. PCI is the name of a 1198 1187 bus system, i.e. the way the CPU talks to the other stuff inside ··· 1264 1253 config SMP_ON_UP 1265 1254 bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)" 1266 1255 depends on EXPERIMENTAL 1267 - depends on SMP && !XIP 1256 + depends on SMP && !XIP 1268 1257 default y 1269 1258 help 1270 1259 SMP kernels contain instructions which fail on non-SMP processors. 
··· 1283 1272 config HAVE_ARM_TWD 1284 1273 bool 1285 1274 depends on SMP 1275 + select TICK_ONESHOT 1286 1276 help 1287 1277 This options enables support for the ARM timer and watchdog unit 1288 1278 ··· 1347 1335 default 100 1348 1336 1349 1337 config THUMB2_KERNEL 1350 - bool "Compile the kernel in Thumb-2 mode" 1338 + bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)" 1351 1339 depends on CPU_V7 && !CPU_V6 && EXPERIMENTAL 1352 1340 select AEABI 1353 1341 select ARM_ASM_UNIFIED ··· 1561 1549 1562 1550 config CC_STACKPROTECTOR 1563 1551 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" 1552 + depends on EXPERIMENTAL 1564 1553 help 1565 1554 This option turns on the -fstack-protector GCC feature. This 1566 1555 feature puts, at the beginning of functions, a canary value on ··· 1758 1745 Internal configuration node for common cpufreq on Samsung SoC 1759 1746 1760 1747 config CPU_FREQ_S3C24XX 1761 - bool "CPUfreq driver for Samsung S3C24XX series CPUs" 1748 + bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)" 1762 1749 depends on ARCH_S3C2410 && CPU_FREQ && EXPERIMENTAL 1763 1750 select CPU_FREQ_S3C 1764 1751 help ··· 1770 1757 If in doubt, say N. 1771 1758 1772 1759 config CPU_FREQ_S3C24XX_PLL 1773 - bool "Support CPUfreq changing of PLL frequency" 1760 + bool "Support CPUfreq changing of PLL frequency (EXPERIMENTAL)" 1774 1761 depends on CPU_FREQ_S3C24XX && EXPERIMENTAL 1775 1762 help 1776 1763 Compile in support for changing the PLL frequency from the
+1 -1
arch/arm/Kconfig.debug
··· 31 31 reported is severely limited. 32 32 33 33 config ARM_UNWIND 34 - bool "Enable stack unwinding support" 34 + bool "Enable stack unwinding support (EXPERIMENTAL)" 35 35 depends on AEABI && EXPERIMENTAL 36 36 default y 37 37 help
-4
arch/arm/common/Kconfig
··· 37 37 38 38 config SHARP_SCOOP 39 39 bool 40 - 41 - config COMMON_CLKDEV 42 - bool 43 - select HAVE_CLK
-179
arch/arm/common/clkdev.c
··· 1 - /* 2 - * arch/arm/common/clkdev.c 3 - * 4 - * Copyright (C) 2008 Russell King. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 as 8 - * published by the Free Software Foundation. 9 - * 10 - * Helper for the clk API to assist looking up a struct clk. 11 - */ 12 - #include <linux/module.h> 13 - #include <linux/kernel.h> 14 - #include <linux/device.h> 15 - #include <linux/list.h> 16 - #include <linux/errno.h> 17 - #include <linux/err.h> 18 - #include <linux/string.h> 19 - #include <linux/mutex.h> 20 - #include <linux/clk.h> 21 - #include <linux/slab.h> 22 - 23 - #include <asm/clkdev.h> 24 - #include <mach/clkdev.h> 25 - 26 - static LIST_HEAD(clocks); 27 - static DEFINE_MUTEX(clocks_mutex); 28 - 29 - /* 30 - * Find the correct struct clk for the device and connection ID. 31 - * We do slightly fuzzy matching here: 32 - * An entry with a NULL ID is assumed to be a wildcard. 33 - * If an entry has a device ID, it must match 34 - * If an entry has a connection ID, it must match 35 - * Then we take the most specific entry - with the following 36 - * order of precedence: dev+con > dev only > con only. 
37 - */ 38 - static struct clk *clk_find(const char *dev_id, const char *con_id) 39 - { 40 - struct clk_lookup *p; 41 - struct clk *clk = NULL; 42 - int match, best = 0; 43 - 44 - list_for_each_entry(p, &clocks, node) { 45 - match = 0; 46 - if (p->dev_id) { 47 - if (!dev_id || strcmp(p->dev_id, dev_id)) 48 - continue; 49 - match += 2; 50 - } 51 - if (p->con_id) { 52 - if (!con_id || strcmp(p->con_id, con_id)) 53 - continue; 54 - match += 1; 55 - } 56 - 57 - if (match > best) { 58 - clk = p->clk; 59 - if (match != 3) 60 - best = match; 61 - else 62 - break; 63 - } 64 - } 65 - return clk; 66 - } 67 - 68 - struct clk *clk_get_sys(const char *dev_id, const char *con_id) 69 - { 70 - struct clk *clk; 71 - 72 - mutex_lock(&clocks_mutex); 73 - clk = clk_find(dev_id, con_id); 74 - if (clk && !__clk_get(clk)) 75 - clk = NULL; 76 - mutex_unlock(&clocks_mutex); 77 - 78 - return clk ? clk : ERR_PTR(-ENOENT); 79 - } 80 - EXPORT_SYMBOL(clk_get_sys); 81 - 82 - struct clk *clk_get(struct device *dev, const char *con_id) 83 - { 84 - const char *dev_id = dev ? 
dev_name(dev) : NULL; 85 - 86 - return clk_get_sys(dev_id, con_id); 87 - } 88 - EXPORT_SYMBOL(clk_get); 89 - 90 - void clk_put(struct clk *clk) 91 - { 92 - __clk_put(clk); 93 - } 94 - EXPORT_SYMBOL(clk_put); 95 - 96 - void clkdev_add(struct clk_lookup *cl) 97 - { 98 - mutex_lock(&clocks_mutex); 99 - list_add_tail(&cl->node, &clocks); 100 - mutex_unlock(&clocks_mutex); 101 - } 102 - EXPORT_SYMBOL(clkdev_add); 103 - 104 - void __init clkdev_add_table(struct clk_lookup *cl, size_t num) 105 - { 106 - mutex_lock(&clocks_mutex); 107 - while (num--) { 108 - list_add_tail(&cl->node, &clocks); 109 - cl++; 110 - } 111 - mutex_unlock(&clocks_mutex); 112 - } 113 - 114 - #define MAX_DEV_ID 20 115 - #define MAX_CON_ID 16 116 - 117 - struct clk_lookup_alloc { 118 - struct clk_lookup cl; 119 - char dev_id[MAX_DEV_ID]; 120 - char con_id[MAX_CON_ID]; 121 - }; 122 - 123 - struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, 124 - const char *dev_fmt, ...) 125 - { 126 - struct clk_lookup_alloc *cla; 127 - 128 - cla = kzalloc(sizeof(*cla), GFP_KERNEL); 129 - if (!cla) 130 - return NULL; 131 - 132 - cla->cl.clk = clk; 133 - if (con_id) { 134 - strlcpy(cla->con_id, con_id, sizeof(cla->con_id)); 135 - cla->cl.con_id = cla->con_id; 136 - } 137 - 138 - if (dev_fmt) { 139 - va_list ap; 140 - 141 - va_start(ap, dev_fmt); 142 - vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap); 143 - cla->cl.dev_id = cla->dev_id; 144 - va_end(ap); 145 - } 146 - 147 - return &cla->cl; 148 - } 149 - EXPORT_SYMBOL(clkdev_alloc); 150 - 151 - int clk_add_alias(const char *alias, const char *alias_dev_name, char *id, 152 - struct device *dev) 153 - { 154 - struct clk *r = clk_get(dev, id); 155 - struct clk_lookup *l; 156 - 157 - if (IS_ERR(r)) 158 - return PTR_ERR(r); 159 - 160 - l = clkdev_alloc(r, alias, alias_dev_name); 161 - clk_put(r); 162 - if (!l) 163 - return -ENODEV; 164 - clkdev_add(l); 165 - return 0; 166 - } 167 - EXPORT_SYMBOL(clk_add_alias); 168 - 169 - /* 170 - * clkdev_drop - 
remove a clock dynamically allocated 171 - */ 172 - void clkdev_drop(struct clk_lookup *cl) 173 - { 174 - mutex_lock(&clocks_mutex); 175 - list_del(&cl->node); 176 - mutex_unlock(&clocks_mutex); 177 - kfree(cl); 178 - } 179 - EXPORT_SYMBOL(clkdev_drop);
+8 -8
arch/arm/common/dmabounce.c
··· 328 328 * substitute the safe buffer for the unsafe one. 329 329 * (basically move the buffer from an unsafe area to a safe one) 330 330 */ 331 - dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, 331 + dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size, 332 332 enum dma_data_direction dir) 333 333 { 334 334 dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", ··· 338 338 339 339 return map_single(dev, ptr, size, dir); 340 340 } 341 - EXPORT_SYMBOL(dma_map_single); 341 + EXPORT_SYMBOL(__dma_map_single); 342 342 343 343 /* 344 344 * see if a mapped address was really a "safe" buffer and if so, copy ··· 346 346 * the safe buffer. (basically return things back to the way they 347 347 * should be) 348 348 */ 349 - void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, 349 + void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, 350 350 enum dma_data_direction dir) 351 351 { 352 352 dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", ··· 354 354 355 355 unmap_single(dev, dma_addr, size, dir); 356 356 } 357 - EXPORT_SYMBOL(dma_unmap_single); 357 + EXPORT_SYMBOL(__dma_unmap_single); 358 358 359 - dma_addr_t dma_map_page(struct device *dev, struct page *page, 359 + dma_addr_t __dma_map_page(struct device *dev, struct page *page, 360 360 unsigned long offset, size_t size, enum dma_data_direction dir) 361 361 { 362 362 dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n", ··· 372 372 373 373 return map_single(dev, page_address(page) + offset, size, dir); 374 374 } 375 - EXPORT_SYMBOL(dma_map_page); 375 + EXPORT_SYMBOL(__dma_map_page); 376 376 377 377 /* 378 378 * see if a mapped address was really a "safe" buffer and if so, copy ··· 380 380 * the safe buffer. 
(basically return things back to the way they 381 381 * should be) 382 382 */ 383 - void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, 383 + void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, 384 384 enum dma_data_direction dir) 385 385 { 386 386 dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", ··· 388 388 389 389 unmap_single(dev, dma_addr, size, dir); 390 390 } 391 - EXPORT_SYMBOL(dma_unmap_page); 391 + EXPORT_SYMBOL(__dma_unmap_page); 392 392 393 393 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr, 394 394 unsigned long off, size_t sz, enum dma_data_direction dir)
+26 -9
arch/arm/include/asm/assembler.h
··· 18 18 #endif 19 19 20 20 #include <asm/ptrace.h> 21 + #include <asm/domain.h> 21 22 22 23 /* 23 24 * Endian independent macros for shifting bytes within registers. ··· 158 157 #ifdef CONFIG_SMP 159 158 #define ALT_SMP(instr...) \ 160 159 9998: instr 160 + /* 161 + * Note: if you get assembler errors from ALT_UP() when building with 162 + * CONFIG_THUMB2_KERNEL, you almost certainly need to use 163 + * ALT_SMP( W(instr) ... ) 164 + */ 161 165 #define ALT_UP(instr...) \ 162 166 .pushsection ".alt.smp.init", "a" ;\ 163 167 .long 9998b ;\ 164 - instr ;\ 168 + 9997: instr ;\ 169 + .if . - 9997b != 4 ;\ 170 + .error "ALT_UP() content must assemble to exactly 4 bytes";\ 171 + .endif ;\ 165 172 .popsection 166 173 #define ALT_UP_B(label) \ 167 174 .equ up_b_offset, label - 9998b ;\ 168 175 .pushsection ".alt.smp.init", "a" ;\ 169 176 .long 9998b ;\ 170 - b . + up_b_offset ;\ 177 + W(b) . + up_b_offset ;\ 171 178 .popsection 172 179 #else 173 180 #define ALT_SMP(instr...) ··· 186 177 /* 187 178 * SMP data memory barrier 188 179 */ 189 - .macro smp_dmb 180 + .macro smp_dmb mode 190 181 #ifdef CONFIG_SMP 191 182 #if __LINUX_ARM_ARCH__ >= 7 183 + .ifeqs "\mode","arm" 192 184 ALT_SMP(dmb) 185 + .else 186 + ALT_SMP(W(dmb)) 187 + .endif 193 188 #elif __LINUX_ARM_ARCH__ == 6 194 189 ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb 195 190 #else 196 191 #error Incompatible SMP platform 197 192 #endif 193 + .ifeqs "\mode","arm" 198 194 ALT_UP(nop) 195 + .else 196 + ALT_UP(W(nop)) 197 + .endif 199 198 #endif 200 199 .endm 201 200 ··· 223 206 */ 224 207 #ifdef CONFIG_THUMB2_KERNEL 225 208 226 - .macro usraccoff, instr, reg, ptr, inc, off, cond, abort 209 + .macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T() 227 210 9999: 228 211 .if \inc == 1 229 - \instr\cond\()bt \reg, [\ptr, #\off] 212 + \instr\cond\()b\()\t\().w \reg, [\ptr, #\off] 230 213 .elseif \inc == 4 231 - \instr\cond\()t \reg, [\ptr, #\off] 214 + \instr\cond\()\t\().w \reg, [\ptr, #\off] 232 215 .else 233 216 
.error "Unsupported inc macro argument" 234 217 .endif ··· 263 246 264 247 #else /* !CONFIG_THUMB2_KERNEL */ 265 248 266 - .macro usracc, instr, reg, ptr, inc, cond, rept, abort 249 + .macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=T() 267 250 .rept \rept 268 251 9999: 269 252 .if \inc == 1 270 - \instr\cond\()bt \reg, [\ptr], #\inc 253 + \instr\cond\()b\()\t \reg, [\ptr], #\inc 271 254 .elseif \inc == 4 272 - \instr\cond\()t \reg, [\ptr], #\inc 255 + \instr\cond\()\t \reg, [\ptr], #\inc 273 256 .else 274 257 .error "Unsupported inc macro argument" 275 258 .endif
+2
arch/arm/include/asm/cache.h
··· 23 23 #define ARCH_SLAB_MINALIGN 8 24 24 #endif 25 25 26 + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) 27 + 26 28 #endif
+6 -16
arch/arm/include/asm/clkdev.h
··· 12 12 #ifndef __ASM_CLKDEV_H 13 13 #define __ASM_CLKDEV_H 14 14 15 - struct clk; 16 - struct device; 15 + #include <linux/slab.h> 17 16 18 - struct clk_lookup { 19 - struct list_head node; 20 - const char *dev_id; 21 - const char *con_id; 22 - struct clk *clk; 23 - }; 17 + #include <mach/clkdev.h> 24 18 25 - struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, 26 - const char *dev_fmt, ...); 27 - 28 - void clkdev_add(struct clk_lookup *cl); 29 - void clkdev_drop(struct clk_lookup *cl); 30 - 31 - void clkdev_add_table(struct clk_lookup *, size_t); 32 - int clk_add_alias(const char *, const char *, char *, struct device *); 19 + static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size) 20 + { 21 + return kzalloc(size, GFP_KERNEL); 22 + } 33 23 34 24 #endif
+69 -24
arch/arm/include/asm/dma-mapping.h
··· 5 5 6 6 #include <linux/mm_types.h> 7 7 #include <linux/scatterlist.h> 8 + #include <linux/dma-debug.h> 8 9 9 10 #include <asm-generic/dma-coherent.h> 10 11 #include <asm/memory.h> 11 12 13 + #ifdef __arch_page_to_dma 14 + #error Please update to __arch_pfn_to_dma 15 + #endif 16 + 12 17 /* 13 - * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions 14 - * used internally by the DMA-mapping API to provide DMA addresses. They 15 - * must not be used by drivers. 18 + * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private 19 + * functions used internally by the DMA-mapping API to provide DMA 20 + * addresses. They must not be used by drivers. 16 21 */ 17 - #ifndef __arch_page_to_dma 18 - static inline dma_addr_t page_to_dma(struct device *dev, struct page *page) 22 + #ifndef __arch_pfn_to_dma 23 + static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn) 19 24 { 20 - return (dma_addr_t)__pfn_to_bus(page_to_pfn(page)); 25 + return (dma_addr_t)__pfn_to_bus(pfn); 21 26 } 22 27 23 - static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr) 28 + static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr) 24 29 { 25 - return pfn_to_page(__bus_to_pfn(addr)); 30 + return __bus_to_pfn(addr); 26 31 } 27 32 28 33 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr) ··· 40 35 return (dma_addr_t)__virt_to_bus((unsigned long)(addr)); 41 36 } 42 37 #else 43 - static inline dma_addr_t page_to_dma(struct device *dev, struct page *page) 38 + static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn) 44 39 { 45 - return __arch_page_to_dma(dev, page); 40 + return __arch_pfn_to_dma(dev, pfn); 46 41 } 47 42 48 - static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr) 43 + static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr) 49 44 { 50 - return __arch_dma_to_page(dev, addr); 45 + return __arch_dma_to_pfn(dev, addr); 51 46 } 52 
47 53 48 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr) ··· 298 293 /* 299 294 * The DMA API, implemented by dmabounce.c. See below for descriptions. 300 295 */ 301 - extern dma_addr_t dma_map_single(struct device *, void *, size_t, 296 + extern dma_addr_t __dma_map_single(struct device *, void *, size_t, 302 297 enum dma_data_direction); 303 - extern void dma_unmap_single(struct device *, dma_addr_t, size_t, 298 + extern void __dma_unmap_single(struct device *, dma_addr_t, size_t, 304 299 enum dma_data_direction); 305 - extern dma_addr_t dma_map_page(struct device *, struct page *, 300 + extern dma_addr_t __dma_map_page(struct device *, struct page *, 306 301 unsigned long, size_t, enum dma_data_direction); 307 - extern void dma_unmap_page(struct device *, dma_addr_t, size_t, 302 + extern void __dma_unmap_page(struct device *, dma_addr_t, size_t, 308 303 enum dma_data_direction); 309 304 310 305 /* ··· 328 323 } 329 324 330 325 326 + static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr, 327 + size_t size, enum dma_data_direction dir) 328 + { 329 + __dma_single_cpu_to_dev(cpu_addr, size, dir); 330 + return virt_to_dma(dev, cpu_addr); 331 + } 332 + 333 + static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page, 334 + unsigned long offset, size_t size, enum dma_data_direction dir) 335 + { 336 + __dma_page_cpu_to_dev(page, offset, size, dir); 337 + return pfn_to_dma(dev, page_to_pfn(page)) + offset; 338 + } 339 + 340 + static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle, 341 + size_t size, enum dma_data_direction dir) 342 + { 343 + __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir); 344 + } 345 + 346 + static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle, 347 + size_t size, enum dma_data_direction dir) 348 + { 349 + __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), 350 + handle & ~PAGE_MASK, size, dir); 351 + } 352 + #endif /* 
CONFIG_DMABOUNCE */ 353 + 331 354 /** 332 355 * dma_map_single - map a single buffer for streaming DMA 333 356 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices ··· 373 340 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, 374 341 size_t size, enum dma_data_direction dir) 375 342 { 343 + dma_addr_t addr; 344 + 376 345 BUG_ON(!valid_dma_direction(dir)); 377 346 378 - __dma_single_cpu_to_dev(cpu_addr, size, dir); 347 + addr = __dma_map_single(dev, cpu_addr, size, dir); 348 + debug_dma_map_page(dev, virt_to_page(cpu_addr), 349 + (unsigned long)cpu_addr & ~PAGE_MASK, size, 350 + dir, addr, true); 379 351 380 - return virt_to_dma(dev, cpu_addr); 352 + return addr; 381 353 } 382 354 383 355 /** ··· 402 364 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, 403 365 unsigned long offset, size_t size, enum dma_data_direction dir) 404 366 { 367 + dma_addr_t addr; 368 + 405 369 BUG_ON(!valid_dma_direction(dir)); 406 370 407 - __dma_page_cpu_to_dev(page, offset, size, dir); 371 + addr = __dma_map_page(dev, page, offset, size, dir); 372 + debug_dma_map_page(dev, page, offset, size, dir, addr, false); 408 373 409 - return page_to_dma(dev, page) + offset; 374 + return addr; 410 375 } 411 376 412 377 /** ··· 429 388 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, 430 389 size_t size, enum dma_data_direction dir) 431 390 { 432 - __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir); 391 + debug_dma_unmap_page(dev, handle, size, dir, true); 392 + __dma_unmap_single(dev, handle, size, dir); 433 393 } 434 394 435 395 /** ··· 450 408 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle, 451 409 size_t size, enum dma_data_direction dir) 452 410 { 453 - __dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK, 454 - size, dir); 411 + debug_dma_unmap_page(dev, handle, size, dir, false); 412 + __dma_unmap_page(dev, handle, size, dir); 455 413 } 456 - 
#endif /* CONFIG_DMABOUNCE */ 457 414 458 415 /** 459 416 * dma_sync_single_range_for_cpu ··· 478 437 { 479 438 BUG_ON(!valid_dma_direction(dir)); 480 439 440 + debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir); 441 + 481 442 if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir)) 482 443 return; 483 444 ··· 491 448 enum dma_data_direction dir) 492 449 { 493 450 BUG_ON(!valid_dma_direction(dir)); 451 + 452 + debug_dma_sync_single_for_device(dev, handle + offset, size, dir); 494 453 495 454 if (!dmabounce_sync_for_device(dev, handle, offset, size, dir)) 496 455 return;
+29 -2
arch/arm/include/asm/domain.h
··· 45 45 */ 46 46 #define DOMAIN_NOACCESS 0 47 47 #define DOMAIN_CLIENT 1 48 + #ifdef CONFIG_CPU_USE_DOMAINS 48 49 #define DOMAIN_MANAGER 3 50 + #else 51 + #define DOMAIN_MANAGER 1 52 + #endif 49 53 50 54 #define domain_val(dom,type) ((type) << (2*(dom))) 51 55 52 56 #ifndef __ASSEMBLY__ 53 57 54 - #ifdef CONFIG_MMU 58 + #ifdef CONFIG_CPU_USE_DOMAINS 55 59 #define set_domain(x) \ 56 60 do { \ 57 61 __asm__ __volatile__( \ ··· 78 74 #define modify_domain(dom,type) do { } while (0) 79 75 #endif 80 76 77 + /* 78 + * Generate the T (user) versions of the LDR/STR and related 79 + * instructions (inline assembly) 80 + */ 81 + #ifdef CONFIG_CPU_USE_DOMAINS 82 + #define T(instr) #instr "t" 83 + #else 84 + #define T(instr) #instr 81 85 #endif 82 - #endif /* !__ASSEMBLY__ */ 86 + 87 + #else /* __ASSEMBLY__ */ 88 + 89 + /* 90 + * Generate the T (user) versions of the LDR/STR and related 91 + * instructions 92 + */ 93 + #ifdef CONFIG_CPU_USE_DOMAINS 94 + #define T(instr) instr ## t 95 + #else 96 + #define T(instr) instr 97 + #endif 98 + 99 + #endif /* __ASSEMBLY__ */ 100 + 101 + #endif /* !__ASM_PROC_DOMAIN_H */
+44
arch/arm/include/asm/entry-macro-multi.S
··· 1 + /* 2 + * Interrupt handling. Preserves r7, r8, r9 3 + */ 4 + .macro arch_irq_handler_default 5 + get_irqnr_preamble r5, lr 6 + 1: get_irqnr_and_base r0, r6, r5, lr 7 + movne r1, sp 8 + @ 9 + @ routine called with r0 = irq number, r1 = struct pt_regs * 10 + @ 11 + adrne lr, BSYM(1b) 12 + bne asm_do_IRQ 13 + 14 + #ifdef CONFIG_SMP 15 + /* 16 + * XXX 17 + * 18 + * this macro assumes that irqstat (r6) and base (r5) are 19 + * preserved from get_irqnr_and_base above 20 + */ 21 + ALT_SMP(test_for_ipi r0, r6, r5, lr) 22 + ALT_UP_B(9997f) 23 + movne r1, sp 24 + adrne lr, BSYM(1b) 25 + bne do_IPI 26 + 27 + #ifdef CONFIG_LOCAL_TIMERS 28 + test_for_ltirq r0, r6, r5, lr 29 + movne r0, sp 30 + adrne lr, BSYM(1b) 31 + bne do_local_timer 32 + #endif 33 + #endif 34 + 9997: 35 + .endm 36 + 37 + .macro arch_irq_handler, symbol_name 38 + .align 5 39 + .global \symbol_name 40 + \symbol_name: 41 + mov r4, lr 42 + arch_irq_handler_default 43 + mov pc, r4 44 + .endm
+5 -4
arch/arm/include/asm/futex.h
··· 13 13 #include <linux/preempt.h> 14 14 #include <linux/uaccess.h> 15 15 #include <asm/errno.h> 16 + #include <asm/domain.h> 16 17 17 18 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 18 19 __asm__ __volatile__( \ 19 - "1: ldrt %1, [%2]\n" \ 20 + "1: " T(ldr) " %1, [%2]\n" \ 20 21 " " insn "\n" \ 21 - "2: strt %0, [%2]\n" \ 22 + "2: " T(str) " %0, [%2]\n" \ 22 23 " mov %0, #0\n" \ 23 24 "3:\n" \ 24 25 " .pushsection __ex_table,\"a\"\n" \ ··· 98 97 pagefault_disable(); /* implies preempt_disable() */ 99 98 100 99 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" 101 - "1: ldrt %0, [%3]\n" 100 + "1: " T(ldr) " %0, [%3]\n" 102 101 " teq %0, %1\n" 103 102 " it eq @ explicit IT needed for the 2b label\n" 104 - "2: streqt %2, [%3]\n" 103 + "2: " T(streq) " %2, [%3]\n" 105 104 "3:\n" 106 105 " .pushsection __ex_table,\"a\"\n" 107 106 " .align 3\n"
+18
arch/arm/include/asm/hardirq.h
··· 5 5 #include <linux/threads.h> 6 6 #include <asm/irq.h> 7 7 8 + #define NR_IPI 5 9 + 8 10 typedef struct { 9 11 unsigned int __softirq_pending; 12 + #ifdef CONFIG_LOCAL_TIMERS 10 13 unsigned int local_timer_irqs; 14 + #endif 15 + #ifdef CONFIG_SMP 16 + unsigned int ipi_irqs[NR_IPI]; 17 + #endif 11 18 } ____cacheline_aligned irq_cpustat_t; 12 19 13 20 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ 21 + 22 + #define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++ 23 + #define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member) 24 + 25 + #ifdef CONFIG_SMP 26 + u64 smp_irq_stat_cpu(unsigned int cpu); 27 + #else 28 + #define smp_irq_stat_cpu(cpu) 0 29 + #endif 30 + 31 + #define arch_irq_stat_cpu smp_irq_stat_cpu 14 32 15 33 #if NR_IRQS > 512 16 34 #define HARDIRQ_BITS 10
-12
arch/arm/include/asm/localtimer.h
··· 30 30 #include "smp_twd.h" 31 31 32 32 #define local_timer_ack() twd_timer_ack() 33 - #define local_timer_stop() twd_timer_stop() 34 33 35 34 #else 36 35 ··· 39 40 */ 40 41 int local_timer_ack(void); 41 42 42 - /* 43 - * Stop a local timer interrupt. 44 - */ 45 - void local_timer_stop(void); 46 - 47 43 #endif 48 44 49 45 /* 50 46 * Setup a local timer interrupt for a CPU. 51 47 */ 52 48 void local_timer_setup(struct clock_event_device *); 53 - 54 - #else 55 - 56 - static inline void local_timer_stop(void) 57 - { 58 - } 59 49 60 50 #endif 61 51
+9
arch/arm/include/asm/mach/arch.h
··· 37 37 struct meminfo *); 38 38 void (*reserve)(void);/* reserve mem blocks */ 39 39 void (*map_io)(void);/* IO mapping function */ 40 + void (*init_early)(void); 40 41 void (*init_irq)(void); 41 42 struct sys_timer *timer; /* system tick timer */ 42 43 void (*init_machine)(void); 44 + #ifdef CONFIG_MULTI_IRQ_HANDLER 45 + void (*handle_irq)(struct pt_regs *); 46 + #endif 43 47 }; 48 + 49 + /* 50 + * Current machine - only accessible during boot. 51 + */ 52 + extern struct machine_desc *machine_desc; 44 53 45 54 /* 46 55 * Set of macros to define architecture features. This is built into
+5 -3
arch/arm/include/asm/mach/irq.h
··· 17 17 /* 18 18 * This is internal. Do not use it. 19 19 */ 20 - extern unsigned int arch_nr_irqs; 21 - extern void (*init_arch_irq)(void); 22 20 extern void init_FIQ(void); 23 - extern int show_fiq_list(struct seq_file *, void *); 21 + extern int show_fiq_list(struct seq_file *, int); 22 + 23 + #ifdef CONFIG_MULTI_IRQ_HANDLER 24 + extern void (*handle_arch_irq)(struct pt_regs *); 25 + #endif 24 26 25 27 /* 26 28 * This is for easy migration, but should be changed in the source
-1
arch/arm/include/asm/mach/time.h
··· 43 43 #endif 44 44 }; 45 45 46 - extern struct sys_timer *system_timer; 47 46 extern void timer_tick(void); 48 47 49 48 #endif
+9 -8
arch/arm/include/asm/smp.h
··· 33 33 /* 34 34 * generate IPI list text 35 35 */ 36 - extern void show_ipi_list(struct seq_file *p); 36 + extern void show_ipi_list(struct seq_file *, int); 37 37 38 38 /* 39 39 * Called from assembly code, this handles an IPI. 40 40 */ 41 - asmlinkage void do_IPI(struct pt_regs *regs); 41 + asmlinkage void do_IPI(int ipinr, struct pt_regs *regs); 42 42 43 43 /* 44 44 * Setup the set of possible CPUs (via set_cpu_possible) 45 45 */ 46 46 extern void smp_init_cpus(void); 47 47 48 - /* 49 - * Move global data into per-processor storage. 50 - */ 51 - extern void smp_store_cpu_info(unsigned int cpuid); 52 48 53 49 /* 54 50 * Raise an IPI cross call on CPUs in callmap. 55 51 */ 56 - extern void smp_cross_call(const struct cpumask *mask); 52 + extern void smp_cross_call(const struct cpumask *mask, int ipi); 57 53 58 54 /* 59 55 * Boot a secondary CPU, and assign it the specified idle task. ··· 67 71 * Perform platform specific initialisation of the specified CPU. 68 72 */ 69 73 extern void platform_secondary_init(unsigned int cpu); 74 + 75 + /* 76 + * Initialize cpu_possible map, and enable coherency 77 + */ 78 + extern void platform_smp_prepare_cpus(unsigned int); 70 79 71 80 /* 72 81 * Initial data for bringing up a secondary CPU. ··· 98 97 /* 99 98 * show local interrupt info 100 99 */ 101 - extern void show_local_irqs(struct seq_file *); 100 + extern void show_local_irqs(struct seq_file *, int); 102 101 103 102 #endif /* ifndef __ASM_ARM_SMP_H */
-17
arch/arm/include/asm/smp_mpidr.h
··· 1 - #ifndef ASMARM_SMP_MIDR_H 2 - #define ASMARM_SMP_MIDR_H 3 - 4 - #define hard_smp_processor_id() \ 5 - ({ \ 6 - unsigned int cpunum; \ 7 - __asm__("\n" \ 8 - "1: mrc p15, 0, %0, c0, c0, 5\n" \ 9 - " .pushsection \".alt.smp.init\", \"a\"\n"\ 10 - " .long 1b\n" \ 11 - " mov %0, #0\n" \ 12 - " .popsection" \ 13 - : "=r" (cpunum)); \ 14 - cpunum &= 0x0F; \ 15 - }) 16 - 17 - #endif
-1
arch/arm/include/asm/smp_twd.h
··· 22 22 23 23 extern void __iomem *twd_base; 24 24 25 - void twd_timer_stop(void); 26 25 int twd_timer_ack(void); 27 26 void twd_timer_setup(struct clock_event_device *); 28 27
+7
arch/arm/include/asm/system.h
··· 124 124 #define vectors_high() (0) 125 125 #endif 126 126 127 + #if __LINUX_ARM_ARCH__ >= 7 || \ 128 + (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) 129 + #define sev() __asm__ __volatile__ ("sev" : : : "memory") 130 + #define wfe() __asm__ __volatile__ ("wfe" : : : "memory") 131 + #define wfi() __asm__ __volatile__ ("wfi" : : : "memory") 132 + #endif 133 + 127 134 #if __LINUX_ARM_ARCH__ >= 7 128 135 #define isb() __asm__ __volatile__ ("isb" : : : "memory") 129 136 #define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
+2
arch/arm/include/asm/traps.h
··· 46 46 extern void __init early_trap_init(void); 47 47 extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame); 48 48 49 + extern void *vectors_page; 50 + 49 51 #endif
+8 -8
arch/arm/include/asm/uaccess.h
··· 227 227 228 228 #define __get_user_asm_byte(x,addr,err) \ 229 229 __asm__ __volatile__( \ 230 - "1: ldrbt %1,[%2]\n" \ 230 + "1: " T(ldrb) " %1,[%2],#0\n" \ 231 231 "2:\n" \ 232 232 " .pushsection .fixup,\"ax\"\n" \ 233 233 " .align 2\n" \ ··· 263 263 264 264 #define __get_user_asm_word(x,addr,err) \ 265 265 __asm__ __volatile__( \ 266 - "1: ldrt %1,[%2]\n" \ 266 + "1: " T(ldr) " %1,[%2],#0\n" \ 267 267 "2:\n" \ 268 268 " .pushsection .fixup,\"ax\"\n" \ 269 269 " .align 2\n" \ ··· 308 308 309 309 #define __put_user_asm_byte(x,__pu_addr,err) \ 310 310 __asm__ __volatile__( \ 311 - "1: strbt %1,[%2]\n" \ 311 + "1: " T(strb) " %1,[%2],#0\n" \ 312 312 "2:\n" \ 313 313 " .pushsection .fixup,\"ax\"\n" \ 314 314 " .align 2\n" \ ··· 341 341 342 342 #define __put_user_asm_word(x,__pu_addr,err) \ 343 343 __asm__ __volatile__( \ 344 - "1: strt %1,[%2]\n" \ 344 + "1: " T(str) " %1,[%2],#0\n" \ 345 345 "2:\n" \ 346 346 " .pushsection .fixup,\"ax\"\n" \ 347 347 " .align 2\n" \ ··· 366 366 367 367 #define __put_user_asm_dword(x,__pu_addr,err) \ 368 368 __asm__ __volatile__( \ 369 - ARM( "1: strt " __reg_oper1 ", [%1], #4\n" ) \ 370 - ARM( "2: strt " __reg_oper0 ", [%1]\n" ) \ 371 - THUMB( "1: strt " __reg_oper1 ", [%1]\n" ) \ 372 - THUMB( "2: strt " __reg_oper0 ", [%1, #4]\n" ) \ 369 + ARM( "1: " T(str) " " __reg_oper1 ", [%1], #4\n" ) \ 370 + ARM( "2: " T(str) " " __reg_oper0 ", [%1]\n" ) \ 371 + THUMB( "1: " T(str) " " __reg_oper1 ", [%1]\n" ) \ 372 + THUMB( "2: " T(str) " " __reg_oper0 ", [%1, #4]\n" ) \ 373 373 "3:\n" \ 374 374 " .pushsection .fixup,\"ax\"\n" \ 375 375 " .align 2\n" \
+3 -1
arch/arm/kernel/Makefile
··· 30 30 obj-$(CONFIG_ISA_DMA) += dma-isa.o 31 31 obj-$(CONFIG_PCI) += bios32.o isa.o 32 32 obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o 33 - obj-$(CONFIG_SMP) += smp.o 33 + obj-$(CONFIG_SMP) += smp.o smp_tlb.o 34 34 obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o 35 35 obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o 36 36 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o ··· 44 44 obj-$(CONFIG_ARM_UNWIND) += unwind.o 45 45 obj-$(CONFIG_HAVE_TCM) += tcm.o 46 46 obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 47 + obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o 48 + CFLAGS_swp_emulate.o := -Wa,-march=armv7-a 47 49 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o 48 50 49 51 obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
+19 -33
arch/arm/kernel/entry-armv.S
··· 25 25 #include <asm/tls.h> 26 26 27 27 #include "entry-header.S" 28 + #include <asm/entry-macro-multi.S> 28 29 29 30 /* 30 31 * Interrupt handling. Preserves r7, r8, r9 31 32 */ 32 33 .macro irq_handler 33 - get_irqnr_preamble r5, lr 34 - 1: get_irqnr_and_base r0, r6, r5, lr 35 - movne r1, sp 36 - @ 37 - @ routine called with r0 = irq number, r1 = struct pt_regs * 38 - @ 39 - adrne lr, BSYM(1b) 40 - bne asm_do_IRQ 41 - 42 - #ifdef CONFIG_SMP 43 - /* 44 - * XXX 45 - * 46 - * this macro assumes that irqstat (r6) and base (r5) are 47 - * preserved from get_irqnr_and_base above 48 - */ 49 - ALT_SMP(test_for_ipi r0, r6, r5, lr) 50 - ALT_UP_B(9997f) 51 - movne r0, sp 52 - adrne lr, BSYM(1b) 53 - bne do_IPI 54 - 55 - #ifdef CONFIG_LOCAL_TIMERS 56 - test_for_ltirq r0, r6, r5, lr 57 - movne r0, sp 58 - adrne lr, BSYM(1b) 59 - bne do_local_timer 34 + #ifdef CONFIG_MULTI_IRQ_HANDLER 35 + ldr r5, =handle_arch_irq 36 + mov r0, sp 37 + ldr r5, [r5] 38 + adr lr, BSYM(9997f) 39 + teq r5, #0 40 + movne pc, r5 60 41 #endif 42 + arch_irq_handler_default 61 43 9997: 62 - #endif 63 - 64 44 .endm 65 45 66 46 #ifdef CONFIG_KPROBES ··· 715 735 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack 716 736 THUMB( str sp, [ip], #4 ) 717 737 THUMB( str lr, [ip], #4 ) 718 - #ifdef CONFIG_MMU 738 + #ifdef CONFIG_CPU_USE_DOMAINS 719 739 ldr r6, [r2, #TI_CPU_DOMAIN] 720 740 #endif 721 741 set_tls r3, r4, r5 ··· 724 744 ldr r8, =__stack_chk_guard 725 745 ldr r7, [r7, #TSK_STACK_CANARY] 726 746 #endif 727 - #ifdef CONFIG_MMU 747 + #ifdef CONFIG_CPU_USE_DOMAINS 728 748 mcr p15, 0, r6, c3, c0, 0 @ Set domain register 729 749 #endif 730 750 mov r5, r0 ··· 822 842 */ 823 843 824 844 __kuser_memory_barrier: @ 0xffff0fa0 825 - smp_dmb 845 + smp_dmb arm 826 846 usr_ret lr 827 847 828 848 .align 5 ··· 939 959 940 960 #else 941 961 942 - smp_dmb 962 + smp_dmb arm 943 963 1: ldrex r3, [r2] 944 964 subs r3, r3, r0 945 965 strexeq r3, r1, [r2] ··· 1225 1245 .space 4 1226 1246 cr_no_alignment: 1227 
1247 .space 4 1248 + 1249 + #ifdef CONFIG_MULTI_IRQ_HANDLER 1250 + .globl handle_arch_irq 1251 + handle_arch_irq: 1252 + .space 4 1253 + #endif
+8 -2
arch/arm/kernel/fiq.c
··· 45 45 #include <asm/fiq.h> 46 46 #include <asm/irq.h> 47 47 #include <asm/system.h> 48 + #include <asm/traps.h> 48 49 49 50 static unsigned long no_fiq_insn; 50 51 ··· 68 67 69 68 static struct fiq_handler *current_fiq = &default_owner; 70 69 71 - int show_fiq_list(struct seq_file *p, void *v) 70 + int show_fiq_list(struct seq_file *p, int prec) 72 71 { 73 72 if (current_fiq != &default_owner) 74 - seq_printf(p, "FIQ: %s\n", current_fiq->name); 73 + seq_printf(p, "%*s: %s\n", prec, "FIQ", 74 + current_fiq->name); 75 75 76 76 return 0; 77 77 } 78 78 79 79 void set_fiq_handler(void *start, unsigned int length) 80 80 { 81 + #if defined(CONFIG_CPU_USE_DOMAINS) 81 82 memcpy((void *)0xffff001c, start, length); 83 + #else 84 + memcpy(vectors_page + 0x1c, start, length); 85 + #endif 82 86 flush_icache_range(0xffff001c, 0xffff001c + length); 83 87 if (!vectors_high()) 84 88 flush_icache_range(0x1c, 0x1c + length);
+31 -19
arch/arm/kernel/head.S
··· 91 91 movs r8, r5 @ invalid machine (r5=0)? 92 92 THUMB( it eq ) @ force fixup-able long branch encoding 93 93 beq __error_a @ yes, error 'a' 94 + 95 + /* 96 + * r1 = machine no, r2 = atags, 97 + * r8 = machinfo, r9 = cpuid, r10 = procinfo 98 + */ 94 99 bl __vet_atags 95 100 #ifdef CONFIG_SMP_ON_UP 96 101 bl __fixup_smp ··· 392 387 393 388 #ifdef CONFIG_SMP_ON_UP 394 389 __fixup_smp: 395 - mov r7, #0x00070000 396 - orr r6, r7, #0xff000000 @ mask 0xff070000 397 - orr r7, r7, #0x41000000 @ val 0x41070000 398 - and r0, r9, r6 399 - teq r0, r7 @ ARM CPU and ARMv6/v7? 390 + mov r4, #0x00070000 391 + orr r3, r4, #0xff000000 @ mask 0xff070000 392 + orr r4, r4, #0x41000000 @ val 0x41070000 393 + and r0, r9, r3 394 + teq r0, r4 @ ARM CPU and ARMv6/v7? 400 395 bne __fixup_smp_on_up @ no, assume UP 401 396 402 - orr r6, r6, #0x0000ff00 403 - orr r6, r6, #0x000000f0 @ mask 0xff07fff0 404 - orr r7, r7, #0x0000b000 405 - orr r7, r7, #0x00000020 @ val 0x4107b020 406 - and r0, r9, r6 407 - teq r0, r7 @ ARM 11MPCore? 397 + orr r3, r3, #0x0000ff00 398 + orr r3, r3, #0x000000f0 @ mask 0xff07fff0 399 + orr r4, r4, #0x0000b000 400 + orr r4, r4, #0x00000020 @ val 0x4107b020 401 + and r0, r9, r3 402 + teq r0, r4 @ ARM 11MPCore? 408 403 moveq pc, lr @ yes, assume SMP 409 404 410 405 mrc p15, 0, r0, c0, c0, 5 @ read MPIDR ··· 413 408 414 409 __fixup_smp_on_up: 415 410 adr r0, 1f 416 - ldmia r0, {r3, r6, r7} 411 + ldmia r0, {r3 - r5} 417 412 sub r3, r0, r3 418 - add r6, r6, r3 419 - add r7, r7, r3 420 - 2: cmp r6, r7 421 - ldmia r6!, {r0, r4} 422 - strlo r4, [r0, r3] 423 - blo 2b 424 - mov pc, lr 413 + add r4, r4, r3 414 + add r5, r5, r3 415 + 2: cmp r4, r5 416 + movhs pc, lr 417 + ldmia r4!, {r0, r6} 418 + ARM( str r6, [r0, r3] ) 419 + THUMB( add r0, r0, r3 ) 420 + #ifdef __ARMEB__ 421 + THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian. 
422 + #endif 423 + THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords 424 + THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3. 425 + THUMB( strh r6, [r0] ) 426 + b 2b 425 427 ENDPROC(__fixup_smp) 426 428 427 429 .align
+20 -10
arch/arm/kernel/irq.c
··· 38 38 #include <linux/ftrace.h> 39 39 40 40 #include <asm/system.h> 41 + #include <asm/mach/arch.h> 41 42 #include <asm/mach/irq.h> 42 43 #include <asm/mach/time.h> 43 44 ··· 49 48 #define irq_finish(irq) do { } while (0) 50 49 #endif 51 50 52 - unsigned int arch_nr_irqs; 53 - void (*init_arch_irq)(void) __initdata = NULL; 54 51 unsigned long irq_err_count; 55 52 56 53 int show_interrupts(struct seq_file *p, void *v) ··· 57 58 struct irq_desc *desc; 58 59 struct irqaction * action; 59 60 unsigned long flags; 61 + int prec, n; 62 + 63 + for (prec = 3, n = 1000; prec < 10 && n <= nr_irqs; prec++) 64 + n *= 10; 65 + 66 + #ifdef CONFIG_SMP 67 + if (prec < 4) 68 + prec = 4; 69 + #endif 60 70 61 71 if (i == 0) { 62 72 char cpuname[12]; 63 73 64 - seq_printf(p, " "); 74 + seq_printf(p, "%*s ", prec, ""); 65 75 for_each_present_cpu(cpu) { 66 76 sprintf(cpuname, "CPU%d", cpu); 67 77 seq_printf(p, " %10s", cpuname); ··· 85 77 if (!action) 86 78 goto unlock; 87 79 88 - seq_printf(p, "%3d: ", i); 80 + seq_printf(p, "%*d: ", prec, i); 89 81 for_each_present_cpu(cpu) 90 82 seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); 91 83 seq_printf(p, " %10s", desc->chip->name ? : "-"); ··· 98 90 raw_spin_unlock_irqrestore(&desc->lock, flags); 99 91 } else if (i == nr_irqs) { 100 92 #ifdef CONFIG_FIQ 101 - show_fiq_list(p, v); 93 + show_fiq_list(p, prec); 102 94 #endif 103 95 #ifdef CONFIG_SMP 104 - show_ipi_list(p); 105 - show_local_irqs(p); 96 + show_ipi_list(p, prec); 106 97 #endif 107 - seq_printf(p, "Err: %10lu\n", irq_err_count); 98 + #ifdef CONFIG_LOCAL_TIMERS 99 + show_local_irqs(p, prec); 100 + #endif 101 + seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count); 108 102 } 109 103 return 0; 110 104 } ··· 166 156 167 157 void __init init_IRQ(void) 168 158 { 169 - init_arch_irq(); 159 + machine_desc->init_irq(); 170 160 } 171 161 172 162 #ifdef CONFIG_SPARSE_IRQ 173 163 int __init arch_probe_nr_irqs(void) 174 164 { 175 - nr_irqs = arch_nr_irqs ? 
arch_nr_irqs : NR_IRQS; 165 + nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS; 176 166 return nr_irqs; 177 167 } 178 168 #endif
+18 -19
arch/arm/kernel/setup.c
··· 75 75 76 76 unsigned int processor_id; 77 77 EXPORT_SYMBOL(processor_id); 78 - unsigned int __machine_arch_type; 78 + unsigned int __machine_arch_type __read_mostly; 79 79 EXPORT_SYMBOL(__machine_arch_type); 80 - unsigned int cacheid; 80 + unsigned int cacheid __read_mostly; 81 81 EXPORT_SYMBOL(cacheid); 82 82 83 83 unsigned int __atags_pointer __initdata; ··· 91 91 unsigned int system_serial_high; 92 92 EXPORT_SYMBOL(system_serial_high); 93 93 94 - unsigned int elf_hwcap; 94 + unsigned int elf_hwcap __read_mostly; 95 95 EXPORT_SYMBOL(elf_hwcap); 96 96 97 97 98 98 #ifdef MULTI_CPU 99 - struct processor processor; 99 + struct processor processor __read_mostly; 100 100 #endif 101 101 #ifdef MULTI_TLB 102 - struct cpu_tlb_fns cpu_tlb; 102 + struct cpu_tlb_fns cpu_tlb __read_mostly; 103 103 #endif 104 104 #ifdef MULTI_USER 105 - struct cpu_user_fns cpu_user; 105 + struct cpu_user_fns cpu_user __read_mostly; 106 106 #endif 107 107 #ifdef MULTI_CACHE 108 - struct cpu_cache_fns cpu_cache; 108 + struct cpu_cache_fns cpu_cache __read_mostly; 109 109 #endif 110 110 #ifdef CONFIG_OUTER_CACHE 111 - struct outer_cache_fns outer_cache; 111 + struct outer_cache_fns outer_cache __read_mostly; 112 112 EXPORT_SYMBOL(outer_cache); 113 113 #endif 114 114 ··· 126 126 static const char *cpu_name; 127 127 static const char *machine_name; 128 128 static char __initdata cmd_line[COMMAND_LINE_SIZE]; 129 + struct machine_desc *machine_desc __initdata; 129 130 130 131 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; 131 132 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } }; ··· 709 708 { 0, ATAG_NONE } 710 709 }; 711 710 712 - static void (*init_machine)(void) __initdata; 713 - 714 711 static int __init customize_machine(void) 715 712 { 716 713 /* customizes platform devices, or adds new ones */ 717 - if (init_machine) 718 - init_machine(); 714 + if (machine_desc->init_machine) 715 + 
machine_desc->init_machine(); 719 716 return 0; 720 717 } 721 718 arch_initcall(customize_machine); ··· 808 809 809 810 setup_processor(); 810 811 mdesc = setup_machine(machine_arch_type); 812 + machine_desc = mdesc; 811 813 machine_name = mdesc->name; 812 814 813 815 if (mdesc->soft_reboot) ··· 868 868 cpu_init(); 869 869 tcm_init(); 870 870 871 - /* 872 - * Set up various architecture-specific pointers 873 - */ 874 - arch_nr_irqs = mdesc->nr_irqs; 875 - init_arch_irq = mdesc->init_irq; 876 - system_timer = mdesc->timer; 877 - init_machine = mdesc->init_machine; 871 + #ifdef CONFIG_MULTI_IRQ_HANDLER 872 + handle_arch_irq = mdesc->handle_irq; 873 + #endif 878 874 879 875 #ifdef CONFIG_VT 880 876 #if defined(CONFIG_VGA_CONSOLE) ··· 880 884 #endif 881 885 #endif 882 886 early_trap_init(); 887 + 888 + if (mdesc->init_early) 889 + mdesc->init_early(); 883 890 } 884 891 885 892
+158 -253
arch/arm/kernel/smp.c
··· 25 25 #include <linux/irq.h> 26 26 #include <linux/percpu.h> 27 27 #include <linux/clockchips.h> 28 + #include <linux/completion.h> 28 29 29 30 #include <asm/atomic.h> 30 31 #include <asm/cacheflush.h> ··· 39 38 #include <asm/tlbflush.h> 40 39 #include <asm/ptrace.h> 41 40 #include <asm/localtimer.h> 42 - #include <asm/smp_plat.h> 43 41 44 42 /* 45 43 * as from 2.5, kernels no longer have an init_tasks structure ··· 47 47 */ 48 48 struct secondary_data secondary_data; 49 49 50 - /* 51 - * structures for inter-processor calls 52 - * - A collection of single bit ipi messages. 53 - */ 54 - struct ipi_data { 55 - spinlock_t lock; 56 - unsigned long ipi_count; 57 - unsigned long bits; 58 - }; 59 - 60 - static DEFINE_PER_CPU(struct ipi_data, ipi_data) = { 61 - .lock = SPIN_LOCK_UNLOCKED, 62 - }; 63 - 64 50 enum ipi_msg_type { 65 - IPI_TIMER, 51 + IPI_TIMER = 2, 66 52 IPI_RESCHEDULE, 67 53 IPI_CALL_FUNC, 68 54 IPI_CALL_FUNC_SINGLE, ··· 164 178 barrier(); 165 179 } 166 180 167 - if (!cpu_online(cpu)) 181 + if (!cpu_online(cpu)) { 182 + pr_crit("CPU%u: failed to come online\n", cpu); 168 183 ret = -EIO; 184 + } 185 + } else { 186 + pr_err("CPU%u: failed to boot: %d\n", cpu, ret); 169 187 } 170 188 171 189 secondary_data.stack = NULL; ··· 185 195 186 196 pgd_free(&init_mm, pgd); 187 197 188 - if (ret) { 189 - printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu); 190 - 191 - /* 192 - * FIXME: We need to clean up the new idle thread. --rmk 193 - */ 194 - } 195 - 196 198 return ret; 197 199 } 198 200 199 201 #ifdef CONFIG_HOTPLUG_CPU 202 + static void percpu_timer_stop(void); 203 + 200 204 /* 201 205 * __cpu_disable runs on the processor to be shutdown. 202 206 */ ··· 218 234 /* 219 235 * Stop the local timer for this CPU. 
220 236 */ 221 - local_timer_stop(); 237 + percpu_timer_stop(); 222 238 223 239 /* 224 240 * Flush user cache and TLB mappings, and then remove this CPU ··· 237 253 return 0; 238 254 } 239 255 256 + static DECLARE_COMPLETION(cpu_died); 257 + 240 258 /* 241 259 * called on the thread which is asking for a CPU to be shutdown - 242 260 * waits until shutdown has completed, or it is timed out. 243 261 */ 244 262 void __cpu_die(unsigned int cpu) 245 263 { 264 + if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) { 265 + pr_err("CPU%u: cpu didn't die\n", cpu); 266 + return; 267 + } 268 + printk(KERN_NOTICE "CPU%u: shutdown\n", cpu); 269 + 246 270 if (!platform_cpu_kill(cpu)) 247 271 printk("CPU%u: unable to kill\n", cpu); 248 272 } ··· 267 275 { 268 276 unsigned int cpu = smp_processor_id(); 269 277 270 - local_irq_disable(); 271 278 idle_task_exit(); 279 + 280 + local_irq_disable(); 281 + mb(); 282 + 283 + /* Tell __cpu_die() that this CPU is now safe to dispose of */ 284 + complete(&cpu_died); 272 285 273 286 /* 274 287 * actual CPU shutdown procedure is at least platform (if not 275 - * CPU) specific 288 + * CPU) specific. 276 289 */ 277 290 platform_cpu_die(cpu); 278 291 ··· 287 290 * to be repeated to undo the effects of taking the CPU offline. 288 291 */ 289 292 __asm__("mov sp, %0\n" 293 + " mov fp, #0\n" 290 294 " b secondary_start_kernel" 291 295 : 292 296 : "r" (task_stack_page(current) + THREAD_SIZE - 8)); 293 297 } 294 298 #endif /* CONFIG_HOTPLUG_CPU */ 299 + 300 + /* 301 + * Called by both boot and secondaries to move global data into 302 + * per-processor storage. 303 + */ 304 + static void __cpuinit smp_store_cpu_info(unsigned int cpuid) 305 + { 306 + struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); 307 + 308 + cpu_info->loops_per_jiffy = loops_per_jiffy; 309 + } 295 310 296 311 /* 297 312 * This is the secondary CPU boot entry. 
We're using this CPUs ··· 329 320 330 321 cpu_init(); 331 322 preempt_disable(); 323 + trace_hardirqs_off(); 332 324 333 325 /* 334 326 * Give the platform a chance to do its own initialisation. ··· 363 353 cpu_idle(); 364 354 } 365 355 366 - /* 367 - * Called by both boot and secondaries to move global data into 368 - * per-processor storage. 369 - */ 370 - void __cpuinit smp_store_cpu_info(unsigned int cpuid) 371 - { 372 - struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); 373 - 374 - cpu_info->loops_per_jiffy = loops_per_jiffy; 375 - } 376 - 377 356 void __init smp_cpus_done(unsigned int max_cpus) 378 357 { 379 358 int cpu; ··· 385 386 per_cpu(cpu_data, cpu).idle = current; 386 387 } 387 388 388 - static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg) 389 + void __init smp_prepare_cpus(unsigned int max_cpus) 389 390 { 390 - unsigned long flags; 391 - unsigned int cpu; 391 + unsigned int ncores = num_possible_cpus(); 392 392 393 - local_irq_save(flags); 394 - 395 - for_each_cpu(cpu, mask) { 396 - struct ipi_data *ipi = &per_cpu(ipi_data, cpu); 397 - 398 - spin_lock(&ipi->lock); 399 - ipi->bits |= 1 << msg; 400 - spin_unlock(&ipi->lock); 401 - } 393 + smp_store_cpu_info(smp_processor_id()); 402 394 403 395 /* 404 - * Call the platform specific cross-CPU call function. 396 + * are we trying to boot more cores than exist? 405 397 */ 406 - smp_cross_call(mask); 398 + if (max_cpus > ncores) 399 + max_cpus = ncores; 407 400 408 - local_irq_restore(flags); 401 + if (max_cpus > 1) { 402 + /* 403 + * Enable the local timer or broadcast device for the 404 + * boot CPU, but only if we have more than one CPU. 405 + */ 406 + percpu_timer_setup(); 407 + 408 + /* 409 + * Initialise the SCU if there are more than one CPU 410 + * and let them know where to start. 
411 + */ 412 + platform_smp_prepare_cpus(max_cpus); 413 + } 409 414 } 410 415 411 416 void arch_send_call_function_ipi_mask(const struct cpumask *mask) 412 417 { 413 - send_ipi_message(mask, IPI_CALL_FUNC); 418 + smp_cross_call(mask, IPI_CALL_FUNC); 414 419 } 415 420 416 421 void arch_send_call_function_single_ipi(int cpu) 417 422 { 418 - send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); 423 + smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); 419 424 } 420 425 421 - void show_ipi_list(struct seq_file *p) 426 + static const char *ipi_types[NR_IPI] = { 427 + #define S(x,s) [x - IPI_TIMER] = s 428 + S(IPI_TIMER, "Timer broadcast interrupts"), 429 + S(IPI_RESCHEDULE, "Rescheduling interrupts"), 430 + S(IPI_CALL_FUNC, "Function call interrupts"), 431 + S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"), 432 + S(IPI_CPU_STOP, "CPU stop interrupts"), 433 + }; 434 + 435 + void show_ipi_list(struct seq_file *p, int prec) 422 436 { 423 - unsigned int cpu; 437 + unsigned int cpu, i; 424 438 425 - seq_puts(p, "IPI:"); 439 + for (i = 0; i < NR_IPI; i++) { 440 + seq_printf(p, "%*s%u: ", prec - 1, "IPI", i); 426 441 427 - for_each_present_cpu(cpu) 428 - seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count); 442 + for_each_present_cpu(cpu) 443 + seq_printf(p, "%10u ", 444 + __get_irq_stat(cpu, ipi_irqs[i])); 429 445 430 - seq_putc(p, '\n'); 446 + seq_printf(p, " %s\n", ipi_types[i]); 447 + } 431 448 } 432 449 433 - void show_local_irqs(struct seq_file *p) 450 + u64 smp_irq_stat_cpu(unsigned int cpu) 434 451 { 435 - unsigned int cpu; 452 + u64 sum = 0; 453 + int i; 436 454 437 - seq_printf(p, "LOC: "); 455 + for (i = 0; i < NR_IPI; i++) 456 + sum += __get_irq_stat(cpu, ipi_irqs[i]); 438 457 439 - for_each_present_cpu(cpu) 440 - seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs); 458 + #ifdef CONFIG_LOCAL_TIMERS 459 + sum += __get_irq_stat(cpu, local_timer_irqs); 460 + #endif 441 461 442 - seq_putc(p, '\n'); 462 + return sum; 443 463 } 444 464 445 465 /* 
··· 481 463 int cpu = smp_processor_id(); 482 464 483 465 if (local_timer_ack()) { 484 - irq_stat[cpu].local_timer_irqs++; 466 + __inc_irq_stat(cpu, local_timer_irqs); 485 467 ipi_timer(); 486 468 } 487 469 488 470 set_irq_regs(old_regs); 471 + } 472 + 473 + void show_local_irqs(struct seq_file *p, int prec) 474 + { 475 + unsigned int cpu; 476 + 477 + seq_printf(p, "%*s: ", prec, "LOC"); 478 + 479 + for_each_present_cpu(cpu) 480 + seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs)); 481 + 482 + seq_printf(p, " Local timer interrupts\n"); 489 483 } 490 484 #endif 491 485 492 486 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 493 487 static void smp_timer_broadcast(const struct cpumask *mask) 494 488 { 495 - send_ipi_message(mask, IPI_TIMER); 489 + smp_cross_call(mask, IPI_TIMER); 496 490 } 497 491 #else 498 492 #define smp_timer_broadcast NULL ··· 541 511 local_timer_setup(evt); 542 512 } 543 513 514 + #ifdef CONFIG_HOTPLUG_CPU 515 + /* 516 + * The generic clock events code purposely does not stop the local timer 517 + * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it 518 + * manually here. 
519 + */ 520 + static void percpu_timer_stop(void) 521 + { 522 + unsigned int cpu = smp_processor_id(); 523 + struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); 524 + 525 + evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); 526 + } 527 + #endif 528 + 544 529 static DEFINE_SPINLOCK(stop_lock); 545 530 546 531 /* ··· 582 537 583 538 /* 584 539 * Main handler for inter-processor interrupts 585 - * 586 - * For ARM, the ipimask now only identifies a single 587 - * category of IPI (Bit 1 IPIs have been replaced by a 588 - * different mechanism): 589 - * 590 - * Bit 0 - Inter-processor function call 591 540 */ 592 - asmlinkage void __exception_irq_entry do_IPI(struct pt_regs *regs) 541 + asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs) 593 542 { 594 543 unsigned int cpu = smp_processor_id(); 595 - struct ipi_data *ipi = &per_cpu(ipi_data, cpu); 596 544 struct pt_regs *old_regs = set_irq_regs(regs); 597 545 598 - ipi->ipi_count++; 546 + if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI) 547 + __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]); 599 548 600 - for (;;) { 601 - unsigned long msgs; 549 + switch (ipinr) { 550 + case IPI_TIMER: 551 + ipi_timer(); 552 + break; 602 553 603 - spin_lock(&ipi->lock); 604 - msgs = ipi->bits; 605 - ipi->bits = 0; 606 - spin_unlock(&ipi->lock); 554 + case IPI_RESCHEDULE: 555 + /* 556 + * nothing more to do - eveything is 557 + * done on the interrupt return path 558 + */ 559 + break; 607 560 608 - if (!msgs) 609 - break; 561 + case IPI_CALL_FUNC: 562 + generic_smp_call_function_interrupt(); 563 + break; 610 564 611 - do { 612 - unsigned nextmsg; 565 + case IPI_CALL_FUNC_SINGLE: 566 + generic_smp_call_function_single_interrupt(); 567 + break; 613 568 614 - nextmsg = msgs & -msgs; 615 - msgs &= ~nextmsg; 616 - nextmsg = ffz(~nextmsg); 569 + case IPI_CPU_STOP: 570 + ipi_cpu_stop(cpu); 571 + break; 617 572 618 - switch (nextmsg) { 619 - case IPI_TIMER: 620 - ipi_timer(); 621 - break; 622 - 623 - case 
IPI_RESCHEDULE: 624 - /* 625 - * nothing more to do - eveything is 626 - * done on the interrupt return path 627 - */ 628 - break; 629 - 630 - case IPI_CALL_FUNC: 631 - generic_smp_call_function_interrupt(); 632 - break; 633 - 634 - case IPI_CALL_FUNC_SINGLE: 635 - generic_smp_call_function_single_interrupt(); 636 - break; 637 - 638 - case IPI_CPU_STOP: 639 - ipi_cpu_stop(cpu); 640 - break; 641 - 642 - default: 643 - printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n", 644 - cpu, nextmsg); 645 - break; 646 - } 647 - } while (msgs); 573 + default: 574 + printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n", 575 + cpu, ipinr); 576 + break; 648 577 } 649 - 650 578 set_irq_regs(old_regs); 651 579 } 652 580 653 581 void smp_send_reschedule(int cpu) 654 582 { 655 - send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); 583 + smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); 656 584 } 657 585 658 586 void smp_send_stop(void) 659 587 { 660 - cpumask_t mask = cpu_online_map; 661 - cpu_clear(smp_processor_id(), mask); 662 - if (!cpus_empty(mask)) 663 - send_ipi_message(&mask, IPI_CPU_STOP); 588 + unsigned long timeout; 589 + 590 + if (num_online_cpus() > 1) { 591 + cpumask_t mask = cpu_online_map; 592 + cpu_clear(smp_processor_id(), mask); 593 + 594 + smp_cross_call(&mask, IPI_CPU_STOP); 595 + } 596 + 597 + /* Wait up to one second for other CPUs to stop */ 598 + timeout = USEC_PER_SEC; 599 + while (num_online_cpus() > 1 && timeout--) 600 + udelay(1); 601 + 602 + if (num_online_cpus() > 1) 603 + pr_warning("SMP: failed to stop secondary CPUs\n"); 664 604 } 665 605 666 606 /* ··· 654 624 int setup_profiling_timer(unsigned int multiplier) 655 625 { 656 626 return -EINVAL; 657 - } 658 - 659 - static void 660 - on_each_cpu_mask(void (*func)(void *), void *info, int wait, 661 - const struct cpumask *mask) 662 - { 663 - preempt_disable(); 664 - 665 - smp_call_function_many(mask, func, info, wait); 666 - if (cpumask_test_cpu(smp_processor_id(), mask)) 667 - func(info); 668 - 669 - 
preempt_enable(); 670 - } 671 - 672 - /**********************************************************************/ 673 - 674 - /* 675 - * TLB operations 676 - */ 677 - struct tlb_args { 678 - struct vm_area_struct *ta_vma; 679 - unsigned long ta_start; 680 - unsigned long ta_end; 681 - }; 682 - 683 - static inline void ipi_flush_tlb_all(void *ignored) 684 - { 685 - local_flush_tlb_all(); 686 - } 687 - 688 - static inline void ipi_flush_tlb_mm(void *arg) 689 - { 690 - struct mm_struct *mm = (struct mm_struct *)arg; 691 - 692 - local_flush_tlb_mm(mm); 693 - } 694 - 695 - static inline void ipi_flush_tlb_page(void *arg) 696 - { 697 - struct tlb_args *ta = (struct tlb_args *)arg; 698 - 699 - local_flush_tlb_page(ta->ta_vma, ta->ta_start); 700 - } 701 - 702 - static inline void ipi_flush_tlb_kernel_page(void *arg) 703 - { 704 - struct tlb_args *ta = (struct tlb_args *)arg; 705 - 706 - local_flush_tlb_kernel_page(ta->ta_start); 707 - } 708 - 709 - static inline void ipi_flush_tlb_range(void *arg) 710 - { 711 - struct tlb_args *ta = (struct tlb_args *)arg; 712 - 713 - local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); 714 - } 715 - 716 - static inline void ipi_flush_tlb_kernel_range(void *arg) 717 - { 718 - struct tlb_args *ta = (struct tlb_args *)arg; 719 - 720 - local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end); 721 - } 722 - 723 - void flush_tlb_all(void) 724 - { 725 - if (tlb_ops_need_broadcast()) 726 - on_each_cpu(ipi_flush_tlb_all, NULL, 1); 727 - else 728 - local_flush_tlb_all(); 729 - } 730 - 731 - void flush_tlb_mm(struct mm_struct *mm) 732 - { 733 - if (tlb_ops_need_broadcast()) 734 - on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm)); 735 - else 736 - local_flush_tlb_mm(mm); 737 - } 738 - 739 - void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) 740 - { 741 - if (tlb_ops_need_broadcast()) { 742 - struct tlb_args ta; 743 - ta.ta_vma = vma; 744 - ta.ta_start = uaddr; 745 - on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 
mm_cpumask(vma->vm_mm)); 746 - } else 747 - local_flush_tlb_page(vma, uaddr); 748 - } 749 - 750 - void flush_tlb_kernel_page(unsigned long kaddr) 751 - { 752 - if (tlb_ops_need_broadcast()) { 753 - struct tlb_args ta; 754 - ta.ta_start = kaddr; 755 - on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1); 756 - } else 757 - local_flush_tlb_kernel_page(kaddr); 758 - } 759 - 760 - void flush_tlb_range(struct vm_area_struct *vma, 761 - unsigned long start, unsigned long end) 762 - { 763 - if (tlb_ops_need_broadcast()) { 764 - struct tlb_args ta; 765 - ta.ta_vma = vma; 766 - ta.ta_start = start; 767 - ta.ta_end = end; 768 - on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm)); 769 - } else 770 - local_flush_tlb_range(vma, start, end); 771 - } 772 - 773 - void flush_tlb_kernel_range(unsigned long start, unsigned long end) 774 - { 775 - if (tlb_ops_need_broadcast()) { 776 - struct tlb_args ta; 777 - ta.ta_start = start; 778 - ta.ta_end = end; 779 - on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1); 780 - } else 781 - local_flush_tlb_kernel_range(start, end); 782 627 }
+139
arch/arm/kernel/smp_tlb.c
··· 1 + /* 2 + * linux/arch/arm/kernel/smp_tlb.c 3 + * 4 + * Copyright (C) 2002 ARM Limited, All Rights Reserved. 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + #include <linux/preempt.h> 11 + #include <linux/smp.h> 12 + 13 + #include <asm/smp_plat.h> 14 + #include <asm/tlbflush.h> 15 + 16 + static void on_each_cpu_mask(void (*func)(void *), void *info, int wait, 17 + const struct cpumask *mask) 18 + { 19 + preempt_disable(); 20 + 21 + smp_call_function_many(mask, func, info, wait); 22 + if (cpumask_test_cpu(smp_processor_id(), mask)) 23 + func(info); 24 + 25 + preempt_enable(); 26 + } 27 + 28 + /**********************************************************************/ 29 + 30 + /* 31 + * TLB operations 32 + */ 33 + struct tlb_args { 34 + struct vm_area_struct *ta_vma; 35 + unsigned long ta_start; 36 + unsigned long ta_end; 37 + }; 38 + 39 + static inline void ipi_flush_tlb_all(void *ignored) 40 + { 41 + local_flush_tlb_all(); 42 + } 43 + 44 + static inline void ipi_flush_tlb_mm(void *arg) 45 + { 46 + struct mm_struct *mm = (struct mm_struct *)arg; 47 + 48 + local_flush_tlb_mm(mm); 49 + } 50 + 51 + static inline void ipi_flush_tlb_page(void *arg) 52 + { 53 + struct tlb_args *ta = (struct tlb_args *)arg; 54 + 55 + local_flush_tlb_page(ta->ta_vma, ta->ta_start); 56 + } 57 + 58 + static inline void ipi_flush_tlb_kernel_page(void *arg) 59 + { 60 + struct tlb_args *ta = (struct tlb_args *)arg; 61 + 62 + local_flush_tlb_kernel_page(ta->ta_start); 63 + } 64 + 65 + static inline void ipi_flush_tlb_range(void *arg) 66 + { 67 + struct tlb_args *ta = (struct tlb_args *)arg; 68 + 69 + local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); 70 + } 71 + 72 + static inline void ipi_flush_tlb_kernel_range(void *arg) 73 + { 74 + struct tlb_args *ta = (struct tlb_args *)arg; 75 + 76 + 
local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end); 77 + } 78 + 79 + void flush_tlb_all(void) 80 + { 81 + if (tlb_ops_need_broadcast()) 82 + on_each_cpu(ipi_flush_tlb_all, NULL, 1); 83 + else 84 + local_flush_tlb_all(); 85 + } 86 + 87 + void flush_tlb_mm(struct mm_struct *mm) 88 + { 89 + if (tlb_ops_need_broadcast()) 90 + on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm)); 91 + else 92 + local_flush_tlb_mm(mm); 93 + } 94 + 95 + void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) 96 + { 97 + if (tlb_ops_need_broadcast()) { 98 + struct tlb_args ta; 99 + ta.ta_vma = vma; 100 + ta.ta_start = uaddr; 101 + on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm)); 102 + } else 103 + local_flush_tlb_page(vma, uaddr); 104 + } 105 + 106 + void flush_tlb_kernel_page(unsigned long kaddr) 107 + { 108 + if (tlb_ops_need_broadcast()) { 109 + struct tlb_args ta; 110 + ta.ta_start = kaddr; 111 + on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1); 112 + } else 113 + local_flush_tlb_kernel_page(kaddr); 114 + } 115 + 116 + void flush_tlb_range(struct vm_area_struct *vma, 117 + unsigned long start, unsigned long end) 118 + { 119 + if (tlb_ops_need_broadcast()) { 120 + struct tlb_args ta; 121 + ta.ta_vma = vma; 122 + ta.ta_start = start; 123 + ta.ta_end = end; 124 + on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm)); 125 + } else 126 + local_flush_tlb_range(vma, start, end); 127 + } 128 + 129 + void flush_tlb_kernel_range(unsigned long start, unsigned long end) 130 + { 131 + if (tlb_ops_need_broadcast()) { 132 + struct tlb_args ta; 133 + ta.ta_start = start; 134 + ta.ta_end = end; 135 + on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1); 136 + } else 137 + local_flush_tlb_kernel_range(start, end); 138 + } 139 +
-10
arch/arm/kernel/smp_twd.c
··· 145 145 146 146 clockevents_register_device(clk); 147 147 } 148 - 149 - #ifdef CONFIG_HOTPLUG_CPU 150 - /* 151 - * take a local timer down 152 - */ 153 - void twd_timer_stop(void) 154 - { 155 - __raw_writel(0, twd_base + TWD_TIMER_CONTROL); 156 - } 157 - #endif
+267
arch/arm/kernel/swp_emulate.c
··· 1 + /* 2 + * linux/arch/arm/kernel/swp_emulate.c 3 + * 4 + * Copyright (C) 2009 ARM Limited 5 + * __user_* functions adapted from include/asm/uaccess.h 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + * 11 + * Implements emulation of the SWP/SWPB instructions using load-exclusive and 12 + * store-exclusive for processors that have them disabled (or future ones that 13 + * might not implement them). 14 + * 15 + * Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>] 16 + * Where: Rt = destination 17 + * Rt2 = source 18 + * Rn = address 19 + */ 20 + 21 + #include <linux/init.h> 22 + #include <linux/kernel.h> 23 + #include <linux/proc_fs.h> 24 + #include <linux/sched.h> 25 + #include <linux/syscalls.h> 26 + #include <linux/perf_event.h> 27 + 28 + #include <asm/traps.h> 29 + #include <asm/uaccess.h> 30 + 31 + /* 32 + * Error-checking SWP macros implemented using ldrex{b}/strex{b} 33 + */ 34 + #define __user_swpX_asm(data, addr, res, temp, B) \ 35 + __asm__ __volatile__( \ 36 + " mov %2, %1\n" \ 37 + "0: ldrex"B" %1, [%3]\n" \ 38 + "1: strex"B" %0, %2, [%3]\n" \ 39 + " cmp %0, #0\n" \ 40 + " movne %0, %4\n" \ 41 + "2:\n" \ 42 + " .section .fixup,\"ax\"\n" \ 43 + " .align 2\n" \ 44 + "3: mov %0, %5\n" \ 45 + " b 2b\n" \ 46 + " .previous\n" \ 47 + " .section __ex_table,\"a\"\n" \ 48 + " .align 3\n" \ 49 + " .long 0b, 3b\n" \ 50 + " .long 1b, 3b\n" \ 51 + " .previous" \ 52 + : "=&r" (res), "+r" (data), "=&r" (temp) \ 53 + : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \ 54 + : "cc", "memory") 55 + 56 + #define __user_swp_asm(data, addr, res, temp) \ 57 + __user_swpX_asm(data, addr, res, temp, "") 58 + #define __user_swpb_asm(data, addr, res, temp) \ 59 + __user_swpX_asm(data, addr, res, temp, "b") 60 + 61 + /* 62 + * Macros/defines for extracting register numbers from instruction. 
63 + */ 64 + #define EXTRACT_REG_NUM(instruction, offset) \ 65 + (((instruction) & (0xf << (offset))) >> (offset)) 66 + #define RN_OFFSET 16 67 + #define RT_OFFSET 12 68 + #define RT2_OFFSET 0 69 + /* 70 + * Bit 22 of the instruction encoding distinguishes between 71 + * the SWP and SWPB variants (bit set means SWPB). 72 + */ 73 + #define TYPE_SWPB (1 << 22) 74 + 75 + static unsigned long swpcounter; 76 + static unsigned long swpbcounter; 77 + static unsigned long abtcounter; 78 + static pid_t previous_pid; 79 + 80 + #ifdef CONFIG_PROC_FS 81 + static int proc_read_status(char *page, char **start, off_t off, int count, 82 + int *eof, void *data) 83 + { 84 + char *p = page; 85 + int len; 86 + 87 + p += sprintf(p, "Emulated SWP:\t\t%lu\n", swpcounter); 88 + p += sprintf(p, "Emulated SWPB:\t\t%lu\n", swpbcounter); 89 + p += sprintf(p, "Aborted SWP{B}:\t\t%lu\n", abtcounter); 90 + if (previous_pid != 0) 91 + p += sprintf(p, "Last process:\t\t%d\n", previous_pid); 92 + 93 + len = (p - page) - off; 94 + if (len < 0) 95 + len = 0; 96 + 97 + *eof = (len <= count) ? 1 : 0; 98 + *start = page + off; 99 + 100 + return len; 101 + } 102 + #endif 103 + 104 + /* 105 + * Set up process info to signal segmentation fault - called on access error. 
106 + */ 107 + static void set_segfault(struct pt_regs *regs, unsigned long addr) 108 + { 109 + siginfo_t info; 110 + 111 + if (find_vma(current->mm, addr) == NULL) 112 + info.si_code = SEGV_MAPERR; 113 + else 114 + info.si_code = SEGV_ACCERR; 115 + 116 + info.si_signo = SIGSEGV; 117 + info.si_errno = 0; 118 + info.si_addr = (void *) instruction_pointer(regs); 119 + 120 + pr_debug("SWP{B} emulation: access caused memory abort!\n"); 121 + arm_notify_die("Illegal memory access", regs, &info, 0, 0); 122 + 123 + abtcounter++; 124 + } 125 + 126 + static int emulate_swpX(unsigned int address, unsigned int *data, 127 + unsigned int type) 128 + { 129 + unsigned int res = 0; 130 + 131 + if ((type != TYPE_SWPB) && (address & 0x3)) { 132 + /* SWP to unaligned address not permitted */ 133 + pr_debug("SWP instruction on unaligned pointer!\n"); 134 + return -EFAULT; 135 + } 136 + 137 + while (1) { 138 + unsigned long temp; 139 + 140 + /* 141 + * Barrier required between accessing protected resource and 142 + * releasing a lock for it. Legacy code might not have done 143 + * this, and we cannot determine that this is not the case 144 + * being emulated, so insert always. 145 + */ 146 + smp_mb(); 147 + 148 + if (type == TYPE_SWPB) 149 + __user_swpb_asm(*data, address, res, temp); 150 + else 151 + __user_swp_asm(*data, address, res, temp); 152 + 153 + if (likely(res != -EAGAIN) || signal_pending(current)) 154 + break; 155 + 156 + cond_resched(); 157 + } 158 + 159 + if (res == 0) { 160 + /* 161 + * Barrier also required between aquiring a lock for a 162 + * protected resource and accessing the resource. Inserted for 163 + * same reason as above. 
164 + */ 165 + smp_mb(); 166 + 167 + if (type == TYPE_SWPB) 168 + swpbcounter++; 169 + else 170 + swpcounter++; 171 + } 172 + 173 + return res; 174 + } 175 + 176 + /* 177 + * swp_handler logs the id of calling process, dissects the instruction, sanity 178 + * checks the memory location, calls emulate_swpX for the actual operation and 179 + * deals with fixup/error handling before returning 180 + */ 181 + static int swp_handler(struct pt_regs *regs, unsigned int instr) 182 + { 183 + unsigned int address, destreg, data, type; 184 + unsigned int res = 0; 185 + 186 + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc); 187 + 188 + if (current->pid != previous_pid) { 189 + pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n", 190 + current->comm, (unsigned long)current->pid); 191 + previous_pid = current->pid; 192 + } 193 + 194 + address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)]; 195 + data = regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)]; 196 + destreg = EXTRACT_REG_NUM(instr, RT_OFFSET); 197 + 198 + type = instr & TYPE_SWPB; 199 + 200 + pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n", 201 + EXTRACT_REG_NUM(instr, RN_OFFSET), address, 202 + destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data); 203 + 204 + /* Check access in reasonable access range for both SWP and SWPB */ 205 + if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) { 206 + pr_debug("SWP{B} emulation: access to %p not allowed!\n", 207 + (void *)address); 208 + res = -EFAULT; 209 + } else { 210 + res = emulate_swpX(address, &data, type); 211 + } 212 + 213 + if (res == 0) { 214 + /* 215 + * On successful emulation, revert the adjustment to the PC 216 + * made in kernel/traps.c in order to resume execution at the 217 + * instruction following the SWP{B}. 218 + */ 219 + regs->ARM_pc += 4; 220 + regs->uregs[destreg] = data; 221 + } else if (res == -EFAULT) { 222 + /* 223 + * Memory errors do not mean emulation failed. 
224 + * Set up signal info to return SEGV, then return OK 225 + */ 226 + set_segfault(regs, address); 227 + } 228 + 229 + return 0; 230 + } 231 + 232 + /* 233 + * Only emulate SWP/SWPB executed in ARM state/User mode. 234 + * The kernel must be SWP free and SWP{B} does not exist in Thumb/ThumbEE. 235 + */ 236 + static struct undef_hook swp_hook = { 237 + .instr_mask = 0x0fb00ff0, 238 + .instr_val = 0x01000090, 239 + .cpsr_mask = MODE_MASK | PSR_T_BIT | PSR_J_BIT, 240 + .cpsr_val = USR_MODE, 241 + .fn = swp_handler 242 + }; 243 + 244 + /* 245 + * Register handler and create status file in /proc/cpu 246 + * Invoked as late_initcall, since not needed before init spawned. 247 + */ 248 + static int __init swp_emulation_init(void) 249 + { 250 + #ifdef CONFIG_PROC_FS 251 + struct proc_dir_entry *res; 252 + 253 + res = create_proc_entry("cpu/swp_emulation", S_IRUGO, NULL); 254 + 255 + if (!res) 256 + return -ENOMEM; 257 + 258 + res->read_proc = proc_read_status; 259 + #endif /* CONFIG_PROC_FS */ 260 + 261 + printk(KERN_NOTICE "Registering SWP/SWPB emulation handler\n"); 262 + register_undef_hook(&swp_hook); 263 + 264 + return 0; 265 + } 266 + 267 + late_initcall(swp_emulation_init);
+3 -1
arch/arm/kernel/time.c
··· 30 30 #include <asm/leds.h> 31 31 #include <asm/thread_info.h> 32 32 #include <asm/stacktrace.h> 33 + #include <asm/mach/arch.h> 33 34 #include <asm/mach/time.h> 34 35 35 36 /* 36 37 * Our system timer. 37 38 */ 38 - struct sys_timer *system_timer; 39 + static struct sys_timer *system_timer; 39 40 40 41 #if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) 41 42 /* this needs a better home */ ··· 161 160 162 161 void __init time_init(void) 163 162 { 163 + system_timer = machine_desc->timer; 164 164 system_timer->init(); 165 165 } 166 166
+10 -4
arch/arm/kernel/traps.c
··· 37 37 38 38 static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; 39 39 40 + void *vectors_page; 41 + 40 42 #ifdef CONFIG_DEBUG_USER 41 43 unsigned int user_debug; 42 44 ··· 758 756 759 757 void __init early_trap_init(void) 760 758 { 759 + #if defined(CONFIG_CPU_USE_DOMAINS) 761 760 unsigned long vectors = CONFIG_VECTORS_BASE; 761 + #else 762 + unsigned long vectors = (unsigned long)vectors_page; 763 + #endif 762 764 extern char __stubs_start[], __stubs_end[]; 763 765 extern char __vectors_start[], __vectors_end[]; 764 766 extern char __kuser_helper_start[], __kuser_helper_end[]; ··· 786 780 * Copy signal return handlers into the vector page, and 787 781 * set sigreturn to be a pointer to these. 788 782 */ 789 - memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes, 790 - sizeof(sigreturn_codes)); 791 - memcpy((void *)KERN_RESTART_CODE, syscall_restart_code, 792 - sizeof(syscall_restart_code)); 783 + memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE), 784 + sigreturn_codes, sizeof(sigreturn_codes)); 785 + memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE), 786 + syscall_restart_code, sizeof(syscall_restart_code)); 793 787 794 788 flush_icache_range(vectors, vectors + PAGE_SIZE); 795 789 modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+1
arch/arm/kernel/vmlinux.lds.S
··· 168 168 169 169 NOSAVE_DATA 170 170 CACHELINE_ALIGNED_DATA(32) 171 + READ_MOSTLY_DATA(32) 171 172 172 173 /* 173 174 * The exception fixup table (might need resorting at runtime)
+7 -6
arch/arm/lib/getuser.S
··· 28 28 */ 29 29 #include <linux/linkage.h> 30 30 #include <asm/errno.h> 31 + #include <asm/domain.h> 31 32 32 33 ENTRY(__get_user_1) 33 - 1: ldrbt r2, [r0] 34 + 1: T(ldrb) r2, [r0] 34 35 mov r0, #0 35 36 mov pc, lr 36 37 ENDPROC(__get_user_1) 37 38 38 39 ENTRY(__get_user_2) 39 40 #ifdef CONFIG_THUMB2_KERNEL 40 - 2: ldrbt r2, [r0] 41 - 3: ldrbt r3, [r0, #1] 41 + 2: T(ldrb) r2, [r0] 42 + 3: T(ldrb) r3, [r0, #1] 42 43 #else 43 - 2: ldrbt r2, [r0], #1 44 - 3: ldrbt r3, [r0] 44 + 2: T(ldrb) r2, [r0], #1 45 + 3: T(ldrb) r3, [r0] 45 46 #endif 46 47 #ifndef __ARMEB__ 47 48 orr r2, r2, r3, lsl #8 ··· 54 53 ENDPROC(__get_user_2) 55 54 56 55 ENTRY(__get_user_4) 57 - 4: ldrt r2, [r0] 56 + 4: T(ldr) r2, [r0] 58 57 mov r0, #0 59 58 mov pc, lr 60 59 ENDPROC(__get_user_4)
+15 -14
arch/arm/lib/putuser.S
··· 28 28 */ 29 29 #include <linux/linkage.h> 30 30 #include <asm/errno.h> 31 + #include <asm/domain.h> 31 32 32 33 ENTRY(__put_user_1) 33 - 1: strbt r2, [r0] 34 + 1: T(strb) r2, [r0] 34 35 mov r0, #0 35 36 mov pc, lr 36 37 ENDPROC(__put_user_1) ··· 40 39 mov ip, r2, lsr #8 41 40 #ifdef CONFIG_THUMB2_KERNEL 42 41 #ifndef __ARMEB__ 43 - 2: strbt r2, [r0] 44 - 3: strbt ip, [r0, #1] 42 + 2: T(strb) r2, [r0] 43 + 3: T(strb) ip, [r0, #1] 45 44 #else 46 - 2: strbt ip, [r0] 47 - 3: strbt r2, [r0, #1] 45 + 2: T(strb) ip, [r0] 46 + 3: T(strb) r2, [r0, #1] 48 47 #endif 49 48 #else /* !CONFIG_THUMB2_KERNEL */ 50 49 #ifndef __ARMEB__ 51 - 2: strbt r2, [r0], #1 52 - 3: strbt ip, [r0] 50 + 2: T(strb) r2, [r0], #1 51 + 3: T(strb) ip, [r0] 53 52 #else 54 - 2: strbt ip, [r0], #1 55 - 3: strbt r2, [r0] 53 + 2: T(strb) ip, [r0], #1 54 + 3: T(strb) r2, [r0] 56 55 #endif 57 56 #endif /* CONFIG_THUMB2_KERNEL */ 58 57 mov r0, #0 ··· 60 59 ENDPROC(__put_user_2) 61 60 62 61 ENTRY(__put_user_4) 63 - 4: strt r2, [r0] 62 + 4: T(str) r2, [r0] 64 63 mov r0, #0 65 64 mov pc, lr 66 65 ENDPROC(__put_user_4) 67 66 68 67 ENTRY(__put_user_8) 69 68 #ifdef CONFIG_THUMB2_KERNEL 70 - 5: strt r2, [r0] 71 - 6: strt r3, [r0, #4] 69 + 5: T(str) r2, [r0] 70 + 6: T(str) r3, [r0, #4] 72 71 #else 73 - 5: strt r2, [r0], #4 74 - 6: strt r3, [r0] 72 + 5: T(str) r2, [r0], #4 73 + 6: T(str) r3, [r0] 75 74 #endif 76 75 mov r0, #0 77 76 mov pc, lr
+42 -41
arch/arm/lib/uaccess.S
··· 14 14 #include <linux/linkage.h> 15 15 #include <asm/assembler.h> 16 16 #include <asm/errno.h> 17 + #include <asm/domain.h> 17 18 18 19 .text 19 20 ··· 32 31 rsb ip, ip, #4 33 32 cmp ip, #2 34 33 ldrb r3, [r1], #1 35 - USER( strbt r3, [r0], #1) @ May fault 34 + USER( T(strb) r3, [r0], #1) @ May fault 36 35 ldrgeb r3, [r1], #1 37 - USER( strgebt r3, [r0], #1) @ May fault 36 + USER( T(strgeb) r3, [r0], #1) @ May fault 38 37 ldrgtb r3, [r1], #1 39 - USER( strgtbt r3, [r0], #1) @ May fault 38 + USER( T(strgtb) r3, [r0], #1) @ May fault 40 39 sub r2, r2, ip 41 40 b .Lc2u_dest_aligned 42 41 ··· 59 58 addmi ip, r2, #4 60 59 bmi .Lc2u_0nowords 61 60 ldr r3, [r1], #4 62 - USER( strt r3, [r0], #4) @ May fault 61 + USER( T(str) r3, [r0], #4) @ May fault 63 62 mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction 64 63 rsb ip, ip, #0 65 64 movs ip, ip, lsr #32 - PAGE_SHIFT ··· 88 87 stmneia r0!, {r3 - r4} @ Shouldnt fault 89 88 tst ip, #4 90 89 ldrne r3, [r1], #4 91 - strnet r3, [r0], #4 @ Shouldnt fault 90 + T(strne) r3, [r0], #4 @ Shouldnt fault 92 91 ands ip, ip, #3 93 92 beq .Lc2u_0fupi 94 93 .Lc2u_0nowords: teq ip, #0 95 94 beq .Lc2u_finished 96 95 .Lc2u_nowords: cmp ip, #2 97 96 ldrb r3, [r1], #1 98 - USER( strbt r3, [r0], #1) @ May fault 97 + USER( T(strb) r3, [r0], #1) @ May fault 99 98 ldrgeb r3, [r1], #1 100 - USER( strgebt r3, [r0], #1) @ May fault 99 + USER( T(strgeb) r3, [r0], #1) @ May fault 101 100 ldrgtb r3, [r1], #1 102 - USER( strgtbt r3, [r0], #1) @ May fault 101 + USER( T(strgtb) r3, [r0], #1) @ May fault 103 102 b .Lc2u_finished 104 103 105 104 .Lc2u_not_enough: ··· 120 119 mov r3, r7, pull #8 121 120 ldr r7, [r1], #4 122 121 orr r3, r3, r7, push #24 123 - USER( strt r3, [r0], #4) @ May fault 122 + USER( T(str) r3, [r0], #4) @ May fault 124 123 mov ip, r0, lsl #32 - PAGE_SHIFT 125 124 rsb ip, ip, #0 126 125 movs ip, ip, lsr #32 - PAGE_SHIFT ··· 155 154 movne r3, r7, pull #8 156 155 ldrne r7, [r1], #4 157 156 orrne r3, r3, r7, push 
#24 158 - strnet r3, [r0], #4 @ Shouldnt fault 157 + T(strne) r3, [r0], #4 @ Shouldnt fault 159 158 ands ip, ip, #3 160 159 beq .Lc2u_1fupi 161 160 .Lc2u_1nowords: mov r3, r7, get_byte_1 162 161 teq ip, #0 163 162 beq .Lc2u_finished 164 163 cmp ip, #2 165 - USER( strbt r3, [r0], #1) @ May fault 164 + USER( T(strb) r3, [r0], #1) @ May fault 166 165 movge r3, r7, get_byte_2 167 - USER( strgebt r3, [r0], #1) @ May fault 166 + USER( T(strgeb) r3, [r0], #1) @ May fault 168 167 movgt r3, r7, get_byte_3 169 - USER( strgtbt r3, [r0], #1) @ May fault 168 + USER( T(strgtb) r3, [r0], #1) @ May fault 170 169 b .Lc2u_finished 171 170 172 171 .Lc2u_2fupi: subs r2, r2, #4 ··· 175 174 mov r3, r7, pull #16 176 175 ldr r7, [r1], #4 177 176 orr r3, r3, r7, push #16 178 - USER( strt r3, [r0], #4) @ May fault 177 + USER( T(str) r3, [r0], #4) @ May fault 179 178 mov ip, r0, lsl #32 - PAGE_SHIFT 180 179 rsb ip, ip, #0 181 180 movs ip, ip, lsr #32 - PAGE_SHIFT ··· 210 209 movne r3, r7, pull #16 211 210 ldrne r7, [r1], #4 212 211 orrne r3, r3, r7, push #16 213 - strnet r3, [r0], #4 @ Shouldnt fault 212 + T(strne) r3, [r0], #4 @ Shouldnt fault 214 213 ands ip, ip, #3 215 214 beq .Lc2u_2fupi 216 215 .Lc2u_2nowords: mov r3, r7, get_byte_2 217 216 teq ip, #0 218 217 beq .Lc2u_finished 219 218 cmp ip, #2 220 - USER( strbt r3, [r0], #1) @ May fault 219 + USER( T(strb) r3, [r0], #1) @ May fault 221 220 movge r3, r7, get_byte_3 222 - USER( strgebt r3, [r0], #1) @ May fault 221 + USER( T(strgeb) r3, [r0], #1) @ May fault 223 222 ldrgtb r3, [r1], #0 224 - USER( strgtbt r3, [r0], #1) @ May fault 223 + USER( T(strgtb) r3, [r0], #1) @ May fault 225 224 b .Lc2u_finished 226 225 227 226 .Lc2u_3fupi: subs r2, r2, #4 ··· 230 229 mov r3, r7, pull #24 231 230 ldr r7, [r1], #4 232 231 orr r3, r3, r7, push #8 233 - USER( strt r3, [r0], #4) @ May fault 232 + USER( T(str) r3, [r0], #4) @ May fault 234 233 mov ip, r0, lsl #32 - PAGE_SHIFT 235 234 rsb ip, ip, #0 236 235 movs ip, ip, lsr #32 - PAGE_SHIFT ··· 265 
264 movne r3, r7, pull #24 266 265 ldrne r7, [r1], #4 267 266 orrne r3, r3, r7, push #8 268 - strnet r3, [r0], #4 @ Shouldnt fault 267 + T(strne) r3, [r0], #4 @ Shouldnt fault 269 268 ands ip, ip, #3 270 269 beq .Lc2u_3fupi 271 270 .Lc2u_3nowords: mov r3, r7, get_byte_3 272 271 teq ip, #0 273 272 beq .Lc2u_finished 274 273 cmp ip, #2 275 - USER( strbt r3, [r0], #1) @ May fault 274 + USER( T(strb) r3, [r0], #1) @ May fault 276 275 ldrgeb r3, [r1], #1 277 - USER( strgebt r3, [r0], #1) @ May fault 276 + USER( T(strgeb) r3, [r0], #1) @ May fault 278 277 ldrgtb r3, [r1], #0 279 - USER( strgtbt r3, [r0], #1) @ May fault 278 + USER( T(strgtb) r3, [r0], #1) @ May fault 280 279 b .Lc2u_finished 281 280 ENDPROC(__copy_to_user) 282 281 ··· 295 294 .Lcfu_dest_not_aligned: 296 295 rsb ip, ip, #4 297 296 cmp ip, #2 298 - USER( ldrbt r3, [r1], #1) @ May fault 297 + USER( T(ldrb) r3, [r1], #1) @ May fault 299 298 strb r3, [r0], #1 300 - USER( ldrgebt r3, [r1], #1) @ May fault 299 + USER( T(ldrgeb) r3, [r1], #1) @ May fault 301 300 strgeb r3, [r0], #1 302 - USER( ldrgtbt r3, [r1], #1) @ May fault 301 + USER( T(ldrgtb) r3, [r1], #1) @ May fault 303 302 strgtb r3, [r0], #1 304 303 sub r2, r2, ip 305 304 b .Lcfu_dest_aligned ··· 322 321 .Lcfu_0fupi: subs r2, r2, #4 323 322 addmi ip, r2, #4 324 323 bmi .Lcfu_0nowords 325 - USER( ldrt r3, [r1], #4) 324 + USER( T(ldr) r3, [r1], #4) 326 325 str r3, [r0], #4 327 326 mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction 328 327 rsb ip, ip, #0 ··· 351 350 ldmneia r1!, {r3 - r4} @ Shouldnt fault 352 351 stmneia r0!, {r3 - r4} 353 352 tst ip, #4 354 - ldrnet r3, [r1], #4 @ Shouldnt fault 353 + T(ldrne) r3, [r1], #4 @ Shouldnt fault 355 354 strne r3, [r0], #4 356 355 ands ip, ip, #3 357 356 beq .Lcfu_0fupi 358 357 .Lcfu_0nowords: teq ip, #0 359 358 beq .Lcfu_finished 360 359 .Lcfu_nowords: cmp ip, #2 361 - USER( ldrbt r3, [r1], #1) @ May fault 360 + USER( T(ldrb) r3, [r1], #1) @ May fault 362 361 strb r3, [r0], #1 363 - 
USER( ldrgebt r3, [r1], #1) @ May fault 362 + USER( T(ldrgeb) r3, [r1], #1) @ May fault 364 363 strgeb r3, [r0], #1 365 - USER( ldrgtbt r3, [r1], #1) @ May fault 364 + USER( T(ldrgtb) r3, [r1], #1) @ May fault 366 365 strgtb r3, [r0], #1 367 366 b .Lcfu_finished 368 367 ··· 375 374 376 375 .Lcfu_src_not_aligned: 377 376 bic r1, r1, #3 378 - USER( ldrt r7, [r1], #4) @ May fault 377 + USER( T(ldr) r7, [r1], #4) @ May fault 379 378 cmp ip, #2 380 379 bgt .Lcfu_3fupi 381 380 beq .Lcfu_2fupi ··· 383 382 addmi ip, r2, #4 384 383 bmi .Lcfu_1nowords 385 384 mov r3, r7, pull #8 386 - USER( ldrt r7, [r1], #4) @ May fault 385 + USER( T(ldr) r7, [r1], #4) @ May fault 387 386 orr r3, r3, r7, push #24 388 387 str r3, [r0], #4 389 388 mov ip, r1, lsl #32 - PAGE_SHIFT ··· 418 417 stmneia r0!, {r3 - r4} 419 418 tst ip, #4 420 419 movne r3, r7, pull #8 421 - USER( ldrnet r7, [r1], #4) @ May fault 420 + USER( T(ldrne) r7, [r1], #4) @ May fault 422 421 orrne r3, r3, r7, push #24 423 422 strne r3, [r0], #4 424 423 ands ip, ip, #3 ··· 438 437 addmi ip, r2, #4 439 438 bmi .Lcfu_2nowords 440 439 mov r3, r7, pull #16 441 - USER( ldrt r7, [r1], #4) @ May fault 440 + USER( T(ldr) r7, [r1], #4) @ May fault 442 441 orr r3, r3, r7, push #16 443 442 str r3, [r0], #4 444 443 mov ip, r1, lsl #32 - PAGE_SHIFT ··· 474 473 stmneia r0!, {r3 - r4} 475 474 tst ip, #4 476 475 movne r3, r7, pull #16 477 - USER( ldrnet r7, [r1], #4) @ May fault 476 + USER( T(ldrne) r7, [r1], #4) @ May fault 478 477 orrne r3, r3, r7, push #16 479 478 strne r3, [r0], #4 480 479 ands ip, ip, #3 ··· 486 485 strb r3, [r0], #1 487 486 movge r3, r7, get_byte_3 488 487 strgeb r3, [r0], #1 489 - USER( ldrgtbt r3, [r1], #0) @ May fault 488 + USER( T(ldrgtb) r3, [r1], #0) @ May fault 490 489 strgtb r3, [r0], #1 491 490 b .Lcfu_finished 492 491 ··· 494 493 addmi ip, r2, #4 495 494 bmi .Lcfu_3nowords 496 495 mov r3, r7, pull #24 497 - USER( ldrt r7, [r1], #4) @ May fault 496 + USER( T(ldr) r7, [r1], #4) @ May fault 498 497 orr r3, r3, 
r7, push #8 499 498 str r3, [r0], #4 500 499 mov ip, r1, lsl #32 - PAGE_SHIFT ··· 529 528 stmneia r0!, {r3 - r4} 530 529 tst ip, #4 531 530 movne r3, r7, pull #24 532 - USER( ldrnet r7, [r1], #4) @ May fault 531 + USER( T(ldrne) r7, [r1], #4) @ May fault 533 532 orrne r3, r3, r7, push #8 534 533 strne r3, [r0], #4 535 534 ands ip, ip, #3 ··· 539 538 beq .Lcfu_finished 540 539 cmp ip, #2 541 540 strb r3, [r0], #1 542 - USER( ldrgebt r3, [r1], #1) @ May fault 541 + USER( T(ldrgeb) r3, [r1], #1) @ May fault 543 542 strgeb r3, [r0], #1 544 - USER( ldrgtbt r3, [r1], #1) @ May fault 543 + USER( T(ldrgtb) r3, [r1], #1) @ May fault 545 544 strgtb r3, [r0], #1 546 545 b .Lcfu_finished 547 546 ENDPROC(__copy_from_user)
+1 -2
arch/arm/mach-bcmring/clock.c
··· 21 21 #include <linux/string.h> 22 22 #include <linux/clk.h> 23 23 #include <linux/spinlock.h> 24 + #include <linux/clkdev.h> 24 25 #include <mach/csp/hw_cfg.h> 25 26 #include <mach/csp/chipcHw_def.h> 26 27 #include <mach/csp/chipcHw_reg.h> 27 28 #include <mach/csp/chipcHw_inline.h> 28 - 29 - #include <asm/clkdev.h> 30 29 31 30 #include "clock.h" 32 31
+1 -1
arch/arm/mach-bcmring/core.c
··· 30 30 #include <linux/amba/bus.h> 31 31 #include <linux/clocksource.h> 32 32 #include <linux/clockchips.h> 33 + #include <linux/clkdev.h> 33 34 34 35 #include <mach/csp/mm_addr.h> 35 36 #include <mach/hardware.h> 36 - #include <asm/clkdev.h> 37 37 #include <linux/io.h> 38 38 #include <asm/irq.h> 39 39 #include <asm/hardware/arm_timer.h>
+1
arch/arm/mach-cns3xxx/Kconfig
··· 3 3 4 4 config MACH_CNS3420VB 5 5 bool "Support for CNS3420 Validation Board" 6 + select MIGHT_HAVE_PCI 6 7 help 7 8 Include support for the Cavium Networks CNS3420 MPCore Platform 8 9 Baseboard.
+1 -1
arch/arm/mach-davinci/clock.h
··· 68 68 #ifndef __ASSEMBLER__ 69 69 70 70 #include <linux/list.h> 71 - #include <asm/clkdev.h> 71 + #include <linux/clkdev.h> 72 72 73 73 #define PLLSTAT_GOSTAT BIT(0) 74 74 #define PLLCMD_GOSET BIT(0)
+1 -1
arch/arm/mach-ep93xx/clock.c
··· 19 19 #include <linux/string.h> 20 20 #include <linux/io.h> 21 21 #include <linux/spinlock.h> 22 + #include <linux/clkdev.h> 22 23 23 24 #include <mach/hardware.h> 24 25 25 - #include <asm/clkdev.h> 26 26 #include <asm/div64.h> 27 27 28 28
+1 -2
arch/arm/mach-imx/clock-imx1.c
··· 22 22 #include <linux/err.h> 23 23 #include <linux/clk.h> 24 24 #include <linux/io.h> 25 - 26 - #include <asm/clkdev.h> 25 + #include <linux/clkdev.h> 27 26 28 27 #include <mach/clock.h> 29 28 #include <mach/hardware.h>
+1 -1
arch/arm/mach-imx/clock-imx21.c
··· 21 21 #include <linux/clk.h> 22 22 #include <linux/io.h> 23 23 #include <linux/module.h> 24 + #include <linux/clkdev.h> 24 25 25 26 #include <mach/clock.h> 26 27 #include <mach/hardware.h> 27 28 #include <mach/common.h> 28 - #include <asm/clkdev.h> 29 29 #include <asm/div64.h> 30 30 31 31 #define IO_ADDR_CCM(off) (MX21_IO_ADDRESS(MX21_CCM_BASE_ADDR + (off)))
+1 -1
arch/arm/mach-imx/clock-imx27.c
··· 21 21 #include <linux/clk.h> 22 22 #include <linux/io.h> 23 23 #include <linux/module.h> 24 + #include <linux/clkdev.h> 24 25 25 - #include <asm/clkdev.h> 26 26 #include <asm/div64.h> 27 27 28 28 #include <mach/clock.h>
+1
arch/arm/mach-integrator/Kconfig
··· 4 4 5 5 config ARCH_INTEGRATOR_AP 6 6 bool "Support Integrator/AP and Integrator/PP2 platforms" 7 + select MIGHT_HAVE_PCI 7 8 help 8 9 Include support for the ARM(R) Integrator/AP and 9 10 Integrator/PP2 platforms.
+1 -2
arch/arm/mach-integrator/core.c
··· 21 21 #include <linux/amba/bus.h> 22 22 #include <linux/amba/serial.h> 23 23 #include <linux/io.h> 24 + #include <linux/clkdev.h> 24 25 25 - #include <asm/clkdev.h> 26 - #include <mach/clkdev.h> 27 26 #include <mach/hardware.h> 28 27 #include <mach/platform.h> 29 28 #include <asm/irq.h>
+1 -2
arch/arm/mach-integrator/impd1.c
··· 22 22 #include <linux/amba/clcd.h> 23 23 #include <linux/io.h> 24 24 #include <linux/slab.h> 25 + #include <linux/clkdev.h> 25 26 26 - #include <asm/clkdev.h> 27 - #include <mach/clkdev.h> 28 27 #include <asm/hardware/icst.h> 29 28 #include <mach/lm.h> 30 29 #include <mach/impd1.h>
+1 -2
arch/arm/mach-integrator/integrator_cp.c
··· 21 21 #include <linux/amba/mmci.h> 22 22 #include <linux/io.h> 23 23 #include <linux/gfp.h> 24 + #include <linux/clkdev.h> 24 25 25 - #include <asm/clkdev.h> 26 - #include <mach/clkdev.h> 27 26 #include <mach/hardware.h> 28 27 #include <mach/platform.h> 29 28 #include <asm/irq.h>
+3 -3
arch/arm/mach-iop13xx/include/mach/memory.h
··· 58 58 __dma; \ 59 59 }) 60 60 61 - #define __arch_page_to_dma(dev, page) \ 61 + #define __arch_pfn_to_dma(dev, pfn) \ 62 62 ({ \ 63 63 /* __is_lbus_virt() can never be true for RAM pages */ \ 64 - (dma_addr_t)page_to_phys(page); \ 64 + (dma_addr_t)__pfn_to_phys(pfn); \ 65 65 }) 66 66 67 - #define __arch_dma_to_page(dev, addr) phys_to_page(addr) 67 + #define __arch_dma_to_pfn(dev, addr) __phys_to_pfn(addr) 68 68 69 69 #endif /* CONFIG_ARCH_IOP13XX */ 70 70 #endif /* !ASSEMBLY */
+1
arch/arm/mach-ks8695/Kconfig
··· 4 4 5 5 config MACH_KS8695 6 6 bool "KS8695 development board" 7 + select MIGHT_HAVE_PCI 7 8 help 8 9 Say 'Y' here if you want your kernel to run on the original 9 10 Kendin-Micrel KS8695 development board.
+4 -4
arch/arm/mach-ks8695/include/mach/memory.h
··· 35 35 __phys_to_virt(x) : __bus_to_virt(x)); }) 36 36 #define __arch_virt_to_dma(dev, x) ({ is_lbus_device(dev) ? \ 37 37 (dma_addr_t)__virt_to_phys(x) : (dma_addr_t)__virt_to_bus(x); }) 38 - #define __arch_page_to_dma(dev, x) \ 39 - ({ dma_addr_t __dma = page_to_phys(page); \ 38 + #define __arch_pfn_to_dma(dev, pfn) \ 39 + ({ dma_addr_t __dma = __pfn_to_phys(pfn); \ 40 40 if (!is_lbus_device(dev)) \ 41 41 __dma = __dma - PHYS_OFFSET + KS8695_PCIMEM_PA; \ 42 42 __dma; }) 43 43 44 - #define __arch_dma_to_page(dev, x) \ 44 + #define __arch_dma_to_pfn(dev, x) \ 45 45 ({ dma_addr_t __dma = x; \ 46 46 if (!is_lbus_device(dev)) \ 47 47 __dma += PHYS_OFFSET - KS8695_PCIMEM_PA; \ 48 - phys_to_page(__dma); \ 48 + __phys_to_pfn(__dma); \ 49 49 }) 50 50 51 51 #endif
+1 -2
arch/arm/mach-lpc32xx/clock.c
··· 90 90 #include <linux/clk.h> 91 91 #include <linux/amba/bus.h> 92 92 #include <linux/amba/clcd.h> 93 + #include <linux/clkdev.h> 93 94 94 95 #include <mach/hardware.h> 95 - #include <asm/clkdev.h> 96 - #include <mach/clkdev.h> 97 96 #include <mach/platform.h> 98 97 #include "clock.h" 99 98 #include "common.h"
+1 -1
arch/arm/mach-mmp/clock.h
··· 6 6 * published by the Free Software Foundation. 7 7 */ 8 8 9 - #include <asm/clkdev.h> 9 + #include <linux/clkdev.h> 10 10 11 11 struct clkops { 12 12 void (*enable)(struct clk *);
+2 -2
arch/arm/mach-msm/include/mach/smp.h
··· 31 31 32 32 #include <asm/hardware/gic.h> 33 33 34 - static inline void smp_cross_call(const struct cpumask *mask) 34 + static inline void smp_cross_call(const struct cpumask *mask, int ipi) 35 35 { 36 - gic_raise_softirq(mask, 1); 36 + gic_raise_softirq(mask, ipi); 37 37 } 38 38 39 39 #endif
+1 -2
arch/arm/mach-mx25/clock.c
··· 21 21 #include <linux/list.h> 22 22 #include <linux/clk.h> 23 23 #include <linux/io.h> 24 - 25 - #include <asm/clkdev.h> 24 + #include <linux/clkdev.h> 26 25 27 26 #include <mach/clock.h> 28 27 #include <mach/hardware.h>
+1 -1
arch/arm/mach-mx3/clock-imx31.c
··· 23 23 #include <linux/clk.h> 24 24 #include <linux/err.h> 25 25 #include <linux/io.h> 26 + #include <linux/clkdev.h> 26 27 27 - #include <asm/clkdev.h> 28 28 #include <asm/div64.h> 29 29 30 30 #include <mach/clock.h>
+1 -2
arch/arm/mach-mx3/clock-imx35.c
··· 21 21 #include <linux/list.h> 22 22 #include <linux/clk.h> 23 23 #include <linux/io.h> 24 - 25 - #include <asm/clkdev.h> 24 + #include <linux/clkdev.h> 26 25 27 26 #include <mach/clock.h> 28 27 #include <mach/hardware.h>
+1 -1
arch/arm/mach-mx5/clock-mx51.c
··· 14 14 #include <linux/delay.h> 15 15 #include <linux/clk.h> 16 16 #include <linux/io.h> 17 + #include <linux/clkdev.h> 17 18 18 - #include <asm/clkdev.h> 19 19 #include <asm/div64.h> 20 20 21 21 #include <mach/hardware.h>
+1 -1
arch/arm/mach-mxc91231/clock.c
··· 2 2 #include <linux/kernel.h> 3 3 #include <linux/init.h> 4 4 #include <linux/io.h> 5 + #include <linux/clkdev.h> 5 6 6 7 #include <mach/clock.h> 7 8 #include <mach/hardware.h> 8 9 #include <mach/common.h> 9 10 10 - #include <asm/clkdev.h> 11 11 #include <asm/bug.h> 12 12 #include <asm/div64.h> 13 13
+1 -1
arch/arm/mach-nomadik/clock.c
··· 7 7 #include <linux/module.h> 8 8 #include <linux/errno.h> 9 9 #include <linux/clk.h> 10 - #include <asm/clkdev.h> 10 + #include <linux/clkdev.h> 11 11 #include "clock.h" 12 12 13 13 /*
+1 -1
arch/arm/mach-nuc93x/clock.h
··· 10 10 * the Free Software Foundation; either version 2 of the License. 11 11 */ 12 12 13 - #include <asm/clkdev.h> 13 + #include <linux/clkdev.h> 14 14 15 15 void nuc93x_clk_enable(struct clk *clk, int enable); 16 16 void clks_register(struct clk_lookup *clks, size_t num);
+1 -1
arch/arm/mach-omap1/clock.c
··· 17 17 #include <linux/err.h> 18 18 #include <linux/clk.h> 19 19 #include <linux/io.h> 20 + #include <linux/clkdev.h> 20 21 21 22 #include <asm/mach-types.h> 22 - #include <asm/clkdev.h> 23 23 24 24 #include <plat/cpu.h> 25 25 #include <plat/usb.h>
+1 -1
arch/arm/mach-omap2/dpll3xxx.c
··· 26 26 #include <linux/clk.h> 27 27 #include <linux/io.h> 28 28 #include <linux/bitops.h> 29 + #include <linux/clkdev.h> 29 30 30 31 #include <plat/cpu.h> 31 32 #include <plat/clock.h> 32 - #include <asm/clkdev.h> 33 33 34 34 #include "clock.h" 35 35 #include "prm.h"
+1 -13
arch/arm/mach-omap2/omap-hotplug.c
··· 17 17 #include <linux/kernel.h> 18 18 #include <linux/errno.h> 19 19 #include <linux/smp.h> 20 - #include <linux/completion.h> 21 20 22 21 #include <asm/cacheflush.h> 23 22 #include <mach/omap4-common.h> 24 23 25 - static DECLARE_COMPLETION(cpu_killed); 26 - 27 24 int platform_cpu_kill(unsigned int cpu) 28 25 { 29 - return wait_for_completion_timeout(&cpu_killed, 5000); 26 + return 1; 30 27 } 31 28 32 29 /* ··· 32 35 */ 33 36 void platform_cpu_die(unsigned int cpu) 34 37 { 35 - unsigned int this_cpu = hard_smp_processor_id(); 36 - 37 - if (cpu != this_cpu) { 38 - pr_crit("platform_cpu_die running on %u, should be %u\n", 39 - this_cpu, cpu); 40 - BUG(); 41 - } 42 - pr_notice("CPU%u: shutdown\n", cpu); 43 - complete(&cpu_killed); 44 38 flush_cache_all(); 45 39 dsb(); 46 40
+15 -51
arch/arm/mach-omap2/omap-smp.c
··· 21 21 #include <linux/io.h> 22 22 23 23 #include <asm/cacheflush.h> 24 - #include <asm/localtimer.h> 25 24 #include <asm/smp_scu.h> 26 25 #include <mach/hardware.h> 27 26 #include <mach/omap4-common.h> ··· 28 29 /* SCU base address */ 29 30 static void __iomem *scu_base; 30 31 31 - /* 32 - * Use SCU config register to count number of cores 33 - */ 34 - static inline unsigned int get_core_count(void) 35 - { 36 - if (scu_base) 37 - return scu_get_core_count(scu_base); 38 - return 1; 39 - } 40 - 41 32 static DEFINE_SPINLOCK(boot_lock); 42 33 43 34 void __cpuinit platform_secondary_init(unsigned int cpu) 44 35 { 45 - trace_hardirqs_off(); 46 - 47 36 /* 48 37 * If any interrupts are already enabled for the primary 49 38 * core (e.g. timer irq), then they will not have been enabled ··· 63 76 omap_modify_auxcoreboot0(0x200, 0xfffffdff); 64 77 flush_cache_all(); 65 78 smp_wmb(); 66 - smp_cross_call(cpumask_of(cpu)); 79 + smp_cross_call(cpumask_of(cpu), 1); 67 80 68 81 /* 69 82 * Now the secondary core is starting up let it run its ··· 105 118 scu_base = ioremap(OMAP44XX_SCU_BASE, SZ_256); 106 119 BUG_ON(!scu_base); 107 120 108 - ncores = get_core_count(); 109 - 110 - for (i = 0; i < ncores; i++) 111 - set_cpu_possible(i, true); 112 - } 113 - 114 - void __init smp_prepare_cpus(unsigned int max_cpus) 115 - { 116 - unsigned int ncores = get_core_count(); 117 - unsigned int cpu = smp_processor_id(); 118 - int i; 121 + ncores = scu_get_core_count(scu_base); 119 122 120 123 /* sanity check */ 121 - if (ncores == 0) { 122 - printk(KERN_ERR 123 - "OMAP4: strange core count of 0? Default to 1\n"); 124 - ncores = 1; 125 - } 126 - 127 124 if (ncores > NR_CPUS) { 128 125 printk(KERN_WARNING 129 126 "OMAP4: no. of cores (%d) greater than configured " ··· 115 144 ncores, NR_CPUS); 116 145 ncores = NR_CPUS; 117 146 } 118 - smp_store_cpu_info(cpu); 119 147 120 - /* 121 - * are we trying to boot more cores than exist? 
122 - */ 123 - if (max_cpus > ncores) 124 - max_cpus = ncores; 148 + for (i = 0; i < ncores; i++) 149 + set_cpu_possible(i, true); 150 + } 151 + 152 + void __init platform_smp_prepare_cpus(unsigned int max_cpus) 153 + { 154 + int i; 125 155 126 156 /* 127 157 * Initialise the present map, which describes the set of CPUs ··· 131 159 for (i = 0; i < max_cpus; i++) 132 160 set_cpu_present(i, true); 133 161 134 - if (max_cpus > 1) { 135 - /* 136 - * Enable the local timer or broadcast device for the 137 - * boot CPU, but only if we have more than one CPU. 138 - */ 139 - percpu_timer_setup(); 140 - 141 - /* 142 - * Initialise the SCU and wake up the secondary core using 143 - * wakeup_secondary(). 144 - */ 145 - scu_enable(scu_base); 146 - wakeup_secondary(); 147 - } 162 + /* 163 + * Initialise the SCU and wake up the secondary core using 164 + * wakeup_secondary(). 165 + */ 166 + scu_enable(scu_base); 167 + wakeup_secondary(); 148 168 }
+1 -2
arch/arm/mach-pnx4008/clock.c
··· 21 21 #include <linux/err.h> 22 22 #include <linux/delay.h> 23 23 #include <linux/io.h> 24 - 25 - #include <asm/clkdev.h> 24 + #include <linux/clkdev.h> 26 25 27 26 #include <mach/hardware.h> 28 27 #include <mach/clock.h>
+1
arch/arm/mach-pxa/Kconfig
··· 94 94 select PXA27x 95 95 select IWMMXT 96 96 select PXA25x 97 + select MIGHT_HAVE_PCI 97 98 98 99 config MACH_EM_X270 99 100 bool "CompuLab EM-x270 platform"
+1 -1
arch/arm/mach-pxa/clock.c
··· 11 11 #include <linux/spinlock.h> 12 12 #include <linux/platform_device.h> 13 13 #include <linux/delay.h> 14 + #include <linux/clkdev.h> 14 15 15 - #include <asm/clkdev.h> 16 16 #include <mach/pxa2xx-regs.h> 17 17 #include <mach/hardware.h> 18 18
+1 -1
arch/arm/mach-pxa/clock.h
··· 1 - #include <asm/clkdev.h> 1 + #include <linux/clkdev.h> 2 2 3 3 struct clkops { 4 4 void (*enable)(struct clk *);
+1 -2
arch/arm/mach-realview/core.c
··· 30 30 #include <linux/ata_platform.h> 31 31 #include <linux/amba/mmci.h> 32 32 #include <linux/gfp.h> 33 + #include <linux/clkdev.h> 33 34 34 - #include <asm/clkdev.h> 35 35 #include <asm/system.h> 36 36 #include <mach/hardware.h> 37 37 #include <asm/irq.h> ··· 47 47 48 48 #include <asm/hardware/gic.h> 49 49 50 - #include <mach/clkdev.h> 51 50 #include <mach/platform.h> 52 51 #include <mach/irqs.h> 53 52 #include <asm/hardware/timer-sp.h>
+15 -29
arch/arm/mach-realview/hotplug.c
··· 11 11 #include <linux/kernel.h> 12 12 #include <linux/errno.h> 13 13 #include <linux/smp.h> 14 - #include <linux/completion.h> 15 14 16 15 #include <asm/cacheflush.h> 17 16 18 17 extern volatile int pen_release; 19 - 20 - static DECLARE_COMPLETION(cpu_killed); 21 18 22 19 static inline void cpu_enter_lowpower(void) 23 20 { ··· 31 34 " bic %0, %0, #0x20\n" 32 35 " mcr p15, 0, %0, c1, c0, 1\n" 33 36 " mrc p15, 0, %0, c1, c0, 0\n" 34 - " bic %0, %0, #0x04\n" 37 + " bic %0, %0, %2\n" 35 38 " mcr p15, 0, %0, c1, c0, 0\n" 36 39 : "=&r" (v) 37 - : "r" (0) 40 + : "r" (0), "Ir" (CR_C) 38 41 : "cc"); 39 42 } 40 43 ··· 43 46 unsigned int v; 44 47 45 48 asm volatile( "mrc p15, 0, %0, c1, c0, 0\n" 46 - " orr %0, %0, #0x04\n" 49 + " orr %0, %0, %1\n" 47 50 " mcr p15, 0, %0, c1, c0, 0\n" 48 51 " mrc p15, 0, %0, c1, c0, 1\n" 49 52 " orr %0, %0, #0x20\n" 50 53 " mcr p15, 0, %0, c1, c0, 1\n" 51 54 : "=&r" (v) 52 - : 55 + : "Ir" (CR_C) 53 56 : "cc"); 54 57 } 55 58 56 - static inline void platform_do_lowpower(unsigned int cpu) 59 + static inline void platform_do_lowpower(unsigned int cpu, int *spurious) 57 60 { 58 61 /* 59 62 * there is no power-control hardware on this platform, so all ··· 77 80 } 78 81 79 82 /* 80 - * getting here, means that we have come out of WFI without 83 + * Getting here, means that we have come out of WFI without 81 84 * having been woken up - this shouldn't happen 82 85 * 83 - * The trouble is, letting people know about this is not really 84 - * possible, since we are currently running incoherently, and 85 - * therefore cannot safely call printk() or anything else 86 + * Just note it happening - when we're woken, we can report 87 + * its occurrence. 
86 88 */ 87 - #ifdef DEBUG 88 - printk("CPU%u: spurious wakeup call\n", cpu); 89 - #endif 89 + (*spurious)++; 90 90 } 91 91 } 92 92 93 93 int platform_cpu_kill(unsigned int cpu) 94 94 { 95 - return wait_for_completion_timeout(&cpu_killed, 5000); 95 + return 1; 96 96 } 97 97 98 98 /* ··· 99 105 */ 100 106 void platform_cpu_die(unsigned int cpu) 101 107 { 102 - #ifdef DEBUG 103 - unsigned int this_cpu = hard_smp_processor_id(); 104 - 105 - if (cpu != this_cpu) { 106 - printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n", 107 - this_cpu, cpu); 108 - BUG(); 109 - } 110 - #endif 111 - 112 - printk(KERN_NOTICE "CPU%u: shutdown\n", cpu); 113 - complete(&cpu_killed); 108 + int spurious = 0; 114 109 115 110 /* 116 111 * we're ready for shutdown now, so do it 117 112 */ 118 113 cpu_enter_lowpower(); 119 - platform_do_lowpower(cpu); 114 + platform_do_lowpower(cpu, &spurious); 120 115 121 116 /* 122 117 * bring this CPU back into the world of cache 123 118 * coherency, and then restore interrupts 124 119 */ 125 120 cpu_leave_lowpower(); 121 + 122 + if (spurious) 123 + pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); 126 124 } 127 125 128 126 int platform_cpu_disable(unsigned int cpu)
+2 -3
arch/arm/mach-realview/include/mach/smp.h
··· 2 2 #define ASMARM_ARCH_SMP_H 3 3 4 4 #include <asm/hardware/gic.h> 5 - #include <asm/smp_mpidr.h> 6 5 7 6 /* 8 7 * We use IRQ1 as the IPI 9 8 */ 10 - static inline void smp_cross_call(const struct cpumask *mask) 9 + static inline void smp_cross_call(const struct cpumask *mask, int ipi) 11 10 { 12 - gic_raise_softirq(mask, 1); 11 + gic_raise_softirq(mask, ipi); 13 12 } 14 13 15 14 #endif
+37 -79
arch/arm/mach-realview/platsmp.c
··· 19 19 #include <asm/cacheflush.h> 20 20 #include <mach/hardware.h> 21 21 #include <asm/mach-types.h> 22 - #include <asm/localtimer.h> 23 22 #include <asm/unified.h> 24 23 25 24 #include <mach/board-eb.h> ··· 36 37 */ 37 38 volatile int __cpuinitdata pen_release = -1; 38 39 40 + /* 41 + * Write pen_release in a way that is guaranteed to be visible to all 42 + * observers, irrespective of whether they're taking part in coherency 43 + * or not. This is necessary for the hotplug code to work reliably. 44 + */ 45 + static void write_pen_release(int val) 46 + { 47 + pen_release = val; 48 + smp_wmb(); 49 + __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); 50 + outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); 51 + } 52 + 39 53 static void __iomem *scu_base_addr(void) 40 54 { 41 55 if (machine_is_realview_eb_mp()) ··· 62 50 return (void __iomem *)0; 63 51 } 64 52 65 - static inline unsigned int get_core_count(void) 66 - { 67 - void __iomem *scu_base = scu_base_addr(); 68 - if (scu_base) 69 - return scu_get_core_count(scu_base); 70 - return 1; 71 - } 72 - 73 53 static DEFINE_SPINLOCK(boot_lock); 74 54 75 55 void __cpuinit platform_secondary_init(unsigned int cpu) 76 56 { 77 - trace_hardirqs_off(); 78 - 79 57 /* 80 58 * if any interrupts are already enabled for the primary 81 59 * core (e.g. timer irq), then they will not have been enabled ··· 77 75 * let the primary processor know we're out of the 78 76 * pen, then head off into the C entry point 79 77 */ 80 - pen_release = -1; 81 - smp_wmb(); 78 + write_pen_release(-1); 82 79 83 80 /* 84 81 * Synchronise with the boot thread. ··· 104 103 * Note that "pen_release" is the hardware CPU ID, whereas 105 104 * "cpu" is Linux's internal ID. 
106 105 */ 107 - pen_release = cpu; 108 - flush_cache_all(); 106 + write_pen_release(cpu); 109 107 110 108 /* 111 - * XXX 112 - * 113 - * This is a later addition to the booting protocol: the 114 - * bootMonitor now puts secondary cores into WFI, so 115 - * poke_milo() no longer gets the cores moving; we need 116 - * to send a soft interrupt to wake the secondary core. 117 - * Use smp_cross_call() for this, since there's little 118 - * point duplicating the code here 109 + * Send the secondary CPU a soft interrupt, thereby causing 110 + * the boot monitor to read the system wide flags register, 111 + * and branch to the address found there. 119 112 */ 120 - smp_cross_call(cpumask_of(cpu)); 113 + smp_cross_call(cpumask_of(cpu), 1); 121 114 122 115 timeout = jiffies + (1 * HZ); 123 116 while (time_before(jiffies, timeout)) { ··· 131 136 return pen_release != -1 ? -ENOSYS : 0; 132 137 } 133 138 134 - static void __init poke_milo(void) 135 - { 136 - /* nobody is to be released from the pen yet */ 137 - pen_release = -1; 138 - 139 - /* 140 - * Write the address of secondary startup into the system-wide flags 141 - * register. The BootMonitor waits for this register to become 142 - * non-zero. 143 - */ 144 - __raw_writel(BSYM(virt_to_phys(realview_secondary_startup)), 145 - __io_address(REALVIEW_SYS_FLAGSSET)); 146 - 147 - mb(); 148 - } 149 - 150 139 /* 151 140 * Initialise the CPU possible map early - this describes the CPUs 152 141 * which may be present or become present in the system. 153 142 */ 154 143 void __init smp_init_cpus(void) 155 144 { 156 - unsigned int i, ncores = get_core_count(); 145 + void __iomem *scu_base = scu_base_addr(); 146 + unsigned int i, ncores; 157 147 158 - for (i = 0; i < ncores; i++) 159 - set_cpu_possible(i, true); 160 - } 161 - 162 - void __init smp_prepare_cpus(unsigned int max_cpus) 163 - { 164 - unsigned int ncores = get_core_count(); 165 - unsigned int cpu = smp_processor_id(); 166 - int i; 148 + ncores = scu_base ? 
scu_get_core_count(scu_base) : 1; 167 149 168 150 /* sanity check */ 169 - if (ncores == 0) { 170 - printk(KERN_ERR 171 - "Realview: strange CM count of 0? Default to 1\n"); 172 - 173 - ncores = 1; 174 - } 175 - 176 151 if (ncores > NR_CPUS) { 177 152 printk(KERN_WARNING 178 153 "Realview: no. of cores (%d) greater than configured " ··· 151 186 ncores = NR_CPUS; 152 187 } 153 188 154 - smp_store_cpu_info(cpu); 189 + for (i = 0; i < ncores; i++) 190 + set_cpu_possible(i, true); 191 + } 155 192 156 - /* 157 - * are we trying to boot more cores than exist? 158 - */ 159 - if (max_cpus > ncores) 160 - max_cpus = ncores; 193 + void __init platform_smp_prepare_cpus(unsigned int max_cpus) 194 + { 195 + int i; 161 196 162 197 /* 163 198 * Initialise the present map, which describes the set of CPUs ··· 166 201 for (i = 0; i < max_cpus; i++) 167 202 set_cpu_present(i, true); 168 203 169 - /* 170 - * Initialise the SCU if there are more than one CPU and let 171 - * them know where to start. Note that, on modern versions of 172 - * MILO, the "poke" doesn't actually do anything until each 173 - * individual core is sent a soft interrupt to get it out of 174 - * WFI 175 - */ 176 - if (max_cpus > 1) { 177 - /* 178 - * Enable the local timer or broadcast device for the 179 - * boot CPU, but only if we have more than one CPU. 180 - */ 181 - percpu_timer_setup(); 204 + scu_enable(scu_base_addr()); 182 205 183 - scu_enable(scu_base_addr()); 184 - poke_milo(); 185 - } 206 + /* 207 + * Write the address of secondary startup into the 208 + * system-wide flags register. The BootMonitor waits 209 + * until it receives a soft interrupt, and then the 210 + * secondary CPU branches to this address. 211 + */ 212 + __raw_writel(BSYM(virt_to_phys(realview_secondary_startup)), 213 + __io_address(REALVIEW_SYS_FLAGSSET)); 186 214 }
+1 -1
arch/arm/mach-s3c2412/Kconfig
··· 59 59 Say Y here if you are using the Logitech Jive. 60 60 61 61 config MACH_JIVE_SHOW_BOOTLOADER 62 - bool "Allow access to bootloader partitions in MTD" 62 + bool "Allow access to bootloader partitions in MTD (EXPERIMENTAL)" 63 63 depends on MACH_JIVE && EXPERIMENTAL 64 64 65 65 config MACH_SMDK2413
+15 -29
arch/arm/mach-s5pv310/hotplug.c
··· 13 13 #include <linux/kernel.h> 14 14 #include <linux/errno.h> 15 15 #include <linux/smp.h> 16 - #include <linux/completion.h> 17 16 18 17 #include <asm/cacheflush.h> 19 18 20 19 extern volatile int pen_release; 21 - 22 - static DECLARE_COMPLETION(cpu_killed); 23 20 24 21 static inline void cpu_enter_lowpower(void) 25 22 { ··· 30 33 * Turn off coherency 31 34 */ 32 35 " mrc p15, 0, %0, c1, c0, 1\n" 33 - " bic %0, %0, #0x20\n" 36 + " bic %0, %0, %2\n" 34 37 " mcr p15, 0, %0, c1, c0, 1\n" 35 38 " mrc p15, 0, %0, c1, c0, 0\n" 36 39 " bic %0, %0, #0x04\n" 37 40 " mcr p15, 0, %0, c1, c0, 0\n" 38 41 : "=&r" (v) 39 - : "r" (0) 42 + : "r" (0), "Ir" (CR_C) 40 43 : "cc"); 41 44 } 42 45 ··· 46 49 47 50 asm volatile( 48 51 "mrc p15, 0, %0, c1, c0, 0\n" 49 - " orr %0, %0, #0x04\n" 52 + " orr %0, %0, %1\n" 50 53 " mcr p15, 0, %0, c1, c0, 0\n" 51 54 " mrc p15, 0, %0, c1, c0, 1\n" 52 55 " orr %0, %0, #0x20\n" 53 56 " mcr p15, 0, %0, c1, c0, 1\n" 54 57 : "=&r" (v) 55 - : 58 + : "Ir" (CR_C) 56 59 : "cc"); 57 60 } 58 61 59 - static inline void platform_do_lowpower(unsigned int cpu) 62 + static inline void platform_do_lowpower(unsigned int cpu, int *spurious) 60 63 { 61 64 /* 62 65 * there is no power-control hardware on this platform, so all ··· 80 83 } 81 84 82 85 /* 83 - * getting here, means that we have come out of WFI without 86 + * Getting here, means that we have come out of WFI without 84 87 * having been woken up - this shouldn't happen 85 88 * 86 - * The trouble is, letting people know about this is not really 87 - * possible, since we are currently running incoherently, and 88 - * therefore cannot safely call printk() or anything else 89 + * Just note it happening - when we're woken, we can report 90 + * its occurrence. 
89 91 */ 90 - #ifdef DEBUG 91 - printk(KERN_WARN "CPU%u: spurious wakeup call\n", cpu); 92 - #endif 92 + (*spurious)++; 93 93 } 94 94 } 95 95 96 96 int platform_cpu_kill(unsigned int cpu) 97 97 { 98 - return wait_for_completion_timeout(&cpu_killed, 5000); 98 + return 1; 99 99 } 100 100 101 101 /* ··· 102 108 */ 103 109 void platform_cpu_die(unsigned int cpu) 104 110 { 105 - #ifdef DEBUG 106 - unsigned int this_cpu = hard_smp_processor_id(); 107 - 108 - if (cpu != this_cpu) { 109 - printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n", 110 - this_cpu, cpu); 111 - BUG(); 112 - } 113 - #endif 114 - 115 - printk(KERN_NOTICE "CPU%u: shutdown\n", cpu); 116 - complete(&cpu_killed); 111 + int spurious = 0; 117 112 118 113 /* 119 114 * we're ready for shutdown now, so do it 120 115 */ 121 116 cpu_enter_lowpower(); 122 - platform_do_lowpower(cpu); 117 + platform_do_lowpower(cpu, &spurious); 123 118 124 119 /* 125 120 * bring this CPU back into the world of cache 126 121 * coherency, and then restore interrupts 127 122 */ 128 123 cpu_leave_lowpower(); 124 + 125 + if (spurious) 126 + pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); 129 127 } 130 128 131 129 int platform_cpu_disable(unsigned int cpu)
+2 -3
arch/arm/mach-s5pv310/include/mach/smp.h
··· 7 7 #define ASM_ARCH_SMP_H __FILE__ 8 8 9 9 #include <asm/hardware/gic.h> 10 - #include <asm/smp_mpidr.h> 11 10 12 11 /* 13 12 * We use IRQ1 as the IPI 14 13 */ 15 - static inline void smp_cross_call(const struct cpumask *mask) 14 + static inline void smp_cross_call(const struct cpumask *mask, int ipi) 16 15 { 17 - gic_raise_softirq(mask, 1); 16 + gic_raise_softirq(mask, ipi); 18 17 } 19 18 20 19 #endif
+23 -43
arch/arm/mach-s5pv310/platsmp.c
··· 22 22 #include <linux/io.h> 23 23 24 24 #include <asm/cacheflush.h> 25 - #include <asm/localtimer.h> 26 25 #include <asm/smp_scu.h> 27 26 #include <asm/unified.h> 28 27 ··· 37 38 38 39 volatile int __cpuinitdata pen_release = -1; 39 40 41 + /* 42 + * Write pen_release in a way that is guaranteed to be visible to all 43 + * observers, irrespective of whether they're taking part in coherency 44 + * or not. This is necessary for the hotplug code to work reliably. 45 + */ 46 + static void write_pen_release(int val) 47 + { 48 + pen_release = val; 49 + smp_wmb(); 50 + __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); 51 + outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); 52 + } 53 + 40 54 static void __iomem *scu_base_addr(void) 41 55 { 42 56 return (void __iomem *)(S5P_VA_SCU); ··· 59 47 60 48 void __cpuinit platform_secondary_init(unsigned int cpu) 61 49 { 62 - trace_hardirqs_off(); 63 - 64 50 /* 65 51 * if any interrupts are already enabled for the primary 66 52 * core (e.g. timer irq), then they will not have been enabled ··· 70 60 * let the primary processor know we're out of the 71 61 * pen, then head off into the C entry point 72 62 */ 73 - pen_release = -1; 74 - smp_wmb(); 63 + write_pen_release(-1); 75 64 76 65 /* 77 66 * Synchronise with the boot thread. ··· 97 88 * Note that "pen_release" is the hardware CPU ID, whereas 98 89 * "cpu" is Linux's internal ID. 99 90 */ 100 - pen_release = cpu; 101 - __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); 102 - outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); 91 + write_pen_release(cpu); 103 92 104 93 /* 105 94 * Send the secondary CPU a soft interrupt, thereby causing 106 95 * the boot monitor to read the system wide flags register, 107 96 * and branch to the address found there. 
108 97 */ 109 - smp_cross_call(cpumask_of(cpu)); 98 + smp_cross_call(cpumask_of(cpu), 1); 110 99 111 100 timeout = jiffies + (1 * HZ); 112 101 while (time_before(jiffies, timeout)) { ··· 137 130 ncores = scu_base ? scu_get_core_count(scu_base) : 1; 138 131 139 132 /* sanity check */ 140 - if (ncores == 0) { 141 - printk(KERN_ERR 142 - "S5PV310: strange CM count of 0? Default to 1\n"); 143 - 144 - ncores = 1; 145 - } 146 - 147 133 if (ncores > NR_CPUS) { 148 134 printk(KERN_WARNING 149 135 "S5PV310: no. of cores (%d) greater than configured " ··· 149 149 set_cpu_possible(i, true); 150 150 } 151 151 152 - void __init smp_prepare_cpus(unsigned int max_cpus) 152 + void __init platform_smp_prepare_cpus(unsigned int max_cpus) 153 153 { 154 - unsigned int ncores = num_possible_cpus(); 155 - unsigned int cpu = smp_processor_id(); 156 154 int i; 157 - 158 - smp_store_cpu_info(cpu); 159 - 160 - /* are we trying to boot more cores than exist? */ 161 - if (max_cpus > ncores) 162 - max_cpus = ncores; 163 155 164 156 /* 165 157 * Initialise the present map, which describes the set of CPUs ··· 160 168 for (i = 0; i < max_cpus; i++) 161 169 set_cpu_present(i, true); 162 170 171 + scu_enable(scu_base_addr()); 172 + 163 173 /* 164 - * Initialise the SCU if there are more than one CPU and let 165 - * them know where to start. 174 + * Write the address of secondary startup into the 175 + * system-wide flags register. The boot monitor waits 176 + * until it receives a soft interrupt, and then the 177 + * secondary CPU branches to this address. 166 178 */ 167 - if (max_cpus > 1) { 168 - /* 169 - * Enable the local timer or broadcast device for the 170 - * boot CPU, but only if we have more than one CPU. 171 - */ 172 - percpu_timer_setup(); 173 - 174 - scu_enable(scu_base_addr()); 175 - 176 - /* 177 - * Write the address of secondary startup into the 178 - * system-wide flags register. 
The boot monitor waits 179 - * until it receives a soft interrupt, and then the 180 - * secondary CPU branches to this address. 181 - */ 182 179 __raw_writel(BSYM(virt_to_phys(s5pv310_secondary_startup)), S5P_VA_SYSRAM); 183 - } 184 180 }
+3 -3
arch/arm/mach-shmobile/Kconfig
··· 6 6 bool "SH-Mobile G3 (SH7367)" 7 7 select CPU_V6 8 8 select HAVE_CLK 9 - select COMMON_CLKDEV 9 + select CLKDEV_LOOKUP 10 10 select SH_CLK_CPG 11 11 select GENERIC_CLOCKEVENTS 12 12 ··· 14 14 bool "SH-Mobile G4 (SH7377)" 15 15 select CPU_V7 16 16 select HAVE_CLK 17 - select COMMON_CLKDEV 17 + select CLKDEV_LOOKUP 18 18 select SH_CLK_CPG 19 19 select GENERIC_CLOCKEVENTS 20 20 ··· 22 22 bool "SH-Mobile AP4 (SH7372)" 23 23 select CPU_V7 24 24 select HAVE_CLK 25 - select COMMON_CLKDEV 25 + select CLKDEV_LOOKUP 26 26 select SH_CLK_CPG 27 27 select GENERIC_CLOCKEVENTS 28 28
+1 -1
arch/arm/mach-shmobile/clock-sh7367.c
··· 20 20 #include <linux/kernel.h> 21 21 #include <linux/io.h> 22 22 #include <linux/sh_clk.h> 23 + #include <linux/clkdev.h> 23 24 #include <mach/common.h> 24 - #include <asm/clkdev.h> 25 25 26 26 /* SH7367 registers */ 27 27 #define RTFRQCR 0xe6150000
+1 -1
arch/arm/mach-shmobile/clock-sh7372.c
··· 20 20 #include <linux/kernel.h> 21 21 #include <linux/io.h> 22 22 #include <linux/sh_clk.h> 23 + #include <linux/clkdev.h> 23 24 #include <mach/common.h> 24 - #include <asm/clkdev.h> 25 25 26 26 /* SH7372 registers */ 27 27 #define FRQCRA 0xe6150000
+1 -1
arch/arm/mach-shmobile/clock-sh7377.c
··· 20 20 #include <linux/kernel.h> 21 21 #include <linux/io.h> 22 22 #include <linux/sh_clk.h> 23 + #include <linux/clkdev.h> 23 24 #include <mach/common.h> 24 - #include <asm/clkdev.h> 25 25 26 26 /* SH7377 registers */ 27 27 #define RTFRQCR 0xe6150000
+1 -2
arch/arm/mach-tcc8k/clock.c
··· 12 12 #include <linux/io.h> 13 13 #include <linux/module.h> 14 14 #include <linux/spinlock.h> 15 - 16 - #include <asm/clkdev.h> 15 + #include <linux/clkdev.h> 17 16 18 17 #include <mach/clock.h> 19 18 #include <mach/irqs.h>
+1 -1
arch/arm/mach-tegra/clock.c
··· 25 25 #include <linux/slab.h> 26 26 #include <linux/seq_file.h> 27 27 #include <linux/regulator/consumer.h> 28 - #include <asm/clkdev.h> 28 + #include <linux/clkdev.h> 29 29 30 30 #include "clock.h" 31 31 #include "board.h"
+1 -1
arch/arm/mach-tegra/clock.h
··· 21 21 #define __MACH_TEGRA_CLOCK_H 22 22 23 23 #include <linux/list.h> 24 - #include <asm/clkdev.h> 24 + #include <linux/clkdev.h> 25 25 26 26 #define DIV_BUS (1 << 0) 27 27 #define DIV_U71 (1 << 1)
+15 -29
arch/arm/mach-tegra/hotplug.c
··· 11 11 #include <linux/kernel.h> 12 12 #include <linux/errno.h> 13 13 #include <linux/smp.h> 14 - #include <linux/completion.h> 15 14 16 15 #include <asm/cacheflush.h> 17 - 18 - static DECLARE_COMPLETION(cpu_killed); 19 16 20 17 static inline void cpu_enter_lowpower(void) 21 18 { ··· 26 29 * Turn off coherency 27 30 */ 28 31 " mrc p15, 0, %0, c1, c0, 1\n" 29 - " bic %0, %0, #0x20\n" 32 + " bic %0, %0, %2\n" 30 33 " mcr p15, 0, %0, c1, c0, 1\n" 31 34 " mrc p15, 0, %0, c1, c0, 0\n" 32 35 " bic %0, %0, #0x04\n" 33 36 " mcr p15, 0, %0, c1, c0, 0\n" 34 37 : "=&r" (v) 35 - : "r" (0) 38 + : "r" (0), "Ir" (CR_C) 36 39 : "cc"); 37 40 } 38 41 ··· 42 45 43 46 asm volatile( 44 47 "mrc p15, 0, %0, c1, c0, 0\n" 45 - " orr %0, %0, #0x04\n" 48 + " orr %0, %0, %1\n" 46 49 " mcr p15, 0, %0, c1, c0, 0\n" 47 50 " mrc p15, 0, %0, c1, c0, 1\n" 48 51 " orr %0, %0, #0x20\n" 49 52 " mcr p15, 0, %0, c1, c0, 1\n" 50 53 : "=&r" (v) 51 - : 54 + : "Ir" (CR_C) 52 55 : "cc"); 53 56 } 54 57 55 - static inline void platform_do_lowpower(unsigned int cpu) 58 + static inline void platform_do_lowpower(unsigned int cpu, int *spurious) 56 59 { 57 60 /* 58 61 * there is no power-control hardware on this platform, so all ··· 76 79 /*}*/ 77 80 78 81 /* 79 - * getting here, means that we have come out of WFI without 82 + * Getting here, means that we have come out of WFI without 80 83 * having been woken up - this shouldn't happen 81 84 * 82 - * The trouble is, letting people know about this is not really 83 - * possible, since we are currently running incoherently, and 84 - * therefore cannot safely call printk() or anything else 85 + * Just note it happening - when we're woken, we can report 86 + * its occurrence. 
85 87 */ 86 - #ifdef DEBUG 87 - printk(KERN_WARN "CPU%u: spurious wakeup call\n", cpu); 88 - #endif 88 + (*spurious)++; 89 89 } 90 90 } 91 91 92 92 int platform_cpu_kill(unsigned int cpu) 93 93 { 94 - return wait_for_completion_timeout(&cpu_killed, 5000); 94 + return 1; 95 95 } 96 96 97 97 /* ··· 98 104 */ 99 105 void platform_cpu_die(unsigned int cpu) 100 106 { 101 - #ifdef DEBUG 102 - unsigned int this_cpu = hard_smp_processor_id(); 103 - 104 - if (cpu != this_cpu) { 105 - printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n", 106 - this_cpu, cpu); 107 - BUG(); 108 - } 109 - #endif 110 - 111 - printk(KERN_NOTICE "CPU%u: shutdown\n", cpu); 112 - complete(&cpu_killed); 107 + int spurious = 0; 113 108 114 109 /* 115 110 * we're ready for shutdown now, so do it 116 111 */ 117 112 cpu_enter_lowpower(); 118 - platform_do_lowpower(cpu); 113 + platform_do_lowpower(cpu, &spurious); 119 114 120 115 /* 121 116 * bring this CPU back into the world of cache 122 117 * coherency, and then restore interrupts 123 118 */ 124 119 cpu_leave_lowpower(); 120 + 121 + if (spurious) 122 + pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); 125 123 } 126 124 127 125 int platform_cpu_disable(unsigned int cpu)
+2 -10
arch/arm/mach-tegra/include/mach/smp.h
··· 2 2 #define ASMARM_ARCH_SMP_H 3 3 4 4 #include <asm/hardware/gic.h> 5 - #include <asm/smp_mpidr.h> 6 5 7 6 /* 8 7 * We use IRQ1 as the IPI 9 8 */ 10 - static inline void smp_cross_call(const struct cpumask *mask) 9 + static inline void smp_cross_call(const struct cpumask *mask, int ipi) 11 10 { 12 - gic_raise_softirq(mask, 1); 13 - } 14 - 15 - /* 16 - * Do nothing on MPcore. 17 - */ 18 - static inline void smp_cross_call_done(cpumask_t callmap) 19 - { 11 + gic_raise_softirq(mask, ipi); 20 12 } 21 13 22 14 #endif
+8 -25
arch/arm/mach-tegra/platsmp.c
··· 22 22 #include <asm/cacheflush.h> 23 23 #include <mach/hardware.h> 24 24 #include <asm/mach-types.h> 25 - #include <asm/localtimer.h> 26 25 #include <asm/smp_scu.h> 27 26 28 27 #include <mach/iomap.h> ··· 40 41 41 42 void __cpuinit platform_secondary_init(unsigned int cpu) 42 43 { 43 - trace_hardirqs_off(); 44 - 45 44 /* 46 45 * if any interrupts are already enabled for the primary 47 46 * core (e.g. timer irq), then they will not have been enabled ··· 114 117 { 115 118 unsigned int i, ncores = scu_get_core_count(scu_base); 116 119 120 + if (ncores > NR_CPUS) { 121 + printk(KERN_ERR "Tegra: no. of cores (%u) greater than configured (%u), clipping\n", 122 + ncores, NR_CPUS); 123 + ncores = NR_CPUS; 124 + } 125 + 117 126 for (i = 0; i < ncores; i++) 118 127 cpu_set(i, cpu_possible_map); 119 128 } 120 129 121 - void __init smp_prepare_cpus(unsigned int max_cpus) 130 + void __init platform_smp_prepare_cpus(unsigned int max_cpus) 122 131 { 123 - unsigned int ncores = scu_get_core_count(scu_base); 124 - unsigned int cpu = smp_processor_id(); 125 132 int i; 126 - 127 - smp_store_cpu_info(cpu); 128 - 129 - /* 130 - * are we trying to boot more cores than exist? 131 - */ 132 - if (max_cpus > ncores) 133 - max_cpus = ncores; 134 133 135 134 /* 136 135 * Initialise the present map, which describes the set of CPUs ··· 135 142 for (i = 0; i < max_cpus; i++) 136 143 set_cpu_present(i, true); 137 144 138 - /* 139 - * Initialise the SCU if there are more than one CPU and let 140 - * them know where to start. Note that, on modern versions of 141 - * MILO, the "poke" doesn't actually do anything until each 142 - * individual core is sent a soft interrupt to get it out of 143 - * WFI 144 - */ 145 - if (max_cpus > 1) { 146 - percpu_timer_setup(); 147 - scu_enable(scu_base); 148 - } 145 + scu_enable(scu_base); 149 146 }
+1 -2
arch/arm/mach-tegra/tegra2_clocks.c
··· 24 24 #include <linux/delay.h> 25 25 #include <linux/io.h> 26 26 #include <linux/hrtimer.h> 27 - 28 - #include <asm/clkdev.h> 27 + #include <linux/clkdev.h> 29 28 30 29 #include <mach/iomap.h> 31 30
+1 -1
arch/arm/mach-u300/clock.c
··· 25 25 #include <linux/timer.h> 26 26 #include <linux/io.h> 27 27 #include <linux/seq_file.h> 28 + #include <linux/clkdev.h> 28 29 29 - #include <asm/clkdev.h> 30 30 #include <mach/hardware.h> 31 31 #include <mach/syscon.h> 32 32
+1 -2
arch/arm/mach-ux500/clock.c
··· 13 13 #include <linux/err.h> 14 14 #include <linux/clk.h> 15 15 #include <linux/io.h> 16 - 17 - #include <asm/clkdev.h> 16 + #include <linux/clkdev.h> 18 17 19 18 #include <plat/mtu.h> 20 19 #include <mach/hardware.h>
-1
arch/arm/mach-ux500/headsmp.S
··· 23 23 ldmia r4, {r5, r6} 24 24 sub r4, r4, r5 25 25 add r6, r6, r4 26 - dsb 27 26 pen: ldr r7, [r6] 28 27 cmp r7, r0 29 28 bne pen
+1 -17
arch/arm/mach-ux500/hotplug.c
··· 11 11 #include <linux/kernel.h> 12 12 #include <linux/errno.h> 13 13 #include <linux/smp.h> 14 - #include <linux/completion.h> 15 14 16 15 #include <asm/cacheflush.h> 17 16 18 17 extern volatile int pen_release; 19 - 20 - static DECLARE_COMPLETION(cpu_killed); 21 18 22 19 static inline void platform_do_lowpower(unsigned int cpu) 23 20 { ··· 35 38 36 39 int platform_cpu_kill(unsigned int cpu) 37 40 { 38 - return wait_for_completion_timeout(&cpu_killed, 5000); 41 + return 1; 39 42 } 40 43 41 44 /* ··· 45 48 */ 46 49 void platform_cpu_die(unsigned int cpu) 47 50 { 48 - #ifdef DEBUG 49 - unsigned int this_cpu = hard_smp_processor_id(); 50 - 51 - if (cpu != this_cpu) { 52 - printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n", 53 - this_cpu, cpu); 54 - BUG(); 55 - } 56 - #endif 57 - 58 - printk(KERN_NOTICE "CPU%u: shutdown\n", cpu); 59 - complete(&cpu_killed); 60 - 61 51 /* directly enter low power state, skipping secure registers */ 62 52 platform_do_lowpower(cpu); 63 53 }
+2 -3
arch/arm/mach-ux500/include/mach/smp.h
··· 10 10 #define ASMARM_ARCH_SMP_H 11 11 12 12 #include <asm/hardware/gic.h> 13 - #include <asm/smp_mpidr.h> 14 13 15 14 /* This is required to wakeup the secondary core */ 16 15 extern void u8500_secondary_startup(void); ··· 17 18 /* 18 19 * We use IRQ1 as the IPI 19 20 */ 20 - static inline void smp_cross_call(const struct cpumask *mask) 21 + static inline void smp_cross_call(const struct cpumask *mask, int ipi) 21 22 { 22 - gic_raise_softirq(mask, 1); 23 + gic_raise_softirq(mask, ipi); 23 24 } 24 25 #endif
+28 -49
arch/arm/mach-ux500/platsmp.c
··· 18 18 #include <linux/io.h> 19 19 20 20 #include <asm/cacheflush.h> 21 - #include <asm/localtimer.h> 22 21 #include <asm/smp_scu.h> 23 22 #include <mach/hardware.h> 24 23 ··· 27 28 */ 28 29 volatile int __cpuinitdata pen_release = -1; 29 30 30 - static unsigned int __init get_core_count(void) 31 + /* 32 + * Write pen_release in a way that is guaranteed to be visible to all 33 + * observers, irrespective of whether they're taking part in coherency 34 + * or not. This is necessary for the hotplug code to work reliably. 35 + */ 36 + static void write_pen_release(int val) 31 37 { 32 - return scu_get_core_count(__io_address(UX500_SCU_BASE)); 38 + pen_release = val; 39 + smp_wmb(); 40 + __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); 41 + outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); 33 42 } 34 43 35 44 static DEFINE_SPINLOCK(boot_lock); 36 45 37 46 void __cpuinit platform_secondary_init(unsigned int cpu) 38 47 { 39 - trace_hardirqs_off(); 40 - 41 48 /* 42 49 * if any interrupts are already enabled for the primary 43 50 * core (e.g. timer irq), then they will not have been enabled ··· 55 50 * let the primary processor know we're out of the 56 51 * pen, then head off into the C entry point 57 52 */ 58 - pen_release = -1; 53 + write_pen_release(-1); 59 54 60 55 /* 61 56 * Synchronise with the boot thread. ··· 79 74 * the holding pen - release it, then wait for it to flag 80 75 * that it has been released by resetting pen_release. 
81 76 */ 82 - pen_release = cpu; 83 - __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); 84 - outer_clean_range(__pa(&pen_release), __pa(&pen_release) + 1); 77 + write_pen_release(cpu); 85 78 86 - smp_cross_call(cpumask_of(cpu)); 79 + smp_cross_call(cpumask_of(cpu), 1); 87 80 88 81 timeout = jiffies + (1 * HZ); 89 82 while (time_before(jiffies, timeout)) { ··· 100 97 101 98 static void __init wakeup_secondary(void) 102 99 { 103 - /* nobody is to be released from the pen yet */ 104 - pen_release = -1; 105 - 106 100 /* 107 101 * write the address of secondary startup into the backup ram register 108 102 * at offset 0x1FF4, then write the magic number 0xA1FEED01 to the ··· 126 126 */ 127 127 void __init smp_init_cpus(void) 128 128 { 129 - unsigned int i, ncores = get_core_count(); 129 + unsigned int i, ncores; 130 + 131 + ncores = scu_get_core_count(__io_address(UX500_SCU_BASE)); 132 + 133 + /* sanity check */ 134 + if (ncores > NR_CPUS) { 135 + printk(KERN_WARNING 136 + "U8500: no. of cores (%d) greater than configured " 137 + "maximum of %d - clipping\n", 138 + ncores, NR_CPUS); 139 + ncores = NR_CPUS; 140 + } 130 141 131 142 for (i = 0; i < ncores; i++) 132 143 set_cpu_possible(i, true); 133 144 } 134 145 135 - void __init smp_prepare_cpus(unsigned int max_cpus) 146 + void __init platform_smp_prepare_cpus(unsigned int max_cpus) 136 147 { 137 - unsigned int ncores = get_core_count(); 138 - unsigned int cpu = smp_processor_id(); 139 148 int i; 140 - 141 - /* sanity check */ 142 - if (ncores == 0) { 143 - printk(KERN_ERR 144 - "U8500: strange CM count of 0? Default to 1\n"); 145 - ncores = 1; 146 - } 147 - 148 - if (ncores > num_possible_cpus()) { 149 - printk(KERN_WARNING 150 - "U8500: no. of cores (%d) greater than configured " 151 - "maximum of %d - clipping\n", 152 - ncores, num_possible_cpus()); 153 - ncores = num_possible_cpus(); 154 - } 155 - 156 - smp_store_cpu_info(cpu); 157 - 158 - /* 159 - * are we trying to boot more cores than exist? 
160 - */ 161 - if (max_cpus > ncores) 162 - max_cpus = ncores; 163 149 164 150 /* 165 151 * Initialise the present map, which describes the set of CPUs ··· 154 168 for (i = 0; i < max_cpus; i++) 155 169 set_cpu_present(i, true); 156 170 157 - if (max_cpus > 1) { 158 - /* 159 - * Enable the local timer or broadcast device for the 160 - * boot CPU, but only if we have more than one CPU. 161 - */ 162 - percpu_timer_setup(); 163 - scu_enable(__io_address(UX500_SCU_BASE)); 164 - wakeup_secondary(); 165 - } 171 + scu_enable(__io_address(UX500_SCU_BASE)); 172 + wakeup_secondary(); 166 173 }
+1
arch/arm/mach-versatile/Kconfig
··· 4 4 config ARCH_VERSATILE_PB 5 5 bool "Support Versatile/PB platform" 6 6 select CPU_ARM926T 7 + select MIGHT_HAVE_PCI 7 8 default y 8 9 help 9 10 Include support for the ARM(R) Versatile/PB platform.
+1 -2
arch/arm/mach-versatile/core.c
··· 31 31 #include <linux/amba/pl022.h> 32 32 #include <linux/io.h> 33 33 #include <linux/gfp.h> 34 + #include <linux/clkdev.h> 34 35 35 - #include <asm/clkdev.h> 36 36 #include <asm/system.h> 37 37 #include <asm/irq.h> 38 38 #include <asm/leds.h> ··· 46 46 #include <asm/mach/irq.h> 47 47 #include <asm/mach/time.h> 48 48 #include <asm/mach/map.h> 49 - #include <mach/clkdev.h> 50 49 #include <mach/hardware.h> 51 50 #include <mach/platform.h> 52 51 #include <asm/hardware/timer-sp.h>
+1
arch/arm/mach-vexpress/Makefile
··· 5 5 obj-y := v2m.o 6 6 obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o 7 7 obj-$(CONFIG_SMP) += platsmp.o headsmp.o 8 + obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o 8 9 obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
+1 -2
arch/arm/mach-vexpress/ct-ca9x4.c
··· 8 8 #include <linux/platform_device.h> 9 9 #include <linux/amba/bus.h> 10 10 #include <linux/amba/clcd.h> 11 + #include <linux/clkdev.h> 11 12 12 - #include <asm/clkdev.h> 13 13 #include <asm/pgtable.h> 14 14 #include <asm/hardware/arm_timer.h> 15 15 #include <asm/hardware/cache-l2x0.h> ··· 18 18 #include <asm/pmu.h> 19 19 #include <asm/smp_twd.h> 20 20 21 - #include <mach/clkdev.h> 22 21 #include <mach/ct-ca9x4.h> 23 22 24 23 #include <asm/hardware/timer-sp.h>
+128
arch/arm/mach-vexpress/hotplug.c
··· 1 + /* 2 + * linux/arch/arm/mach-realview/hotplug.c 3 + * 4 + * Copyright (C) 2002 ARM Ltd. 5 + * All Rights Reserved 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + */ 11 + #include <linux/kernel.h> 12 + #include <linux/errno.h> 13 + #include <linux/smp.h> 14 + 15 + #include <asm/cacheflush.h> 16 + 17 + extern volatile int pen_release; 18 + 19 + static inline void cpu_enter_lowpower(void) 20 + { 21 + unsigned int v; 22 + 23 + flush_cache_all(); 24 + asm volatile( 25 + "mcr p15, 0, %1, c7, c5, 0\n" 26 + " mcr p15, 0, %1, c7, c10, 4\n" 27 + /* 28 + * Turn off coherency 29 + */ 30 + " mrc p15, 0, %0, c1, c0, 1\n" 31 + " bic %0, %0, %3\n" 32 + " mcr p15, 0, %0, c1, c0, 1\n" 33 + " mrc p15, 0, %0, c1, c0, 0\n" 34 + " bic %0, %0, %2\n" 35 + " mcr p15, 0, %0, c1, c0, 0\n" 36 + : "=&r" (v) 37 + : "r" (0), "Ir" (CR_C), "Ir" (0x40) 38 + : "cc"); 39 + } 40 + 41 + static inline void cpu_leave_lowpower(void) 42 + { 43 + unsigned int v; 44 + 45 + asm volatile( 46 + "mrc p15, 0, %0, c1, c0, 0\n" 47 + " orr %0, %0, %1\n" 48 + " mcr p15, 0, %0, c1, c0, 0\n" 49 + " mrc p15, 0, %0, c1, c0, 1\n" 50 + " orr %0, %0, %2\n" 51 + " mcr p15, 0, %0, c1, c0, 1\n" 52 + : "=&r" (v) 53 + : "Ir" (CR_C), "Ir" (0x40) 54 + : "cc"); 55 + } 56 + 57 + static inline void platform_do_lowpower(unsigned int cpu, int *spurious) 58 + { 59 + /* 60 + * there is no power-control hardware on this platform, so all 61 + * we can do is put the core into WFI; this is safe as the calling 62 + * code will have already disabled interrupts 63 + */ 64 + for (;;) { 65 + /* 66 + * here's the WFI 67 + */ 68 + asm(".word 0xe320f003\n" 69 + : 70 + : 71 + : "memory", "cc"); 72 + 73 + if (pen_release == cpu) { 74 + /* 75 + * OK, proper wakeup, we're done 76 + */ 77 + break; 78 + } 79 + 80 + /* 81 + * Getting here, means that we have come out of WFI without 82 + * 
having been woken up - this shouldn't happen 83 + * 84 + * Just note it happening - when we're woken, we can report 85 + * its occurrence. 86 + */ 87 + (*spurious)++; 88 + } 89 + } 90 + 91 + int platform_cpu_kill(unsigned int cpu) 92 + { 93 + return 1; 94 + } 95 + 96 + /* 97 + * platform-specific code to shutdown a CPU 98 + * 99 + * Called with IRQs disabled 100 + */ 101 + void platform_cpu_die(unsigned int cpu) 102 + { 103 + int spurious = 0; 104 + 105 + /* 106 + * we're ready for shutdown now, so do it 107 + */ 108 + cpu_enter_lowpower(); 109 + platform_do_lowpower(cpu, &spurious); 110 + 111 + /* 112 + * bring this CPU back into the world of cache 113 + * coherency, and then restore interrupts 114 + */ 115 + cpu_leave_lowpower(); 116 + 117 + if (spurious) 118 + pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); 119 + } 120 + 121 + int platform_cpu_disable(unsigned int cpu) 122 + { 123 + /* 124 + * we don't allow CPU 0 to be shutdown (it is still too special 125 + * e.g. clock tick interrupts) 126 + */ 127 + return cpu == 0 ? -EPERM : 0; 128 + }
+2 -3
arch/arm/mach-vexpress/include/mach/smp.h
··· 2 2 #define __MACH_SMP_H 3 3 4 4 #include <asm/hardware/gic.h> 5 - #include <asm/smp_mpidr.h> 6 5 7 6 /* 8 7 * We use IRQ1 as the IPI 9 8 */ 10 - static inline void smp_cross_call(const struct cpumask *mask) 9 + static inline void smp_cross_call(const struct cpumask *mask, int ipi) 11 10 { 12 - gic_raise_softirq(mask, 1); 11 + gic_raise_softirq(mask, ipi); 13 12 } 14 13 #endif
+26 -48
arch/arm/mach-vexpress/platsmp.c
··· 17 17 #include <linux/io.h> 18 18 19 19 #include <asm/cacheflush.h> 20 - #include <asm/localtimer.h> 21 20 #include <asm/smp_scu.h> 22 21 #include <asm/unified.h> 23 22 ··· 34 35 */ 35 36 volatile int __cpuinitdata pen_release = -1; 36 37 38 + /* 39 + * Write pen_release in a way that is guaranteed to be visible to all 40 + * observers, irrespective of whether they're taking part in coherency 41 + * or not. This is necessary for the hotplug code to work reliably. 42 + */ 43 + static void write_pen_release(int val) 44 + { 45 + pen_release = val; 46 + smp_wmb(); 47 + __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); 48 + outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); 49 + } 50 + 37 51 static void __iomem *scu_base_addr(void) 38 52 { 39 53 return MMIO_P2V(A9_MPCORE_SCU); ··· 56 44 57 45 void __cpuinit platform_secondary_init(unsigned int cpu) 58 46 { 59 - trace_hardirqs_off(); 60 - 61 47 /* 62 48 * if any interrupts are already enabled for the primary 63 49 * core (e.g. timer irq), then they will not have been enabled ··· 67 57 * let the primary processor know we're out of the 68 58 * pen, then head off into the C entry point 69 59 */ 70 - pen_release = -1; 71 - smp_wmb(); 60 + write_pen_release(-1); 72 61 73 62 /* 74 63 * Synchronise with the boot thread. ··· 92 83 * since we haven't sent them a soft interrupt, they shouldn't 93 84 * be there. 94 85 */ 95 - pen_release = cpu; 96 - __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); 97 - outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); 86 + write_pen_release(cpu); 98 87 99 88 /* 100 89 * Send the secondary CPU a soft interrupt, thereby causing 101 90 * the boot monitor to read the system wide flags register, 102 91 * and branch to the address found there. 
103 92 */ 104 - smp_cross_call(cpumask_of(cpu)); 93 + smp_cross_call(cpumask_of(cpu), 1); 105 94 106 95 timeout = jiffies + (1 * HZ); 107 96 while (time_before(jiffies, timeout)) { ··· 131 124 ncores = scu_base ? scu_get_core_count(scu_base) : 1; 132 125 133 126 /* sanity check */ 134 - if (ncores == 0) { 135 - printk(KERN_ERR 136 - "vexpress: strange CM count of 0? Default to 1\n"); 137 - 138 - ncores = 1; 139 - } 140 - 141 127 if (ncores > NR_CPUS) { 142 128 printk(KERN_WARNING 143 129 "vexpress: no. of cores (%d) greater than configured " ··· 143 143 set_cpu_possible(i, true); 144 144 } 145 145 146 - void __init smp_prepare_cpus(unsigned int max_cpus) 146 + void __init platform_smp_prepare_cpus(unsigned int max_cpus) 147 147 { 148 - unsigned int ncores = num_possible_cpus(); 149 - unsigned int cpu = smp_processor_id(); 150 148 int i; 151 - 152 - smp_store_cpu_info(cpu); 153 - 154 - /* 155 - * are we trying to boot more cores than exist? 156 - */ 157 - if (max_cpus > ncores) 158 - max_cpus = ncores; 159 149 160 150 /* 161 151 * Initialise the present map, which describes the set of CPUs ··· 154 164 for (i = 0; i < max_cpus; i++) 155 165 set_cpu_present(i, true); 156 166 167 + scu_enable(scu_base_addr()); 168 + 157 169 /* 158 - * Initialise the SCU if there are more than one CPU and let 159 - * them know where to start. 170 + * Write the address of secondary startup into the 171 + * system-wide flags register. The boot monitor waits 172 + * until it receives a soft interrupt, and then the 173 + * secondary CPU branches to this address. 160 174 */ 161 - if (max_cpus > 1) { 162 - /* 163 - * Enable the local timer or broadcast device for the 164 - * boot CPU, but only if we have more than one CPU. 165 - */ 166 - percpu_timer_setup(); 167 - 168 - scu_enable(scu_base_addr()); 169 - 170 - /* 171 - * Write the address of secondary startup into the 172 - * system-wide flags register. 
The boot monitor waits 173 - * until it receives a soft interrupt, and then the 174 - * secondary CPU branches to this address. 175 - */ 176 - writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR)); 177 - writel(BSYM(virt_to_phys(vexpress_secondary_startup)), 178 - MMIO_P2V(V2M_SYS_FLAGSSET)); 179 - } 175 + writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR)); 176 + writel(BSYM(virt_to_phys(vexpress_secondary_startup)), 177 + MMIO_P2V(V2M_SYS_FLAGSSET)); 180 178 }
+1 -2
arch/arm/mach-vexpress/v2m.c
··· 11 11 #include <linux/spinlock.h> 12 12 #include <linux/sysdev.h> 13 13 #include <linux/usb/isp1760.h> 14 + #include <linux/clkdev.h> 14 15 15 - #include <asm/clkdev.h> 16 16 #include <asm/sizes.h> 17 17 #include <asm/mach/flash.h> 18 18 #include <asm/mach/map.h> ··· 20 20 #include <asm/hardware/arm_timer.h> 21 21 #include <asm/hardware/timer-sp.h> 22 22 23 - #include <mach/clkdev.h> 24 23 #include <mach/motherboard.h> 25 24 26 25 #include <plat/sched_clock.h>
+1 -1
arch/arm/mach-w90x900/clock.h
··· 10 10 * the Free Software Foundation; either version 2 of the License. 11 11 */ 12 12 13 - #include <asm/clkdev.h> 13 + #include <linux/clkdev.h> 14 14 15 15 void nuc900_clk_enable(struct clk *clk, int enable); 16 16 void nuc900_subclk_enable(struct clk *clk, int enable);
+35
arch/arm/mm/Kconfig
··· 599 599 help 600 600 Processor has the CP15 register, which has MPU related registers. 601 601 602 + config CPU_USE_DOMAINS 603 + bool 604 + depends on MMU 605 + default y if !CPU_32v6K 606 + help 607 + This option enables or disables the use of domain switching 608 + via the set_fs() function. 609 + 602 610 # 603 611 # CPU supports 36-bit I/O 604 612 # ··· 635 627 help 636 628 Say Y here if you have a CPU with the ThumbEE extension and code to 637 629 make use of it. Say N for code that can run on CPUs without ThumbEE. 630 + 631 + config SWP_EMULATE 632 + bool "Emulate SWP/SWPB instructions" 633 + depends on CPU_V7 634 + select HAVE_PROC_CPU if PROC_FS 635 + default y if SMP 636 + help 637 + ARMv6 architecture deprecates use of the SWP/SWPB instructions. 638 + ARMv7 multiprocessing extensions introduce the ability to disable 639 + these instructions, triggering an undefined instruction exception 640 + when executed. Say Y here to enable software emulation of these 641 + instructions for userspace (not kernel) using LDREX/STREX. 642 + Also creates /proc/cpu/swp_emulation for statistics. 643 + 644 + In some older versions of glibc [<=2.8] SWP is used during futex 645 + trylock() operations with the assumption that the code will not 646 + be preempted. This invalid assumption may be more likely to fail 647 + with SWP emulation enabled, leading to deadlock of the user 648 + application. 649 + 650 + NOTE: when accessing uncached shared regions, LDREX/STREX rely 651 + on an external transaction monitoring block called a global 652 + monitor to maintain update atomicity. If your system does not 653 + implement a global monitor, this option can cause programs that 654 + perform SWP operations to uncached memory to deadlock. 655 + 656 + If unsure, say Y. 638 657 639 658 config CPU_BIG_ENDIAN 640 659 bool "Build big-endian kernel"
+23 -5
arch/arm/mm/dma-mapping.c
··· 312 312 addr = page_address(page); 313 313 314 314 if (addr) 315 - *handle = page_to_dma(dev, page); 315 + *handle = pfn_to_dma(dev, page_to_pfn(page)); 316 316 317 317 return addr; 318 318 } ··· 407 407 if (!arch_is_coherent()) 408 408 __dma_free_remap(cpu_addr, size); 409 409 410 - __dma_free_buffer(dma_to_page(dev, handle), size); 410 + __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size); 411 411 } 412 412 EXPORT_SYMBOL(dma_free_coherent); 413 413 ··· 555 555 struct scatterlist *s; 556 556 int i, j; 557 557 558 + BUG_ON(!valid_dma_direction(dir)); 559 + 558 560 for_each_sg(sg, s, nents, i) { 559 - s->dma_address = dma_map_page(dev, sg_page(s), s->offset, 561 + s->dma_address = __dma_map_page(dev, sg_page(s), s->offset, 560 562 s->length, dir); 561 563 if (dma_mapping_error(dev, s->dma_address)) 562 564 goto bad_mapping; 563 565 } 566 + debug_dma_map_sg(dev, sg, nents, nents, dir); 564 567 return nents; 565 568 566 569 bad_mapping: 567 570 for_each_sg(sg, s, i, j) 568 - dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); 571 + __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); 569 572 return 0; 570 573 } 571 574 EXPORT_SYMBOL(dma_map_sg); ··· 589 586 struct scatterlist *s; 590 587 int i; 591 588 589 + debug_dma_unmap_sg(dev, sg, nents, dir); 590 + 592 591 for_each_sg(sg, s, nents, i) 593 - dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); 592 + __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); 594 593 } 595 594 EXPORT_SYMBOL(dma_unmap_sg); 596 595 ··· 617 612 __dma_page_dev_to_cpu(sg_page(s), s->offset, 618 613 s->length, dir); 619 614 } 615 + 616 + debug_dma_sync_sg_for_cpu(dev, sg, nents, dir); 620 617 } 621 618 EXPORT_SYMBOL(dma_sync_sg_for_cpu); 622 619 ··· 643 636 __dma_page_cpu_to_dev(sg_page(s), s->offset, 644 637 s->length, dir); 645 638 } 639 + 640 + debug_dma_sync_sg_for_device(dev, sg, nents, dir); 646 641 } 647 642 EXPORT_SYMBOL(dma_sync_sg_for_device); 643 + 644 + #define 
PREALLOC_DMA_DEBUG_ENTRIES 4096 645 + 646 + static int __init dma_debug_do_init(void) 647 + { 648 + dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); 649 + return 0; 650 + } 651 + fs_initcall(dma_debug_do_init);
+2 -6
arch/arm/mm/ioremap.c
··· 204 204 /* 205 205 * Don't allow RAM to be mapped - this causes problems with ARMv6+ 206 206 */ 207 - if (pfn_valid(pfn)) { 208 - printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n" 209 - "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n" 210 - "will fail in the next kernel release. Please fix your driver.\n"); 211 - WARN_ON(1); 212 - } 207 + if (WARN_ON(pfn_valid(pfn))) 208 + return NULL; 213 209 214 210 type = get_mem_type(mtype); 215 211 if (!type)
+3 -3
arch/arm/mm/mmu.c
··· 24 24 #include <asm/smp_plat.h> 25 25 #include <asm/tlb.h> 26 26 #include <asm/highmem.h> 27 + #include <asm/traps.h> 27 28 28 29 #include <asm/mach/arch.h> 29 30 #include <asm/mach/map.h> ··· 915 914 { 916 915 struct map_desc map; 917 916 unsigned long addr; 918 - void *vectors; 919 917 920 918 /* 921 919 * Allocate the vector page early. 922 920 */ 923 - vectors = early_alloc(PAGE_SIZE); 921 + vectors_page = early_alloc(PAGE_SIZE); 924 922 925 923 for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) 926 924 pmd_clear(pmd_off_k(addr)); ··· 959 959 * location (0xffff0000). If we aren't using high-vectors, also 960 960 * create a mapping at the low-vectors virtual address. 961 961 */ 962 - map.pfn = __phys_to_pfn(virt_to_phys(vectors)); 962 + map.pfn = __phys_to_pfn(virt_to_phys(vectors_page)); 963 963 map.virtual = 0xffff0000; 964 964 map.length = PAGE_SIZE; 965 965 map.type = MT_HIGH_VECTORS;
+7
arch/arm/mm/proc-macros.S
··· 109 109 * 110x 0 1 0 r/w r/o 110 110 * 11x0 0 1 0 r/w r/o 111 111 * 1111 0 1 1 r/w r/w 112 + * 113 + * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed: 114 + * 110x 1 1 1 r/o r/o 115 + * 11x0 1 1 1 r/o r/o 112 116 */ 113 117 .macro armv6_mt_table pfx 114 118 \pfx\()_mt_table: ··· 152 148 153 149 tst r1, #L_PTE_USER 154 150 orrne r3, r3, #PTE_EXT_AP1 151 + #ifdef CONFIG_CPU_USE_DOMAINS 152 + @ allow kernel read/write access to read-only user pages 155 153 tstne r3, #PTE_EXT_APX 156 154 bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 155 + #endif 157 156 158 157 tst r1, #L_PTE_EXEC 159 158 orreq r3, r3, #PTE_EXT_XN
+7 -2
arch/arm/mm/proc-v7.S
··· 148 148 149 149 tst r1, #L_PTE_USER 150 150 orrne r3, r3, #PTE_EXT_AP1 151 + #ifdef CONFIG_CPU_USE_DOMAINS 152 + @ allow kernel read/write access to read-only user pages 151 153 tstne r3, #PTE_EXT_APX 152 154 bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 155 + #endif 153 156 154 157 tst r1, #L_PTE_EXEC 155 158 orreq r3, r3, #PTE_EXT_XN ··· 276 273 ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) 277 274 ALT_UP(orr r4, r4, #TTB_FLAGS_UP) 278 275 mcr p15, 0, r4, c2, c0, 1 @ load TTB1 279 - mov r10, #0x1f @ domains 0, 1 = manager 280 - mcr p15, 0, r10, c3, c0, 0 @ load domain access register 281 276 /* 282 277 * Memory region attributes with SCTLR.TRE=1 283 278 * ··· 313 312 ldmia r5, {r5, r6} 314 313 #ifdef CONFIG_CPU_ENDIAN_BE8 315 314 orr r6, r6, #1 << 25 @ big-endian page tables 315 + #endif 316 + #ifdef CONFIG_SWP_EMULATE 317 + orr r5, r5, #(1 << 10) @ set SW bit in "clear" 318 + bic r6, r6, #(1 << 10) @ clear it in "mmuset" 316 319 #endif 317 320 mrc p15, 0, r0, c1, c0, 0 @ read control register 318 321 bic r0, r0, r5 @ clear bits them
+2 -2
arch/arm/plat-omap/Kconfig
··· 11 11 12 12 config ARCH_OMAP1 13 13 bool "TI OMAP1" 14 - select COMMON_CLKDEV 14 + select CLKDEV_LOOKUP 15 15 help 16 16 "Systems based on omap7xx, omap15xx or omap16xx" 17 17 18 18 config ARCH_OMAP2PLUS 19 19 bool "TI OMAP2/3/4" 20 - select COMMON_CLKDEV 20 + select CLKDEV_LOOKUP 21 21 help 22 22 "Systems based on OMAP2, OMAP3 or OMAP4" 23 23
+1 -1
arch/arm/plat-omap/include/plat/clkdev_omap.h
··· 8 8 #ifndef __ARCH_ARM_PLAT_OMAP_INCLUDE_PLAT_CLKDEV_OMAP_H 9 9 #define __ARCH_ARM_PLAT_OMAP_INCLUDE_PLAT_CLKDEV_OMAP_H 10 10 11 - #include <asm/clkdev.h> 11 + #include <linux/clkdev.h> 12 12 13 13 struct omap_clk { 14 14 u16 cpu;
+4 -4
arch/arm/plat-omap/include/plat/memory.h
··· 61 61 #define lbus_to_virt(x) ((x) - OMAP1510_LB_OFFSET + PAGE_OFFSET) 62 62 #define is_lbus_device(dev) (cpu_is_omap15xx() && dev && (strncmp(dev_name(dev), "ohci", 4) == 0)) 63 63 64 - #define __arch_page_to_dma(dev, page) \ 65 - ({ dma_addr_t __dma = page_to_phys(page); \ 64 + #define __arch_pfn_to_dma(dev, pfn) \ 65 + ({ dma_addr_t __dma = __pfn_to_phys(pfn); \ 66 66 if (is_lbus_device(dev)) \ 67 67 __dma = __dma - PHYS_OFFSET + OMAP1510_LB_OFFSET; \ 68 68 __dma; }) 69 69 70 - #define __arch_dma_to_page(dev, addr) \ 70 + #define __arch_dma_to_pfn(dev, addr) \ 71 71 ({ dma_addr_t __dma = addr; \ 72 72 if (is_lbus_device(dev)) \ 73 73 __dma += PHYS_OFFSET - OMAP1510_LB_OFFSET; \ 74 - phys_to_page(__dma); \ 74 + __phys_to_pfn(__dma); \ 75 75 }) 76 76 77 77 #define __arch_dma_to_virt(dev, addr) ({ (void *) (is_lbus_device(dev) ? \
+2 -3
arch/arm/plat-omap/include/plat/smp.h
··· 18 18 #define OMAP_ARCH_SMP_H 19 19 20 20 #include <asm/hardware/gic.h> 21 - #include <asm/smp_mpidr.h> 22 21 23 22 /* Needed for secondary core boot */ 24 23 extern void omap_secondary_startup(void); ··· 28 29 /* 29 30 * We use Soft IRQ1 as the IPI 30 31 */ 31 - static inline void smp_cross_call(const struct cpumask *mask) 32 + static inline void smp_cross_call(const struct cpumask *mask, int ipi) 32 33 { 33 - gic_raise_softirq(mask, 1); 34 + gic_raise_softirq(mask, ipi); 34 35 } 35 36 36 37 #endif
+1 -1
arch/arm/plat-spear/include/plat/clock.h
··· 15 15 #define __PLAT_CLOCK_H 16 16 17 17 #include <linux/list.h> 18 - #include <asm/clkdev.h> 18 + #include <linux/clkdev.h> 19 19 #include <linux/types.h> 20 20 21 21 /* clk structure flags */
+1 -1
arch/arm/plat-stmp3xxx/clock.c
··· 25 25 #include <linux/err.h> 26 26 #include <linux/delay.h> 27 27 #include <linux/io.h> 28 + #include <linux/clkdev.h> 28 29 29 30 #include <asm/mach-types.h> 30 - #include <asm/clkdev.h> 31 31 #include <mach/platform.h> 32 32 #include <mach/regs-clkctrl.h> 33 33
+23 -1
arch/arm/vfp/vfpmodule.c
··· 10 10 */ 11 11 #include <linux/module.h> 12 12 #include <linux/types.h> 13 + #include <linux/cpu.h> 13 14 #include <linux/kernel.h> 15 + #include <linux/notifier.h> 14 16 #include <linux/signal.h> 15 17 #include <linux/sched.h> 18 + #include <linux/smp.h> 16 19 #include <linux/init.h> 17 20 18 21 #include <asm/cputype.h> ··· 487 484 put_cpu(); 488 485 } 489 486 490 - #include <linux/smp.h> 487 + /* 488 + * VFP hardware can lose all context when a CPU goes offline. 489 + * Safely clear our held state when a CPU has been killed, and 490 + * re-enable access to VFP when the CPU comes back online. 491 + * 492 + * Both CPU_DYING and CPU_STARTING are called on the CPU which 493 + * is being offlined/onlined. 494 + */ 495 + static int vfp_hotplug(struct notifier_block *b, unsigned long action, 496 + void *hcpu) 497 + { 498 + if (action == CPU_DYING || action == CPU_DYING_FROZEN) { 499 + unsigned int cpu = (long)hcpu; 500 + last_VFP_context[cpu] = NULL; 501 + } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) 502 + vfp_enable(NULL); 503 + return NOTIFY_OK; 504 + } 491 505 492 506 /* 493 507 * VFP support code initialisation. ··· 534 514 else if (vfpsid & FPSID_NODOUBLE) { 535 515 printk("no double precision support\n"); 536 516 } else { 517 + hotcpu_notifier(vfp_hotplug, 0); 518 + 537 519 smp_call_function(vfp_enable, NULL, 1); 538 520 539 521 VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */
+1 -1
arch/sh/Kconfig
··· 1 1 config SUPERH 2 2 def_bool y 3 3 select EMBEDDED 4 - select HAVE_CLK 4 + select CLKDEV_LOOKUP 5 5 select HAVE_IDE if HAS_IOPORT 6 6 select HAVE_MEMBLOCK 7 7 select HAVE_OPROFILE
+1 -1
arch/sh/boards/mach-highlander/setup.c
··· 24 24 #include <linux/interrupt.h> 25 25 #include <linux/usb/r8a66597.h> 26 26 #include <linux/usb/m66592.h> 27 + #include <linux/clkdev.h> 27 28 #include <net/ax88796.h> 28 29 #include <asm/machvec.h> 29 30 #include <mach/highlander.h> 30 - #include <asm/clkdev.h> 31 31 #include <asm/clock.h> 32 32 #include <asm/heartbeat.h> 33 33 #include <asm/io.h>
+17 -21
arch/sh/include/asm/clkdev.h
··· 1 1 /* 2 - * arch/sh/include/asm/clkdev.h 3 - * 4 - * Cloned from arch/arm/include/asm/clkdev.h: 5 - * 6 - * Copyright (C) 2008 Russell King. 2 + * Copyright (C) 2010 Paul Mundt <lethal@linux-sh.org> 7 3 * 8 4 * This program is free software; you can redistribute it and/or modify 9 5 * it under the terms of the GNU General Public License version 2 as ··· 7 11 * 8 12 * Helper for the clk API to assist looking up a struct clk. 9 13 */ 10 - #ifndef __ASM_CLKDEV_H 11 - #define __ASM_CLKDEV_H 12 14 13 - struct clk; 15 + #ifndef __CLKDEV__H_ 16 + #define __CLKDEV__H_ 14 17 15 - struct clk_lookup { 16 - struct list_head node; 17 - const char *dev_id; 18 - const char *con_id; 19 - struct clk *clk; 20 - }; 18 + #include <linux/bootmem.h> 19 + #include <linux/mm.h> 20 + #include <linux/slab.h> 21 21 22 - struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, 23 - const char *dev_fmt, ...); 22 + #include <asm/clock.h> 24 23 25 - void clkdev_add(struct clk_lookup *cl); 26 - void clkdev_drop(struct clk_lookup *cl); 24 + static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size) 25 + { 26 + if (!slab_is_available()) 27 + return alloc_bootmem_low_pages(size); 28 + else 29 + return kzalloc(size, GFP_KERNEL); 30 + } 27 31 28 - void clkdev_add_table(struct clk_lookup *, size_t); 29 - int clk_add_alias(const char *, const char *, char *, struct device *); 32 + #define __clk_put(clk) 33 + #define __clk_get(clk) ({ 1; }) 30 34 31 - #endif 35 + #endif /* __CLKDEV_H__ */
+1 -1
arch/sh/kernel/Makefile
··· 11 11 12 12 CFLAGS_REMOVE_return_address.o = -pg 13 13 14 - obj-y := clkdev.o debugtraps.o dma-nommu.o dumpstack.o \ 14 + obj-y := debugtraps.o dma-nommu.o dumpstack.o \ 15 15 idle.o io.o irq.o irq_$(BITS).o kdebugfs.o \ 16 16 machvec.o nmi_debug.o process.o \ 17 17 process_$(BITS).o ptrace.o ptrace_$(BITS).o \
+24 -19
arch/sh/kernel/clkdev.c drivers/clk/clkdev.c
··· 1 1 /* 2 - * arch/sh/kernel/clkdev.c 3 - * 4 - * Cloned from arch/arm/common/clkdev.c: 2 + * drivers/clk/clkdev.c 5 3 * 6 4 * Copyright (C) 2008 Russell King. 7 5 * ··· 18 20 #include <linux/string.h> 19 21 #include <linux/mutex.h> 20 22 #include <linux/clk.h> 21 - #include <linux/slab.h> 22 - #include <linux/bootmem.h> 23 - #include <linux/mm.h> 24 - #include <asm/clock.h> 25 - #include <asm/clkdev.h> 23 + #include <linux/clkdev.h> 26 24 27 25 static LIST_HEAD(clocks); 28 26 static DEFINE_MUTEX(clocks_mutex); ··· 50 56 continue; 51 57 match += 1; 52 58 } 53 - if (match == 0) 54 - continue; 55 59 56 60 if (match > best) { 57 61 clk = p->clk; 58 - best = match; 62 + if (match != 3) 63 + best = match; 64 + else 65 + break; 59 66 } 60 67 } 61 68 return clk; ··· 68 73 69 74 mutex_lock(&clocks_mutex); 70 75 clk = clk_find(dev_id, con_id); 76 + if (clk && !__clk_get(clk)) 77 + clk = NULL; 71 78 mutex_unlock(&clocks_mutex); 72 79 73 80 return clk ? clk : ERR_PTR(-ENOENT); 74 81 } 75 82 EXPORT_SYMBOL(clk_get_sys); 83 + 84 + struct clk *clk_get(struct device *dev, const char *con_id) 85 + { 86 + const char *dev_id = dev ? dev_name(dev) : NULL; 87 + 88 + return clk_get_sys(dev_id, con_id); 89 + } 90 + EXPORT_SYMBOL(clk_get); 91 + 92 + void clk_put(struct clk *clk) 93 + { 94 + __clk_put(clk); 95 + } 96 + EXPORT_SYMBOL(clk_put); 76 97 77 98 void clkdev_add(struct clk_lookup *cl) 78 99 { ··· 122 111 { 123 112 struct clk_lookup_alloc *cla; 124 113 125 - if (!slab_is_available()) 126 - cla = alloc_bootmem_low_pages(sizeof(*cla)); 127 - else 128 - cla = kzalloc(sizeof(*cla), GFP_KERNEL); 129 - 114 + cla = __clkdev_alloc(sizeof(*cla)); 130 115 if (!cla) 131 116 return NULL; 132 117 ··· 168 161 */ 169 162 void clkdev_drop(struct clk_lookup *cl) 170 163 { 171 - struct clk_lookup_alloc *cla = container_of(cl, struct clk_lookup_alloc, cl); 172 - 173 164 mutex_lock(&clocks_mutex); 174 165 list_del(&cl->node); 175 166 mutex_unlock(&clocks_mutex); 176 - kfree(cla); 167 + kfree(cl); 177 168 } 178 169 EXPORT_SYMBOL(clkdev_drop);
+1 -1
arch/sh/kernel/cpu/clock-cpg.c
··· 2 2 #include <linux/compiler.h> 3 3 #include <linux/slab.h> 4 4 #include <linux/io.h> 5 - #include <asm/clkdev.h> 5 + #include <linux/clkdev.h> 6 6 #include <asm/clock.h> 7 7 8 8 static struct clk master_clk = {
-16
arch/sh/kernel/cpu/clock.c
··· 48 48 return ret; 49 49 } 50 50 51 - /* 52 - * Returns a clock. Note that we first try to use device id on the bus 53 - * and clock name. If this fails, we try to use clock name only. 54 - */ 55 - struct clk *clk_get(struct device *dev, const char *con_id) 56 - { 57 - const char *dev_id = dev ? dev_name(dev) : NULL; 58 - 59 - return clk_get_sys(dev_id, con_id); 60 - } 61 - EXPORT_SYMBOL_GPL(clk_get); 62 - 63 - void clk_put(struct clk *clk) 64 - { 65 - } 66 - EXPORT_SYMBOL_GPL(clk_put); 67 51
+1 -1
arch/sh/kernel/cpu/sh4/clock-sh4-202.c
··· 13 13 #include <linux/kernel.h> 14 14 #include <linux/err.h> 15 15 #include <linux/io.h> 16 - #include <asm/clkdev.h> 16 + #include <linux/clkdev.h> 17 17 #include <asm/clock.h> 18 18 #include <asm/freq.h> 19 19
+1 -1
arch/sh/kernel/cpu/sh4a/clock-sh7343.c
··· 21 21 #include <linux/init.h> 22 22 #include <linux/kernel.h> 23 23 #include <linux/io.h> 24 - #include <asm/clkdev.h> 24 + #include <linux/clkdev.h> 25 25 #include <asm/clock.h> 26 26 27 27 /* SH7343 registers */
+1 -1
arch/sh/kernel/cpu/sh4a/clock-sh7366.c
··· 21 21 #include <linux/init.h> 22 22 #include <linux/kernel.h> 23 23 #include <linux/io.h> 24 - #include <asm/clkdev.h> 24 + #include <linux/clkdev.h> 25 25 #include <asm/clock.h> 26 26 27 27 /* SH7366 registers */
+1 -1
arch/sh/kernel/cpu/sh4a/clock-sh7722.c
··· 21 21 #include <linux/init.h> 22 22 #include <linux/kernel.h> 23 23 #include <linux/io.h> 24 - #include <asm/clkdev.h> 24 + #include <linux/clkdev.h> 25 25 #include <asm/clock.h> 26 26 #include <asm/hwblk.h> 27 27 #include <cpu/sh7722.h>
+1 -1
arch/sh/kernel/cpu/sh4a/clock-sh7723.c
··· 22 22 #include <linux/kernel.h> 23 23 #include <linux/io.h> 24 24 #include <linux/clk.h> 25 - #include <asm/clkdev.h> 25 + #include <linux/clkdev.h> 26 26 #include <asm/clock.h> 27 27 #include <asm/hwblk.h> 28 28 #include <cpu/sh7723.h>
+1 -1
arch/sh/kernel/cpu/sh4a/clock-sh7724.c
··· 22 22 #include <linux/kernel.h> 23 23 #include <linux/io.h> 24 24 #include <linux/clk.h> 25 - #include <asm/clkdev.h> 25 + #include <linux/clkdev.h> 26 26 #include <asm/clock.h> 27 27 #include <asm/hwblk.h> 28 28 #include <cpu/sh7724.h>
+1 -1
arch/sh/kernel/cpu/sh4a/clock-sh7757.c
··· 12 12 #include <linux/init.h> 13 13 #include <linux/kernel.h> 14 14 #include <linux/io.h> 15 - #include <asm/clkdev.h> 15 + #include <linux/clkdev.h> 16 16 #include <asm/clock.h> 17 17 #include <asm/freq.h> 18 18
+1 -1
arch/sh/kernel/cpu/sh4a/clock-sh7763.c
··· 13 13 #include <linux/init.h> 14 14 #include <linux/kernel.h> 15 15 #include <linux/io.h> 16 - #include <asm/clkdev.h> 16 + #include <linux/clkdev.h> 17 17 #include <asm/clock.h> 18 18 #include <asm/freq.h> 19 19 #include <asm/io.h>
+1 -1
arch/sh/kernel/cpu/sh4a/clock-sh7780.c
··· 12 12 #include <linux/init.h> 13 13 #include <linux/kernel.h> 14 14 #include <linux/io.h> 15 - #include <asm/clkdev.h> 15 + #include <linux/clkdev.h> 16 16 #include <asm/clock.h> 17 17 #include <asm/freq.h> 18 18 #include <asm/io.h>
+1 -1
arch/sh/kernel/cpu/sh4a/clock-sh7785.c
··· 14 14 #include <linux/clk.h> 15 15 #include <linux/io.h> 16 16 #include <linux/cpufreq.h> 17 - #include <asm/clkdev.h> 17 + #include <linux/clkdev.h> 18 18 #include <asm/clock.h> 19 19 #include <asm/freq.h> 20 20 #include <cpu/sh7785.h>
+1 -1
arch/sh/kernel/cpu/sh4a/clock-sh7786.c
··· 13 13 #include <linux/kernel.h> 14 14 #include <linux/clk.h> 15 15 #include <linux/io.h> 16 - #include <asm/clkdev.h> 16 + #include <linux/clkdev.h> 17 17 #include <asm/clock.h> 18 18 #include <asm/freq.h> 19 19
+1 -1
arch/sh/kernel/cpu/sh4a/clock-shx3.c
··· 14 14 #include <linux/init.h> 15 15 #include <linux/kernel.h> 16 16 #include <linux/io.h> 17 - #include <asm/clkdev.h> 17 + #include <linux/clkdev.h> 18 18 #include <asm/clock.h> 19 19 #include <asm/freq.h> 20 20
+2
drivers/Kconfig
··· 111 111 source "drivers/staging/Kconfig" 112 112 113 113 source "drivers/platform/Kconfig" 114 + 115 + source "drivers/clk/Kconfig" 114 116 endmenu
+2
drivers/Makefile
··· 115 115 obj-$(CONFIG_STAGING) += staging/ 116 116 obj-y += platform/ 117 117 obj-y += ieee802154/ 118 + #common clk code 119 + obj-y += clk/
+39
drivers/amba/bus.c
··· 147 147 clk_put(pclk); 148 148 } 149 149 150 + static int amba_get_enable_vcore(struct amba_device *pcdev) 151 + { 152 + struct regulator *vcore = regulator_get(&pcdev->dev, "vcore"); 153 + int ret; 154 + 155 + pcdev->vcore = vcore; 156 + 157 + if (IS_ERR(vcore)) { 158 + /* It is OK not to supply a vcore regulator */ 159 + if (PTR_ERR(vcore) == -ENODEV) 160 + return 0; 161 + return PTR_ERR(vcore); 162 + } 163 + 164 + ret = regulator_enable(vcore); 165 + if (ret) { 166 + regulator_put(vcore); 167 + pcdev->vcore = ERR_PTR(-ENODEV); 168 + } 169 + 170 + return ret; 171 + } 172 + 173 + static void amba_put_disable_vcore(struct amba_device *pcdev) 174 + { 175 + struct regulator *vcore = pcdev->vcore; 176 + 177 + if (!IS_ERR(vcore)) { 178 + regulator_disable(vcore); 179 + regulator_put(vcore); 180 + } 181 + } 182 + 150 183 /* 151 184 * These are the device model conversion veneers; they convert the 152 185 * device model structures to our more specific structures. ··· 192 159 int ret; 193 160 194 161 do { 162 + ret = amba_get_enable_vcore(pcdev); 163 + if (ret) 164 + break; 165 + 195 166 ret = amba_get_enable_pclk(pcdev); 196 167 if (ret) 197 168 break; ··· 205 168 break; 206 169 207 170 amba_put_disable_pclk(pcdev); 171 + amba_put_disable_vcore(pcdev); 208 172 } while (0); 209 173 210 174 return ret; ··· 218 180 int ret = drv->remove(pcdev); 219 181 220 182 amba_put_disable_pclk(pcdev); 183 + amba_put_disable_vcore(pcdev); 221 184 222 185 return ret; 223 186 }
+4
drivers/clk/Kconfig
··· 1 + 2 + config CLKDEV_LOOKUP 3 + bool 4 + select HAVE_CLK
+2
drivers/clk/Makefile
··· 1 + 2 + obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
+8
include/linux/amba/bus.h
··· 18 18 #include <linux/device.h> 19 19 #include <linux/err.h> 20 20 #include <linux/resource.h> 21 + #include <linux/regulator/consumer.h> 21 22 22 23 #define AMBA_NR_IRQS 2 23 24 #define AMBA_CID 0xb105f00d ··· 29 28 struct device dev; 30 29 struct resource res; 31 30 struct clk *pclk; 31 + struct regulator *vcore; 32 32 u64 dma_mask; 33 33 unsigned int periphid; 34 34 unsigned int irq[AMBA_NR_IRQS]; ··· 72 70 73 71 #define amba_pclk_disable(d) \ 74 72 do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) 73 + 74 + #define amba_vcore_enable(d) \ 75 + (IS_ERR((d)->vcore) ? 0 : regulator_enable((d)->vcore)) 76 + 77 + #define amba_vcore_disable(d) \ 78 + do { if (!IS_ERR((d)->vcore)) regulator_disable((d)->vcore); } while (0) 75 79 76 80 /* Some drivers don't use the struct amba_device */ 77 81 #define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
+36
include/linux/clkdev.h
··· 1 + /* 2 + * include/linux/clkdev.h 3 + * 4 + * Copyright (C) 2008 Russell King. 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + * 10 + * Helper for the clk API to assist looking up a struct clk. 11 + */ 12 + #ifndef __CLKDEV_H 13 + #define __CLKDEV_H 14 + 15 + #include <asm/clkdev.h> 16 + 17 + struct clk; 18 + struct device; 19 + 20 + struct clk_lookup { 21 + struct list_head node; 22 + const char *dev_id; 23 + const char *con_id; 24 + struct clk *clk; 25 + }; 26 + 27 + struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, 28 + const char *dev_fmt, ...); 29 + 30 + void clkdev_add(struct clk_lookup *cl); 31 + void clkdev_drop(struct clk_lookup *cl); 32 + 33 + void clkdev_add_table(struct clk_lookup *, size_t); 34 + int clk_add_alias(const char *, const char *, char *, struct device *); 35 + 36 + #endif