···3434 - description of the virtual memory layout3535nwfpe/3636 - NWFPE floating point emulator documentation3737+swp_emulation3838+ - SWP/SWPB emulation handler/logging description
+27
Documentation/arm/swp_emulation
···11+Software emulation of deprecated SWP instruction (CONFIG_SWP_EMULATE)22+---------------------------------------------------------------------33+44+ARMv6 architecture deprecates use of the SWP/SWPB instructions, and recommeds55+moving to the load-locked/store-conditional instructions LDREX and STREX.66+77+ARMv7 multiprocessing extensions introduce the ability to disable these88+instructions, triggering an undefined instruction exception when executed.99+Trapped instructions are emulated using an LDREX/STREX or LDREXB/STREXB1010+sequence. If a memory access fault (an abort) occurs, a segmentation fault is1111+signalled to the triggering process.1212+1313+/proc/cpu/swp_emulation holds some statistics/information, including the PID of1414+the last process to trigger the emulation to be invocated. For example:1515+---1616+Emulated SWP: 121717+Emulated SWPB: 01818+Aborted SWP{B}: 11919+Last process: 3142020+---2121+2222+NOTE: when accessing uncached shared regions, LDREX/STREX rely on an external2323+transaction monitoring block called a global monitor to maintain update2424+atomicity. If your system does not implement a global monitor, this option can2525+cause programs that perform SWP operations to uncached memory to deadlock, as2626+the STREX operation will always fail.2727+
+39-26
arch/arm/Kconfig
···22 bool33 default y44 select HAVE_AOUT55+ select HAVE_DMA_API_DEBUG56 select HAVE_IDE67 select HAVE_MEMBLOCK78 select RTC_LIB···3534 <http://www.arm.linux.org.uk/>.36353736config HAVE_PWM3737+ bool3838+3939+config MIGHT_HAVE_PCI3840 bool39414042config SYS_SUPPORTS_APM_EMULATION···230226 bool "ARM Ltd. Integrator family"231227 select ARM_AMBA232228 select ARCH_HAS_CPUFREQ233233- select COMMON_CLKDEV229229+ select CLKDEV_LOOKUP234230 select ICST235231 select GENERIC_CLOCKEVENTS236232 select PLAT_VERSATILE···240236config ARCH_REALVIEW241237 bool "ARM Ltd. RealView family"242238 select ARM_AMBA243243- select COMMON_CLKDEV239239+ select CLKDEV_LOOKUP244240 select HAVE_SCHED_CLOCK245241 select ICST246242 select GENERIC_CLOCKEVENTS···255251 bool "ARM Ltd. Versatile family"256252 select ARM_AMBA257253 select ARM_VIC258258- select COMMON_CLKDEV254254+ select CLKDEV_LOOKUP259255 select HAVE_SCHED_CLOCK260256 select ICST261257 select GENERIC_CLOCKEVENTS···270266 select ARCH_WANT_OPTIONAL_GPIOLIB271267 select ARM_AMBA272268 select ARM_TIMER_SP804273273- select COMMON_CLKDEV269269+ select CLKDEV_LOOKUP274270 select GENERIC_CLOCKEVENTS275271 select HAVE_CLK276272 select HAVE_SCHED_CLOCK···292288 depends on MMU293289 select CPU_V6294290 select ARM_AMBA295295- select COMMON_CLKDEV291291+ select CLKDEV_LOOKUP296292 select GENERIC_CLOCKEVENTS297293 select ARCH_WANT_OPTIONAL_GPIOLIB298294 help···310306 select CPU_V6311307 select GENERIC_CLOCKEVENTS312308 select ARM_GIC309309+ select MIGHT_HAVE_PCI313310 select PCI_DOMAINS if PCI314311 help315312 Support for Cavium Networks CNS3XXX platform.···340335 select CPU_ARM920T341336 select ARM_AMBA342337 select ARM_VIC343343- select COMMON_CLKDEV338338+ select CLKDEV_LOOKUP344339 select ARCH_REQUIRE_GPIOLIB345340 select ARCH_HAS_HOLES_MEMORYMODEL346341 select ARCH_USES_GETTIMEOFFSET···360355 bool "Freescale MXC/iMX-based"361356 select GENERIC_CLOCKEVENTS362357 select ARCH_REQUIRE_GPIOLIB363363- select COMMON_CLKDEV358358+ select 
CLKDEV_LOOKUP364359 help365360 Support for Freescale MXC/iMX-based family of processors366361367362config ARCH_STMP3XXX368363 bool "Freescale STMP3xxx"369364 select CPU_ARM926T370370- select COMMON_CLKDEV365365+ select CLKDEV_LOOKUP371366 select ARCH_REQUIRE_GPIOLIB372367 select GENERIC_CLOCKEVENTS373368 select USB_ARCH_HAS_EHCI···447442 select GENERIC_GPIO448443 select GENERIC_CLOCKEVENTS449444 select HAVE_SCHED_CLOCK445445+ select MIGHT_HAVE_PCI450446 select DMABOUNCE if PCI451447 help452448 Support for Intel's IXP4XX (XScale) family of processors.···487481 select HAVE_IDE488482 select ARM_AMBA489483 select USB_ARCH_HAS_OHCI490490- select COMMON_CLKDEV484484+ select CLKDEV_LOOKUP491485 select GENERIC_TIME492486 select GENERIC_CLOCKEVENTS493487 help···521515 bool "Marvell PXA168/910/MMP2"522516 depends on MMU523517 select ARCH_REQUIRE_GPIOLIB524524- select COMMON_CLKDEV518518+ select CLKDEV_LOOKUP525519 select GENERIC_CLOCKEVENTS526520 select HAVE_SCHED_CLOCK527521 select TICK_ONESHOT···555549 bool "Nuvoton W90X900 CPU"556550 select CPU_ARM926T557551 select ARCH_REQUIRE_GPIOLIB558558- select COMMON_CLKDEV552552+ select CLKDEV_LOOKUP559553 select GENERIC_CLOCKEVENTS560554 help561555 Support for Nuvoton (Winbond logic dept.) ARM9 processor,···569563config ARCH_NUC93X570564 bool "Nuvoton NUC93X CPU"571565 select CPU_ARM926T572572- select COMMON_CLKDEV566566+ select CLKDEV_LOOKUP573567 help574568 Support for Nuvoton (Winbond logic dept.) 
NUC93X MCU,The NUC93X is a575569 low-power and high performance MPEG-4/JPEG multimedia controller chip.576570577571config ARCH_TEGRA578572 bool "NVIDIA Tegra"573573+ select CLKDEV_LOOKUP579574 select GENERIC_TIME580575 select GENERIC_CLOCKEVENTS581576 select GENERIC_GPIO582577 select HAVE_CLK583578 select HAVE_SCHED_CLOCK584584- select COMMON_CLKDEV585579 select ARCH_HAS_BARRIERS if CACHE_L2X0586580 select ARCH_HAS_CPUFREQ587581 help···591585config ARCH_PNX4008592586 bool "Philips Nexperia PNX4008 Mobile"593587 select CPU_ARM926T594594- select COMMON_CLKDEV588588+ select CLKDEV_LOOKUP595589 select ARCH_USES_GETTIMEOFFSET596590 help597591 This enables support for Philips PNX4008 mobile platform.···601595 depends on MMU602596 select ARCH_MTD_XIP603597 select ARCH_HAS_CPUFREQ604604- select COMMON_CLKDEV598598+ select CLKDEV_LOOKUP605599 select ARCH_REQUIRE_GPIOLIB606600 select GENERIC_CLOCKEVENTS607601 select HAVE_SCHED_CLOCK···780774 bool "Telechips TCC ARM926-based systems"781775 select CPU_ARM926T782776 select HAVE_CLK783783- select COMMON_CLKDEV777777+ select CLKDEV_LOOKUP784778 select GENERIC_CLOCKEVENTS785779 help786780 Support for Telechips TCC ARM926-based systems.···805799 select ARM_AMBA806800 select ARM_VIC807801 select GENERIC_CLOCKEVENTS808808- select COMMON_CLKDEV802802+ select CLKDEV_LOOKUP809803 select GENERIC_GPIO810804 help811805 Support for ST-Ericsson U300 series mobile platforms.···815809 select CPU_V7816810 select ARM_AMBA817811 select GENERIC_CLOCKEVENTS818818- select COMMON_CLKDEV812812+ select CLKDEV_LOOKUP819813 select ARCH_REQUIRE_GPIOLIB820814 help821815 Support for ST-Ericsson's Ux500 architecture···825819 select ARM_AMBA826820 select ARM_VIC827821 select CPU_ARM926T828828- select COMMON_CLKDEV822822+ select CLKDEV_LOOKUP829823 select GENERIC_CLOCKEVENTS830824 select ARCH_REQUIRE_GPIOLIB831825 help···837831 select ARCH_REQUIRE_GPIOLIB838832 select ZONE_DMA839833 select HAVE_IDE840840- select COMMON_CLKDEV834834+ select CLKDEV_LOOKUP841835 
select GENERIC_ALLOCATOR842836 select ARCH_HAS_HOLES_MEMORYMODEL843837 help···858852 bool "ST SPEAr"859853 select ARM_AMBA860854 select ARCH_REQUIRE_GPIOLIB861861- select COMMON_CLKDEV855855+ select CLKDEV_LOOKUP862856 select GENERIC_CLOCKEVENTS863857 select HAVE_CLK864858 help···10401034 default y10411035 bool1042103610371037+config MULTI_IRQ_HANDLER10381038+ bool10391039+ help10401040+ Allow each machine to specify it's own IRQ handler at run time.10411041+10431042if !MMU10441043source "arch/arm/Kconfig-nommu"10451044endif···11921181 bool1193118211941183config PCI11951195- bool "PCI support" if ARCH_INTEGRATOR_AP || ARCH_VERSATILE_PB || ARCH_IXP4XX || ARCH_KS8695 || MACH_ARMCORE || ARCH_CNS3XXX || SA1100_NANOENGINE11841184+ bool "PCI support" if MIGHT_HAVE_PCI11961185 help11971186 Find out whether you have a PCI motherboard. PCI is the name of a11981187 bus system, i.e. the way the CPU talks to the other stuff inside···12641253config SMP_ON_UP12651254 bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"12661255 depends on EXPERIMENTAL12671267- depends on SMP && !XIP && !THUMB2_KERNEL12561256+ depends on SMP && !XIP12681257 default y12691258 help12701259 SMP kernels contain instructions which fail on non-SMP processors.···12831272config HAVE_ARM_TWD12841273 bool12851274 depends on SMP12751275+ select TICK_ONESHOT12861276 help12871277 This options enables support for the ARM timer and watchdog unit12881278···13471335 default 1001348133613491337config THUMB2_KERNEL13501350- bool "Compile the kernel in Thumb-2 mode"13381338+ bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)"13511339 depends on CPU_V7 && !CPU_V6 && EXPERIMENTAL13521340 select AEABI13531341 select ARM_ASM_UNIFIED···1561154915621550config CC_STACKPROTECTOR15631551 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"15521552+ depends on EXPERIMENTAL15641553 help15651554 This option turns on the -fstack-protector GCC feature. 
This15661555 feature puts, at the beginning of functions, a canary value on···17581745 Internal configuration node for common cpufreq on Samsung SoC1759174617601747config CPU_FREQ_S3C24XX17611761- bool "CPUfreq driver for Samsung S3C24XX series CPUs"17481748+ bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)"17621749 depends on ARCH_S3C2410 && CPU_FREQ && EXPERIMENTAL17631750 select CPU_FREQ_S3C17641751 help···17701757 If in doubt, say N.1771175817721759config CPU_FREQ_S3C24XX_PLL17731773- bool "Support CPUfreq changing of PLL frequency"17601760+ bool "Support CPUfreq changing of PLL frequency (EXPERIMENTAL)"17741761 depends on CPU_FREQ_S3C24XX && EXPERIMENTAL17751762 help17761763 Compile in support for changing the PLL frequency from the
+1-1
arch/arm/Kconfig.debug
···3131 reported is severely limited.32323333config ARM_UNWIND3434- bool "Enable stack unwinding support"3434+ bool "Enable stack unwinding support (EXPERIMENTAL)"3535 depends on AEABI && EXPERIMENTAL3636 default y3737 help
···11-/*22- * arch/arm/common/clkdev.c33- *44- * Copyright (C) 2008 Russell King.55- *66- * This program is free software; you can redistribute it and/or modify77- * it under the terms of the GNU General Public License version 2 as88- * published by the Free Software Foundation.99- *1010- * Helper for the clk API to assist looking up a struct clk.1111- */1212-#include <linux/module.h>1313-#include <linux/kernel.h>1414-#include <linux/device.h>1515-#include <linux/list.h>1616-#include <linux/errno.h>1717-#include <linux/err.h>1818-#include <linux/string.h>1919-#include <linux/mutex.h>2020-#include <linux/clk.h>2121-#include <linux/slab.h>2222-2323-#include <asm/clkdev.h>2424-#include <mach/clkdev.h>2525-2626-static LIST_HEAD(clocks);2727-static DEFINE_MUTEX(clocks_mutex);2828-2929-/*3030- * Find the correct struct clk for the device and connection ID.3131- * We do slightly fuzzy matching here:3232- * An entry with a NULL ID is assumed to be a wildcard.3333- * If an entry has a device ID, it must match3434- * If an entry has a connection ID, it must match3535- * Then we take the most specific entry - with the following3636- * order of precedence: dev+con > dev only > con only.3737- */3838-static struct clk *clk_find(const char *dev_id, const char *con_id)3939-{4040- struct clk_lookup *p;4141- struct clk *clk = NULL;4242- int match, best = 0;4343-4444- list_for_each_entry(p, &clocks, node) {4545- match = 0;4646- if (p->dev_id) {4747- if (!dev_id || strcmp(p->dev_id, dev_id))4848- continue;4949- match += 2;5050- }5151- if (p->con_id) {5252- if (!con_id || strcmp(p->con_id, con_id))5353- continue;5454- match += 1;5555- }5656-5757- if (match > best) {5858- clk = p->clk;5959- if (match != 3)6060- best = match;6161- else6262- break;6363- }6464- }6565- return clk;6666-}6767-6868-struct clk *clk_get_sys(const char *dev_id, const char *con_id)6969-{7070- struct clk *clk;7171-7272- mutex_lock(&clocks_mutex);7373- clk = clk_find(dev_id, con_id);7474- if (clk && 
!__clk_get(clk))7575- clk = NULL;7676- mutex_unlock(&clocks_mutex);7777-7878- return clk ? clk : ERR_PTR(-ENOENT);7979-}8080-EXPORT_SYMBOL(clk_get_sys);8181-8282-struct clk *clk_get(struct device *dev, const char *con_id)8383-{8484- const char *dev_id = dev ? dev_name(dev) : NULL;8585-8686- return clk_get_sys(dev_id, con_id);8787-}8888-EXPORT_SYMBOL(clk_get);8989-9090-void clk_put(struct clk *clk)9191-{9292- __clk_put(clk);9393-}9494-EXPORT_SYMBOL(clk_put);9595-9696-void clkdev_add(struct clk_lookup *cl)9797-{9898- mutex_lock(&clocks_mutex);9999- list_add_tail(&cl->node, &clocks);100100- mutex_unlock(&clocks_mutex);101101-}102102-EXPORT_SYMBOL(clkdev_add);103103-104104-void __init clkdev_add_table(struct clk_lookup *cl, size_t num)105105-{106106- mutex_lock(&clocks_mutex);107107- while (num--) {108108- list_add_tail(&cl->node, &clocks);109109- cl++;110110- }111111- mutex_unlock(&clocks_mutex);112112-}113113-114114-#define MAX_DEV_ID 20115115-#define MAX_CON_ID 16116116-117117-struct clk_lookup_alloc {118118- struct clk_lookup cl;119119- char dev_id[MAX_DEV_ID];120120- char con_id[MAX_CON_ID];121121-};122122-123123-struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,124124- const char *dev_fmt, ...)125125-{126126- struct clk_lookup_alloc *cla;127127-128128- cla = kzalloc(sizeof(*cla), GFP_KERNEL);129129- if (!cla)130130- return NULL;131131-132132- cla->cl.clk = clk;133133- if (con_id) {134134- strlcpy(cla->con_id, con_id, sizeof(cla->con_id));135135- cla->cl.con_id = cla->con_id;136136- }137137-138138- if (dev_fmt) {139139- va_list ap;140140-141141- va_start(ap, dev_fmt);142142- vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);143143- cla->cl.dev_id = cla->dev_id;144144- va_end(ap);145145- }146146-147147- return &cla->cl;148148-}149149-EXPORT_SYMBOL(clkdev_alloc);150150-151151-int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,152152- struct device *dev)153153-{154154- struct clk *r = clk_get(dev, id);155155- struct 
clk_lookup *l;156156-157157- if (IS_ERR(r))158158- return PTR_ERR(r);159159-160160- l = clkdev_alloc(r, alias, alias_dev_name);161161- clk_put(r);162162- if (!l)163163- return -ENODEV;164164- clkdev_add(l);165165- return 0;166166-}167167-EXPORT_SYMBOL(clk_add_alias);168168-169169-/*170170- * clkdev_drop - remove a clock dynamically allocated171171- */172172-void clkdev_drop(struct clk_lookup *cl)173173-{174174- mutex_lock(&clocks_mutex);175175- list_del(&cl->node);176176- mutex_unlock(&clocks_mutex);177177- kfree(cl);178178-}179179-EXPORT_SYMBOL(clkdev_drop);
+8-8
arch/arm/common/dmabounce.c
···328328 * substitute the safe buffer for the unsafe one.329329 * (basically move the buffer from an unsafe area to a safe one)330330 */331331-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,331331+dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,332332 enum dma_data_direction dir)333333{334334 dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",···338338339339 return map_single(dev, ptr, size, dir);340340}341341-EXPORT_SYMBOL(dma_map_single);341341+EXPORT_SYMBOL(__dma_map_single);342342343343/*344344 * see if a mapped address was really a "safe" buffer and if so, copy···346346 * the safe buffer. (basically return things back to the way they347347 * should be)348348 */349349-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,349349+void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,350350 enum dma_data_direction dir)351351{352352 dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",···354354355355 unmap_single(dev, dma_addr, size, dir);356356}357357-EXPORT_SYMBOL(dma_unmap_single);357357+EXPORT_SYMBOL(__dma_unmap_single);358358359359-dma_addr_t dma_map_page(struct device *dev, struct page *page,359359+dma_addr_t __dma_map_page(struct device *dev, struct page *page,360360 unsigned long offset, size_t size, enum dma_data_direction dir)361361{362362 dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",···372372373373 return map_single(dev, page_address(page) + offset, size, dir);374374}375375-EXPORT_SYMBOL(dma_map_page);375375+EXPORT_SYMBOL(__dma_map_page);376376377377/*378378 * see if a mapped address was really a "safe" buffer and if so, copy···380380 * the safe buffer. 
(basically return things back to the way they381381 * should be)382382 */383383-void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,383383+void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,384384 enum dma_data_direction dir)385385{386386 dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",···388388389389 unmap_single(dev, dma_addr, size, dir);390390}391391-EXPORT_SYMBOL(dma_unmap_page);391391+EXPORT_SYMBOL(__dma_unmap_page);392392393393int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,394394 unsigned long off, size_t sz, enum dma_data_direction dir)
···55#include <linux/threads.h>66#include <asm/irq.h>7788+#define NR_IPI 599+810typedef struct {911 unsigned int __softirq_pending;1212+#ifdef CONFIG_LOCAL_TIMERS1013 unsigned int local_timer_irqs;1414+#endif1515+#ifdef CONFIG_SMP1616+ unsigned int ipi_irqs[NR_IPI];1717+#endif1118} ____cacheline_aligned irq_cpustat_t;12191320#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */2121+2222+#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++2323+#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)2424+2525+#ifdef CONFIG_SMP2626+u64 smp_irq_stat_cpu(unsigned int cpu);2727+#else2828+#define smp_irq_stat_cpu(cpu) 02929+#endif3030+3131+#define arch_irq_stat_cpu smp_irq_stat_cpu14321533#if NR_IRQS > 5121634#define HARDIRQ_BITS 10
-12
arch/arm/include/asm/localtimer.h
···3030#include "smp_twd.h"31313232#define local_timer_ack() twd_timer_ack()3333-#define local_timer_stop() twd_timer_stop()34333534#else3635···3940 */4041int local_timer_ack(void);41424242-/*4343- * Stop a local timer interrupt.4444- */4545-void local_timer_stop(void);4646-4743#endif48444945/*5046 * Setup a local timer interrupt for a CPU.5147 */5248void local_timer_setup(struct clock_event_device *);5353-5454-#else5555-5656-static inline void local_timer_stop(void)5757-{5858-}59496050#endif6151
+9
arch/arm/include/asm/mach/arch.h
···3737 struct meminfo *);3838 void (*reserve)(void);/* reserve mem blocks */3939 void (*map_io)(void);/* IO mapping function */4040+ void (*init_early)(void);4041 void (*init_irq)(void);4142 struct sys_timer *timer; /* system tick timer */4243 void (*init_machine)(void);4444+#ifdef CONFIG_MULTI_IRQ_HANDLER4545+ void (*handle_irq)(struct pt_regs *);4646+#endif4347};4848+4949+/*5050+ * Current machine - only accessible during boot.5151+ */5252+extern struct machine_desc *machine_desc;44534554/*4655 * Set of macros to define architecture features. This is built into
+5-3
arch/arm/include/asm/mach/irq.h
···1717/*1818 * This is internal. Do not use it.1919 */2020-extern unsigned int arch_nr_irqs;2121-extern void (*init_arch_irq)(void);2220extern void init_FIQ(void);2323-extern int show_fiq_list(struct seq_file *, void *);2121+extern int show_fiq_list(struct seq_file *, int);2222+2323+#ifdef CONFIG_MULTI_IRQ_HANDLER2424+extern void (*handle_arch_irq)(struct pt_regs *);2525+#endif24262527/*2628 * This is for easy migration, but should be changed in the source
···3333/*3434 * generate IPI list text3535 */3636-extern void show_ipi_list(struct seq_file *p);3636+extern void show_ipi_list(struct seq_file *, int);37373838/*3939 * Called from assembly code, this handles an IPI.4040 */4141-asmlinkage void do_IPI(struct pt_regs *regs);4141+asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);42424343/*4444 * Setup the set of possible CPUs (via set_cpu_possible)4545 */4646extern void smp_init_cpus(void);47474848-/*4949- * Move global data into per-processor storage.5050- */5151-extern void smp_store_cpu_info(unsigned int cpuid);52485349/*5450 * Raise an IPI cross call on CPUs in callmap.5551 */5656-extern void smp_cross_call(const struct cpumask *mask);5252+extern void smp_cross_call(const struct cpumask *mask, int ipi);57535854/*5955 * Boot a secondary CPU, and assign it the specified idle task.···6771 * Perform platform specific initialisation of the specified CPU.6872 */6973extern void platform_secondary_init(unsigned int cpu);7474+7575+/*7676+ * Initialize cpu_possible map, and enable coherency7777+ */7878+extern void platform_smp_prepare_cpus(unsigned int);70797180/*7281 * Initial data for bringing up a secondary CPU.···9897/*9998 * show local interrupt info10099 */101101-extern void show_local_irqs(struct seq_file *);100100+extern void show_local_irqs(struct seq_file *, int);102101103102#endif /* ifndef __ASM_ARM_SMP_H */
···75757676unsigned int processor_id;7777EXPORT_SYMBOL(processor_id);7878-unsigned int __machine_arch_type;7878+unsigned int __machine_arch_type __read_mostly;7979EXPORT_SYMBOL(__machine_arch_type);8080-unsigned int cacheid;8080+unsigned int cacheid __read_mostly;8181EXPORT_SYMBOL(cacheid);82828383unsigned int __atags_pointer __initdata;···9191unsigned int system_serial_high;9292EXPORT_SYMBOL(system_serial_high);93939494-unsigned int elf_hwcap;9494+unsigned int elf_hwcap __read_mostly;9595EXPORT_SYMBOL(elf_hwcap);969697979898#ifdef MULTI_CPU9999-struct processor processor;9999+struct processor processor __read_mostly;100100#endif101101#ifdef MULTI_TLB102102-struct cpu_tlb_fns cpu_tlb;102102+struct cpu_tlb_fns cpu_tlb __read_mostly;103103#endif104104#ifdef MULTI_USER105105-struct cpu_user_fns cpu_user;105105+struct cpu_user_fns cpu_user __read_mostly;106106#endif107107#ifdef MULTI_CACHE108108-struct cpu_cache_fns cpu_cache;108108+struct cpu_cache_fns cpu_cache __read_mostly;109109#endif110110#ifdef CONFIG_OUTER_CACHE111111-struct outer_cache_fns outer_cache;111111+struct outer_cache_fns outer_cache __read_mostly;112112EXPORT_SYMBOL(outer_cache);113113#endif114114···126126static const char *cpu_name;127127static const char *machine_name;128128static char __initdata cmd_line[COMMAND_LINE_SIZE];129129+struct machine_desc *machine_desc __initdata;129130130131static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;131132static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };···709708 { 0, ATAG_NONE }710709};711710712712-static void (*init_machine)(void) __initdata;713713-714711static int __init customize_machine(void)715712{716713 /* customizes platform devices, or adds new ones */717717- if (init_machine)718718- init_machine();714714+ if (machine_desc->init_machine)715715+ machine_desc->init_machine();719716 return 0;720717}721718arch_initcall(customize_machine);···808809809810 setup_processor();810811 
mdesc = setup_machine(machine_arch_type);812812+ machine_desc = mdesc;811813 machine_name = mdesc->name;812814813815 if (mdesc->soft_reboot)···868868 cpu_init();869869 tcm_init();870870871871- /*872872- * Set up various architecture-specific pointers873873- */874874- arch_nr_irqs = mdesc->nr_irqs;875875- init_arch_irq = mdesc->init_irq;876876- system_timer = mdesc->timer;877877- init_machine = mdesc->init_machine;871871+#ifdef CONFIG_MULTI_IRQ_HANDLER872872+ handle_arch_irq = mdesc->handle_irq;873873+#endif878874879875#ifdef CONFIG_VT880876#if defined(CONFIG_VGA_CONSOLE)···880884#endif881885#endif882886 early_trap_init();887887+888888+ if (mdesc->init_early)889889+ mdesc->init_early();883890}884891885892
+158-253
arch/arm/kernel/smp.c
···2525#include <linux/irq.h>2626#include <linux/percpu.h>2727#include <linux/clockchips.h>2828+#include <linux/completion.h>28292930#include <asm/atomic.h>3031#include <asm/cacheflush.h>···3938#include <asm/tlbflush.h>4039#include <asm/ptrace.h>4140#include <asm/localtimer.h>4242-#include <asm/smp_plat.h>43414442/*4543 * as from 2.5, kernels no longer have an init_tasks structure···4747 */4848struct secondary_data secondary_data;49495050-/*5151- * structures for inter-processor calls5252- * - A collection of single bit ipi messages.5353- */5454-struct ipi_data {5555- spinlock_t lock;5656- unsigned long ipi_count;5757- unsigned long bits;5858-};5959-6060-static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {6161- .lock = SPIN_LOCK_UNLOCKED,6262-};6363-6450enum ipi_msg_type {6565- IPI_TIMER,5151+ IPI_TIMER = 2,6652 IPI_RESCHEDULE,6753 IPI_CALL_FUNC,6854 IPI_CALL_FUNC_SINGLE,···164178 barrier();165179 }166180167167- if (!cpu_online(cpu))181181+ if (!cpu_online(cpu)) {182182+ pr_crit("CPU%u: failed to come online\n", cpu);168183 ret = -EIO;184184+ }185185+ } else {186186+ pr_err("CPU%u: failed to boot: %d\n", cpu, ret);169187 }170188171189 secondary_data.stack = NULL;···185195186196 pgd_free(&init_mm, pgd);187197188188- if (ret) {189189- printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);190190-191191- /*192192- * FIXME: We need to clean up the new idle thread. 
--rmk193193- */194194- }195195-196198 return ret;197199}198200199201#ifdef CONFIG_HOTPLUG_CPU202202+static void percpu_timer_stop(void);203203+200204/*201205 * __cpu_disable runs on the processor to be shutdown.202206 */···218234 /*219235 * Stop the local timer for this CPU.220236 */221221- local_timer_stop();237237+ percpu_timer_stop();222238223239 /*224240 * Flush user cache and TLB mappings, and then remove this CPU···237253 return 0;238254}239255256256+static DECLARE_COMPLETION(cpu_died);257257+240258/*241259 * called on the thread which is asking for a CPU to be shutdown -242260 * waits until shutdown has completed, or it is timed out.243261 */244262void __cpu_die(unsigned int cpu)245263{264264+ if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {265265+ pr_err("CPU%u: cpu didn't die\n", cpu);266266+ return;267267+ }268268+ printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);269269+246270 if (!platform_cpu_kill(cpu))247271 printk("CPU%u: unable to kill\n", cpu);248272}···267275{268276 unsigned int cpu = smp_processor_id();269277270270- local_irq_disable();271278 idle_task_exit();279279+280280+ local_irq_disable();281281+ mb();282282+283283+ /* Tell __cpu_die() that this CPU is now safe to dispose of */284284+ complete(&cpu_died);272285273286 /*274287 * actual CPU shutdown procedure is at least platform (if not275275- * CPU) specific288288+ * CPU) specific.276289 */277290 platform_cpu_die(cpu);278291···287290 * to be repeated to undo the effects of taking the CPU offline.288291 */289292 __asm__("mov sp, %0\n"293293+ " mov fp, #0\n"290294 " b secondary_start_kernel"291295 :292296 : "r" (task_stack_page(current) + THREAD_SIZE - 8));293297}294298#endif /* CONFIG_HOTPLUG_CPU */299299+300300+/*301301+ * Called by both boot and secondaries to move global data into302302+ * per-processor storage.303303+ */304304+static void __cpuinit smp_store_cpu_info(unsigned int cpuid)305305+{306306+ struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);307307+308308+ 
cpu_info->loops_per_jiffy = loops_per_jiffy;309309+}295310296311/*297312 * This is the secondary CPU boot entry. We're using this CPUs···329320330321 cpu_init();331322 preempt_disable();323323+ trace_hardirqs_off();332324333325 /*334326 * Give the platform a chance to do its own initialisation.···363353 cpu_idle();364354}365355366366-/*367367- * Called by both boot and secondaries to move global data into368368- * per-processor storage.369369- */370370-void __cpuinit smp_store_cpu_info(unsigned int cpuid)371371-{372372- struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);373373-374374- cpu_info->loops_per_jiffy = loops_per_jiffy;375375-}376376-377356void __init smp_cpus_done(unsigned int max_cpus)378357{379358 int cpu;···385386 per_cpu(cpu_data, cpu).idle = current;386387}387388388388-static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)389389+void __init smp_prepare_cpus(unsigned int max_cpus)389390{390390- unsigned long flags;391391- unsigned int cpu;391391+ unsigned int ncores = num_possible_cpus();392392393393- local_irq_save(flags);394394-395395- for_each_cpu(cpu, mask) {396396- struct ipi_data *ipi = &per_cpu(ipi_data, cpu);397397-398398- spin_lock(&ipi->lock);399399- ipi->bits |= 1 << msg;400400- spin_unlock(&ipi->lock);401401- }393393+ smp_store_cpu_info(smp_processor_id());402394403395 /*404404- * Call the platform specific cross-CPU call function.396396+ * are we trying to boot more cores than exist?405397 */406406- smp_cross_call(mask);398398+ if (max_cpus > ncores)399399+ max_cpus = ncores;407400408408- local_irq_restore(flags);401401+ if (max_cpus > 1) {402402+ /*403403+ * Enable the local timer or broadcast device for the404404+ * boot CPU, but only if we have more than one CPU.405405+ */406406+ percpu_timer_setup();407407+408408+ /*409409+ * Initialise the SCU if there are more than one CPU410410+ * and let them know where to start.411411+ */412412+ platform_smp_prepare_cpus(max_cpus);413413+ }409414}410415411416void 
arch_send_call_function_ipi_mask(const struct cpumask *mask)412417{413413- send_ipi_message(mask, IPI_CALL_FUNC);418418+ smp_cross_call(mask, IPI_CALL_FUNC);414419}415420416421void arch_send_call_function_single_ipi(int cpu)417422{418418- send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);423423+ smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);419424}420425421421-void show_ipi_list(struct seq_file *p)426426+static const char *ipi_types[NR_IPI] = {427427+#define S(x,s) [x - IPI_TIMER] = s428428+ S(IPI_TIMER, "Timer broadcast interrupts"),429429+ S(IPI_RESCHEDULE, "Rescheduling interrupts"),430430+ S(IPI_CALL_FUNC, "Function call interrupts"),431431+ S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),432432+ S(IPI_CPU_STOP, "CPU stop interrupts"),433433+};434434+435435+void show_ipi_list(struct seq_file *p, int prec)422436{423423- unsigned int cpu;437437+ unsigned int cpu, i;424438425425- seq_puts(p, "IPI:");439439+ for (i = 0; i < NR_IPI; i++) {440440+ seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);426441427427- for_each_present_cpu(cpu)428428- seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);442442+ for_each_present_cpu(cpu)443443+ seq_printf(p, "%10u ",444444+ __get_irq_stat(cpu, ipi_irqs[i]));429445430430- seq_putc(p, '\n');446446+ seq_printf(p, " %s\n", ipi_types[i]);447447+ }431448}432449433433-void show_local_irqs(struct seq_file *p)450450+u64 smp_irq_stat_cpu(unsigned int cpu)434451{435435- unsigned int cpu;452452+ u64 sum = 0;453453+ int i;436454437437- seq_printf(p, "LOC: ");455455+ for (i = 0; i < NR_IPI; i++)456456+ sum += __get_irq_stat(cpu, ipi_irqs[i]);438457439439- for_each_present_cpu(cpu)440440- seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);458458+#ifdef CONFIG_LOCAL_TIMERS459459+ sum += __get_irq_stat(cpu, local_timer_irqs);460460+#endif441461442442- seq_putc(p, '\n');462462+ return sum;443463}444464445465/*···481463 int cpu = smp_processor_id();482464483465 if (local_timer_ack()) {484484- 
irq_stat[cpu].local_timer_irqs++;466466+ __inc_irq_stat(cpu, local_timer_irqs);485467 ipi_timer();486468 }487469488470 set_irq_regs(old_regs);471471+}472472+473473+void show_local_irqs(struct seq_file *p, int prec)474474+{475475+ unsigned int cpu;476476+477477+ seq_printf(p, "%*s: ", prec, "LOC");478478+479479+ for_each_present_cpu(cpu)480480+ seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));481481+482482+ seq_printf(p, " Local timer interrupts\n");489483}490484#endif491485492486#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST493487static void smp_timer_broadcast(const struct cpumask *mask)494488{495495- send_ipi_message(mask, IPI_TIMER);489489+ smp_cross_call(mask, IPI_TIMER);496490}497491#else498492#define smp_timer_broadcast NULL···541511 local_timer_setup(evt);542512}543513514514+#ifdef CONFIG_HOTPLUG_CPU515515+/*516516+ * The generic clock events code purposely does not stop the local timer517517+ * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it518518+ * manually here.519519+ */520520+static void percpu_timer_stop(void)521521+{522522+ unsigned int cpu = smp_processor_id();523523+ struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);524524+525525+ evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);526526+}527527+#endif528528+544529static DEFINE_SPINLOCK(stop_lock);545530546531/*···582537583538/*584539 * Main handler for inter-processor interrupts585585- *586586- * For ARM, the ipimask now only identifies a single587587- * category of IPI (Bit 1 IPIs have been replaced by a588588- * different mechanism):589589- *590590- * Bit 0 - Inter-processor function call591540 */592592-asmlinkage void __exception_irq_entry do_IPI(struct pt_regs *regs)541541+asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)593542{594543 unsigned int cpu = smp_processor_id();595595- struct ipi_data *ipi = &per_cpu(ipi_data, cpu);596544 struct pt_regs *old_regs = set_irq_regs(regs);597545598598- ipi->ipi_count++;546546+ if (ipinr >= 
IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)547547+ __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);599548600600- for (;;) {601601- unsigned long msgs;549549+ switch (ipinr) {550550+ case IPI_TIMER:551551+ ipi_timer();552552+ break;602553603603- spin_lock(&ipi->lock);604604- msgs = ipi->bits;605605- ipi->bits = 0;606606- spin_unlock(&ipi->lock);554554+ case IPI_RESCHEDULE:555555+ /*556556+ * nothing more to do - eveything is557557+ * done on the interrupt return path558558+ */559559+ break;607560608608- if (!msgs)609609- break;561561+ case IPI_CALL_FUNC:562562+ generic_smp_call_function_interrupt();563563+ break;610564611611- do {612612- unsigned nextmsg;565565+ case IPI_CALL_FUNC_SINGLE:566566+ generic_smp_call_function_single_interrupt();567567+ break;613568614614- nextmsg = msgs & -msgs;615615- msgs &= ~nextmsg;616616- nextmsg = ffz(~nextmsg);569569+ case IPI_CPU_STOP:570570+ ipi_cpu_stop(cpu);571571+ break;617572618618- switch (nextmsg) {619619- case IPI_TIMER:620620- ipi_timer();621621- break;622622-623623- case IPI_RESCHEDULE:624624- /*625625- * nothing more to do - eveything is626626- * done on the interrupt return path627627- */628628- break;629629-630630- case IPI_CALL_FUNC:631631- generic_smp_call_function_interrupt();632632- break;633633-634634- case IPI_CALL_FUNC_SINGLE:635635- generic_smp_call_function_single_interrupt();636636- break;637637-638638- case IPI_CPU_STOP:639639- ipi_cpu_stop(cpu);640640- break;641641-642642- default:643643- printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",644644- cpu, nextmsg);645645- break;646646- }647647- } while (msgs);573573+ default:574574+ printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",575575+ cpu, ipinr);576576+ break;648577 }649649-650578 set_irq_regs(old_regs);651579}652580653581void smp_send_reschedule(int cpu)654582{655655- send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);583583+ smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);656584}657585658586void smp_send_stop(void)659587{660660- cpumask_t mask = 
cpu_online_map;661661- cpu_clear(smp_processor_id(), mask);662662- if (!cpus_empty(mask))663663- send_ipi_message(&mask, IPI_CPU_STOP);588588+ unsigned long timeout;589589+590590+ if (num_online_cpus() > 1) {591591+ cpumask_t mask = cpu_online_map;592592+ cpu_clear(smp_processor_id(), mask);593593+594594+ smp_cross_call(&mask, IPI_CPU_STOP);595595+ }596596+597597+ /* Wait up to one second for other CPUs to stop */598598+ timeout = USEC_PER_SEC;599599+ while (num_online_cpus() > 1 && timeout--)600600+ udelay(1);601601+602602+ if (num_online_cpus() > 1)603603+ pr_warning("SMP: failed to stop secondary CPUs\n");664604}665605666606/*···654624int setup_profiling_timer(unsigned int multiplier)655625{656626 return -EINVAL;657657-}658658-659659-static void660660-on_each_cpu_mask(void (*func)(void *), void *info, int wait,661661- const struct cpumask *mask)662662-{663663- preempt_disable();664664-665665- smp_call_function_many(mask, func, info, wait);666666- if (cpumask_test_cpu(smp_processor_id(), mask))667667- func(info);668668-669669- preempt_enable();670670-}671671-672672-/**********************************************************************/673673-674674-/*675675- * TLB operations676676- */677677-struct tlb_args {678678- struct vm_area_struct *ta_vma;679679- unsigned long ta_start;680680- unsigned long ta_end;681681-};682682-683683-static inline void ipi_flush_tlb_all(void *ignored)684684-{685685- local_flush_tlb_all();686686-}687687-688688-static inline void ipi_flush_tlb_mm(void *arg)689689-{690690- struct mm_struct *mm = (struct mm_struct *)arg;691691-692692- local_flush_tlb_mm(mm);693693-}694694-695695-static inline void ipi_flush_tlb_page(void *arg)696696-{697697- struct tlb_args *ta = (struct tlb_args *)arg;698698-699699- local_flush_tlb_page(ta->ta_vma, ta->ta_start);700700-}701701-702702-static inline void ipi_flush_tlb_kernel_page(void *arg)703703-{704704- struct tlb_args *ta = (struct tlb_args *)arg;705705-706706- 
local_flush_tlb_kernel_page(ta->ta_start);707707-}708708-709709-static inline void ipi_flush_tlb_range(void *arg)710710-{711711- struct tlb_args *ta = (struct tlb_args *)arg;712712-713713- local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);714714-}715715-716716-static inline void ipi_flush_tlb_kernel_range(void *arg)717717-{718718- struct tlb_args *ta = (struct tlb_args *)arg;719719-720720- local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);721721-}722722-723723-void flush_tlb_all(void)724724-{725725- if (tlb_ops_need_broadcast())726726- on_each_cpu(ipi_flush_tlb_all, NULL, 1);727727- else728728- local_flush_tlb_all();729729-}730730-731731-void flush_tlb_mm(struct mm_struct *mm)732732-{733733- if (tlb_ops_need_broadcast())734734- on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));735735- else736736- local_flush_tlb_mm(mm);737737-}738738-739739-void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)740740-{741741- if (tlb_ops_need_broadcast()) {742742- struct tlb_args ta;743743- ta.ta_vma = vma;744744- ta.ta_start = uaddr;745745- on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));746746- } else747747- local_flush_tlb_page(vma, uaddr);748748-}749749-750750-void flush_tlb_kernel_page(unsigned long kaddr)751751-{752752- if (tlb_ops_need_broadcast()) {753753- struct tlb_args ta;754754- ta.ta_start = kaddr;755755- on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);756756- } else757757- local_flush_tlb_kernel_page(kaddr);758758-}759759-760760-void flush_tlb_range(struct vm_area_struct *vma,761761- unsigned long start, unsigned long end)762762-{763763- if (tlb_ops_need_broadcast()) {764764- struct tlb_args ta;765765- ta.ta_vma = vma;766766- ta.ta_start = start;767767- ta.ta_end = end;768768- on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));769769- } else770770- local_flush_tlb_range(vma, start, end);771771-}772772-773773-void flush_tlb_kernel_range(unsigned long start, unsigned long end)774774-{775775- if 
(tlb_ops_need_broadcast()) {776776- struct tlb_args ta;777777- ta.ta_start = start;778778- ta.ta_end = end;779779- on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);780780- } else781781- local_flush_tlb_kernel_range(start, end);782627}
+139
arch/arm/kernel/smp_tlb.c
···11+/*22+ * linux/arch/arm/kernel/smp_tlb.c33+ *44+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.55+ *66+ * This program is free software; you can redistribute it and/or modify77+ * it under the terms of the GNU General Public License version 2 as88+ * published by the Free Software Foundation.99+ */1010+#include <linux/preempt.h>1111+#include <linux/smp.h>1212+1313+#include <asm/smp_plat.h>1414+#include <asm/tlbflush.h>1515+1616+static void on_each_cpu_mask(void (*func)(void *), void *info, int wait,1717+ const struct cpumask *mask)1818+{1919+ preempt_disable();2020+2121+ smp_call_function_many(mask, func, info, wait);2222+ if (cpumask_test_cpu(smp_processor_id(), mask))2323+ func(info);2424+2525+ preempt_enable();2626+}2727+2828+/**********************************************************************/2929+3030+/*3131+ * TLB operations3232+ */3333+struct tlb_args {3434+ struct vm_area_struct *ta_vma;3535+ unsigned long ta_start;3636+ unsigned long ta_end;3737+};3838+3939+static inline void ipi_flush_tlb_all(void *ignored)4040+{4141+ local_flush_tlb_all();4242+}4343+4444+static inline void ipi_flush_tlb_mm(void *arg)4545+{4646+ struct mm_struct *mm = (struct mm_struct *)arg;4747+4848+ local_flush_tlb_mm(mm);4949+}5050+5151+static inline void ipi_flush_tlb_page(void *arg)5252+{5353+ struct tlb_args *ta = (struct tlb_args *)arg;5454+5555+ local_flush_tlb_page(ta->ta_vma, ta->ta_start);5656+}5757+5858+static inline void ipi_flush_tlb_kernel_page(void *arg)5959+{6060+ struct tlb_args *ta = (struct tlb_args *)arg;6161+6262+ local_flush_tlb_kernel_page(ta->ta_start);6363+}6464+6565+static inline void ipi_flush_tlb_range(void *arg)6666+{6767+ struct tlb_args *ta = (struct tlb_args *)arg;6868+6969+ local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);7070+}7171+7272+static inline void ipi_flush_tlb_kernel_range(void *arg)7373+{7474+ struct tlb_args *ta = (struct tlb_args *)arg;7575+7676+ local_flush_tlb_kernel_range(ta->ta_start, 
ta->ta_end);7777+}7878+7979+void flush_tlb_all(void)8080+{8181+ if (tlb_ops_need_broadcast())8282+ on_each_cpu(ipi_flush_tlb_all, NULL, 1);8383+ else8484+ local_flush_tlb_all();8585+}8686+8787+void flush_tlb_mm(struct mm_struct *mm)8888+{8989+ if (tlb_ops_need_broadcast())9090+ on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));9191+ else9292+ local_flush_tlb_mm(mm);9393+}9494+9595+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)9696+{9797+ if (tlb_ops_need_broadcast()) {9898+ struct tlb_args ta;9999+ ta.ta_vma = vma;100100+ ta.ta_start = uaddr;101101+ on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));102102+ } else103103+ local_flush_tlb_page(vma, uaddr);104104+}105105+106106+void flush_tlb_kernel_page(unsigned long kaddr)107107+{108108+ if (tlb_ops_need_broadcast()) {109109+ struct tlb_args ta;110110+ ta.ta_start = kaddr;111111+ on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);112112+ } else113113+ local_flush_tlb_kernel_page(kaddr);114114+}115115+116116+void flush_tlb_range(struct vm_area_struct *vma,117117+ unsigned long start, unsigned long end)118118+{119119+ if (tlb_ops_need_broadcast()) {120120+ struct tlb_args ta;121121+ ta.ta_vma = vma;122122+ ta.ta_start = start;123123+ ta.ta_end = end;124124+ on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));125125+ } else126126+ local_flush_tlb_range(vma, start, end);127127+}128128+129129+void flush_tlb_kernel_range(unsigned long start, unsigned long end)130130+{131131+ if (tlb_ops_need_broadcast()) {132132+ struct tlb_args ta;133133+ ta.ta_start = start;134134+ ta.ta_end = end;135135+ on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);136136+ } else137137+ local_flush_tlb_kernel_range(start, end);138138+}139139+
-10
arch/arm/kernel/smp_twd.c
···145145146146 clockevents_register_device(clk);147147}148148-149149-#ifdef CONFIG_HOTPLUG_CPU150150-/*151151- * take a local timer down152152- */153153-void twd_timer_stop(void)154154-{155155- __raw_writel(0, twd_base + TWD_TIMER_CONTROL);156156-}157157-#endif
+267
arch/arm/kernel/swp_emulate.c
···11+/*22+ * linux/arch/arm/kernel/swp_emulate.c33+ *44+ * Copyright (C) 2009 ARM Limited55+ * __user_* functions adapted from include/asm/uaccess.h66+ *77+ * This program is free software; you can redistribute it and/or modify88+ * it under the terms of the GNU General Public License version 2 as99+ * published by the Free Software Foundation.1010+ *1111+ * Implements emulation of the SWP/SWPB instructions using load-exclusive and1212+ * store-exclusive for processors that have them disabled (or future ones that1313+ * might not implement them).1414+ *1515+ * Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]1616+ * Where: Rt = destination1717+ * Rt2 = source1818+ * Rn = address1919+ */2020+2121+#include <linux/init.h>2222+#include <linux/kernel.h>2323+#include <linux/proc_fs.h>2424+#include <linux/sched.h>2525+#include <linux/syscalls.h>2626+#include <linux/perf_event.h>2727+2828+#include <asm/traps.h>2929+#include <asm/uaccess.h>3030+3131+/*3232+ * Error-checking SWP macros implemented using ldrex{b}/strex{b}3333+ */3434+#define __user_swpX_asm(data, addr, res, temp, B) \3535+ __asm__ __volatile__( \3636+ " mov %2, %1\n" \3737+ "0: ldrex"B" %1, [%3]\n" \3838+ "1: strex"B" %0, %2, [%3]\n" \3939+ " cmp %0, #0\n" \4040+ " movne %0, %4\n" \4141+ "2:\n" \4242+ " .section .fixup,\"ax\"\n" \4343+ " .align 2\n" \4444+ "3: mov %0, %5\n" \4545+ " b 2b\n" \4646+ " .previous\n" \4747+ " .section __ex_table,\"a\"\n" \4848+ " .align 3\n" \4949+ " .long 0b, 3b\n" \5050+ " .long 1b, 3b\n" \5151+ " .previous" \5252+ : "=&r" (res), "+r" (data), "=&r" (temp) \5353+ : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \5454+ : "cc", "memory")5555+5656+#define __user_swp_asm(data, addr, res, temp) \5757+ __user_swpX_asm(data, addr, res, temp, "")5858+#define __user_swpb_asm(data, addr, res, temp) \5959+ __user_swpX_asm(data, addr, res, temp, "b")6060+6161+/*6262+ * Macros/defines for extracting register numbers from instruction.6363+ */6464+#define EXTRACT_REG_NUM(instruction, 
offset) \6565+ (((instruction) & (0xf << (offset))) >> (offset))6666+#define RN_OFFSET 166767+#define RT_OFFSET 126868+#define RT2_OFFSET 06969+/*7070+ * Bit 22 of the instruction encoding distinguishes between7171+ * the SWP and SWPB variants (bit set means SWPB).7272+ */7373+#define TYPE_SWPB (1 << 22)7474+7575+static unsigned long swpcounter;7676+static unsigned long swpbcounter;7777+static unsigned long abtcounter;7878+static pid_t previous_pid;7979+8080+#ifdef CONFIG_PROC_FS8181+static int proc_read_status(char *page, char **start, off_t off, int count,8282+ int *eof, void *data)8383+{8484+ char *p = page;8585+ int len;8686+8787+ p += sprintf(p, "Emulated SWP:\t\t%lu\n", swpcounter);8888+ p += sprintf(p, "Emulated SWPB:\t\t%lu\n", swpbcounter);8989+ p += sprintf(p, "Aborted SWP{B}:\t\t%lu\n", abtcounter);9090+ if (previous_pid != 0)9191+ p += sprintf(p, "Last process:\t\t%d\n", previous_pid);9292+9393+ len = (p - page) - off;9494+ if (len < 0)9595+ len = 0;9696+9797+ *eof = (len <= count) ? 
1 : 0;9898+ *start = page + off;9999+100100+ return len;101101+}102102+#endif103103+104104+/*105105+ * Set up process info to signal segmentation fault - called on access error.106106+ */107107+static void set_segfault(struct pt_regs *regs, unsigned long addr)108108+{109109+ siginfo_t info;110110+111111+ if (find_vma(current->mm, addr) == NULL)112112+ info.si_code = SEGV_MAPERR;113113+ else114114+ info.si_code = SEGV_ACCERR;115115+116116+ info.si_signo = SIGSEGV;117117+ info.si_errno = 0;118118+ info.si_addr = (void *) instruction_pointer(regs);119119+120120+ pr_debug("SWP{B} emulation: access caused memory abort!\n");121121+ arm_notify_die("Illegal memory access", regs, &info, 0, 0);122122+123123+ abtcounter++;124124+}125125+126126+static int emulate_swpX(unsigned int address, unsigned int *data,127127+ unsigned int type)128128+{129129+ unsigned int res = 0;130130+131131+ if ((type != TYPE_SWPB) && (address & 0x3)) {132132+ /* SWP to unaligned address not permitted */133133+ pr_debug("SWP instruction on unaligned pointer!\n");134134+ return -EFAULT;135135+ }136136+137137+ while (1) {138138+ unsigned long temp;139139+140140+ /*141141+ * Barrier required between accessing protected resource and142142+ * releasing a lock for it. Legacy code might not have done143143+ * this, and we cannot determine that this is not the case144144+ * being emulated, so insert always.145145+ */146146+ smp_mb();147147+148148+ if (type == TYPE_SWPB)149149+ __user_swpb_asm(*data, address, res, temp);150150+ else151151+ __user_swp_asm(*data, address, res, temp);152152+153153+ if (likely(res != -EAGAIN) || signal_pending(current))154154+ break;155155+156156+ cond_resched();157157+ }158158+159159+ if (res == 0) {160160+ /*161161+ * Barrier also required between aquiring a lock for a162162+ * protected resource and accessing the resource. 
Inserted for163163+ * same reason as above.164164+ */165165+ smp_mb();166166+167167+ if (type == TYPE_SWPB)168168+ swpbcounter++;169169+ else170170+ swpcounter++;171171+ }172172+173173+ return res;174174+}175175+176176+/*177177+ * swp_handler logs the id of calling process, dissects the instruction, sanity178178+ * checks the memory location, calls emulate_swpX for the actual operation and179179+ * deals with fixup/error handling before returning180180+ */181181+static int swp_handler(struct pt_regs *regs, unsigned int instr)182182+{183183+ unsigned int address, destreg, data, type;184184+ unsigned int res = 0;185185+186186+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);187187+188188+ if (current->pid != previous_pid) {189189+ pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",190190+ current->comm, (unsigned long)current->pid);191191+ previous_pid = current->pid;192192+ }193193+194194+ address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)];195195+ data = regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)];196196+ destreg = EXTRACT_REG_NUM(instr, RT_OFFSET);197197+198198+ type = instr & TYPE_SWPB;199199+200200+ pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",201201+ EXTRACT_REG_NUM(instr, RN_OFFSET), address,202202+ destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data);203203+204204+ /* Check access in reasonable access range for both SWP and SWPB */205205+ if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {206206+ pr_debug("SWP{B} emulation: access to %p not allowed!\n",207207+ (void *)address);208208+ res = -EFAULT;209209+ } else {210210+ res = emulate_swpX(address, &data, type);211211+ }212212+213213+ if (res == 0) {214214+ /*215215+ * On successful emulation, revert the adjustment to the PC216216+ * made in kernel/traps.c in order to resume execution at the217217+ * instruction following the SWP{B}.218218+ */219219+ regs->ARM_pc += 4;220220+ regs->uregs[destreg] = data;221221+ } else if (res == -EFAULT) 
{222222+ /*223223+ * Memory errors do not mean emulation failed.224224+ * Set up signal info to return SEGV, then return OK225225+ */226226+ set_segfault(regs, address);227227+ }228228+229229+ return 0;230230+}231231+232232+/*233233+ * Only emulate SWP/SWPB executed in ARM state/User mode.234234+ * The kernel must be SWP free and SWP{B} does not exist in Thumb/ThumbEE.235235+ */236236+static struct undef_hook swp_hook = {237237+ .instr_mask = 0x0fb00ff0,238238+ .instr_val = 0x01000090,239239+ .cpsr_mask = MODE_MASK | PSR_T_BIT | PSR_J_BIT,240240+ .cpsr_val = USR_MODE,241241+ .fn = swp_handler242242+};243243+244244+/*245245+ * Register handler and create status file in /proc/cpu246246+ * Invoked as late_initcall, since not needed before init spawned.247247+ */248248+static int __init swp_emulation_init(void)249249+{250250+#ifdef CONFIG_PROC_FS251251+ struct proc_dir_entry *res;252252+253253+ res = create_proc_entry("cpu/swp_emulation", S_IRUGO, NULL);254254+255255+ if (!res)256256+ return -ENOMEM;257257+258258+ res->read_proc = proc_read_status;259259+#endif /* CONFIG_PROC_FS */260260+261261+ printk(KERN_NOTICE "Registering SWP/SWPB emulation handler\n");262262+ register_undef_hook(&swp_hook);263263+264264+ return 0;265265+}266266+267267+late_initcall(swp_emulation_init);
+3-1
arch/arm/kernel/time.c
···3030#include <asm/leds.h>3131#include <asm/thread_info.h>3232#include <asm/stacktrace.h>3333+#include <asm/mach/arch.h>3334#include <asm/mach/time.h>34353536/*3637 * Our system timer.3738 */3838-struct sys_timer *system_timer;3939+static struct sys_timer *system_timer;39404041#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)4142/* this needs a better home */···161160162161void __init time_init(void)163162{163163+ system_timer = machine_desc->timer;164164 system_timer->init();165165}166166
+10-4
arch/arm/kernel/traps.c
···37373838static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };39394040+void *vectors_page;4141+4042#ifdef CONFIG_DEBUG_USER4143unsigned int user_debug;4244···758756759757void __init early_trap_init(void)760758{759759+#if defined(CONFIG_CPU_USE_DOMAINS)761760 unsigned long vectors = CONFIG_VECTORS_BASE;761761+#else762762+ unsigned long vectors = (unsigned long)vectors_page;763763+#endif762764 extern char __stubs_start[], __stubs_end[];763765 extern char __vectors_start[], __vectors_end[];764766 extern char __kuser_helper_start[], __kuser_helper_end[];···786780 * Copy signal return handlers into the vector page, and787781 * set sigreturn to be a pointer to these.788782 */789789- memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,790790- sizeof(sigreturn_codes));791791- memcpy((void *)KERN_RESTART_CODE, syscall_restart_code,792792- sizeof(syscall_restart_code));783783+ memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),784784+ sigreturn_codes, sizeof(sigreturn_codes));785785+ memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),786786+ syscall_restart_code, sizeof(syscall_restart_code));793787794788 flush_icache_range(vectors, vectors + PAGE_SIZE);795789 modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+1
arch/arm/kernel/vmlinux.lds.S
···168168169169 NOSAVE_DATA170170 CACHELINE_ALIGNED_DATA(32)171171+ READ_MOSTLY_DATA(32)171172172173 /*173174 * The exception fixup table (might need resorting at runtime)
···3344config MACH_CNS3420VB55 bool "Support for CNS3420 Validation Board"66+ select MIGHT_HAVE_PCI67 help78 Include support for the Cavium Networks CNS3420 MPCore Platform89 Baseboard.
···4455config ARCH_INTEGRATOR_AP66 bool "Support Integrator/AP and Integrator/PP2 platforms"77+ select MIGHT_HAVE_PCI78 help89 Include support for the ARM(R) Integrator/AP and910 Integrator/PP2 platforms.
···5858 __dma; \5959 })60606161-#define __arch_page_to_dma(dev, page) \6161+#define __arch_pfn_to_dma(dev, pfn) \6262 ({ \6363 /* __is_lbus_virt() can never be true for RAM pages */ \6464- (dma_addr_t)page_to_phys(page); \6464+ (dma_addr_t)__pfn_to_phys(pfn); \6565 })66666767-#define __arch_dma_to_page(dev, addr) phys_to_page(addr)6767+#define __arch_dma_to_pfn(dev, addr) __phys_to_pfn(addr)68686969#endif /* CONFIG_ARCH_IOP13XX */7070#endif /* !ASSEMBLY */
+1
arch/arm/mach-ks8695/Kconfig
···4455config MACH_KS869566 bool "KS8695 development board"77+ select MIGHT_HAVE_PCI78 help89 Say 'Y' here if you want your kernel to run on the original910 Kendin-Micrel KS8695 development board.
···1010 * the Free Software Foundation; either version 2 of the License.1111 */12121313-#include <asm/clkdev.h>1313+#include <linux/clkdev.h>14141515void nuc93x_clk_enable(struct clk *clk, int enable);1616void clks_register(struct clk_lookup *clks, size_t num);
···1717#include <linux/kernel.h>1818#include <linux/errno.h>1919#include <linux/smp.h>2020-#include <linux/completion.h>21202221#include <asm/cacheflush.h>2322#include <mach/omap4-common.h>24232525-static DECLARE_COMPLETION(cpu_killed);2626-2724int platform_cpu_kill(unsigned int cpu)2825{2929- return wait_for_completion_timeout(&cpu_killed, 5000);2626+ return 1;3027}31283229/*···3235 */3336void platform_cpu_die(unsigned int cpu)3437{3535- unsigned int this_cpu = hard_smp_processor_id();3636-3737- if (cpu != this_cpu) {3838- pr_crit("platform_cpu_die running on %u, should be %u\n",3939- this_cpu, cpu);4040- BUG();4141- }4242- pr_notice("CPU%u: shutdown\n", cpu);4343- complete(&cpu_killed);4438 flush_cache_all();4539 dsb();4640
+15-51
arch/arm/mach-omap2/omap-smp.c
···2121#include <linux/io.h>22222323#include <asm/cacheflush.h>2424-#include <asm/localtimer.h>2524#include <asm/smp_scu.h>2625#include <mach/hardware.h>2726#include <mach/omap4-common.h>···2829/* SCU base address */2930static void __iomem *scu_base;30313131-/*3232- * Use SCU config register to count number of cores3333- */3434-static inline unsigned int get_core_count(void)3535-{3636- if (scu_base)3737- return scu_get_core_count(scu_base);3838- return 1;3939-}4040-4132static DEFINE_SPINLOCK(boot_lock);42334334void __cpuinit platform_secondary_init(unsigned int cpu)4435{4545- trace_hardirqs_off();4646-4736 /*4837 * If any interrupts are already enabled for the primary4938 * core (e.g. timer irq), then they will not have been enabled···6376 omap_modify_auxcoreboot0(0x200, 0xfffffdff);6477 flush_cache_all();6578 smp_wmb();6666- smp_cross_call(cpumask_of(cpu));7979+ smp_cross_call(cpumask_of(cpu), 1);67806881 /*6982 * Now the secondary core is starting up let it run its···105118 scu_base = ioremap(OMAP44XX_SCU_BASE, SZ_256);106119 BUG_ON(!scu_base);107120108108- ncores = get_core_count();109109-110110- for (i = 0; i < ncores; i++)111111- set_cpu_possible(i, true);112112-}113113-114114-void __init smp_prepare_cpus(unsigned int max_cpus)115115-{116116- unsigned int ncores = get_core_count();117117- unsigned int cpu = smp_processor_id();118118- int i;121121+ ncores = scu_get_core_count(scu_base);119122120123 /* sanity check */121121- if (ncores == 0) {122122- printk(KERN_ERR123123- "OMAP4: strange core count of 0? Default to 1\n");124124- ncores = 1;125125- }126126-127124 if (ncores > NR_CPUS) {128125 printk(KERN_WARNING129126 "OMAP4: no. 
of cores (%d) greater than configured "···115144 ncores, NR_CPUS);116145 ncores = NR_CPUS;117146 }118118- smp_store_cpu_info(cpu);119147120120- /*121121- * are we trying to boot more cores than exist?122122- */123123- if (max_cpus > ncores)124124- max_cpus = ncores;148148+ for (i = 0; i < ncores; i++)149149+ set_cpu_possible(i, true);150150+}151151+152152+void __init platform_smp_prepare_cpus(unsigned int max_cpus)153153+{154154+ int i;125155126156 /*127157 * Initialise the present map, which describes the set of CPUs···131159 for (i = 0; i < max_cpus; i++)132160 set_cpu_present(i, true);133161134134- if (max_cpus > 1) {135135- /*136136- * Enable the local timer or broadcast device for the137137- * boot CPU, but only if we have more than one CPU.138138- */139139- percpu_timer_setup();140140-141141- /*142142- * Initialise the SCU and wake up the secondary core using143143- * wakeup_secondary().144144- */145145- scu_enable(scu_base);146146- wakeup_secondary();147147- }162162+ /*163163+ * Initialise the SCU and wake up the secondary core using164164+ * wakeup_secondary().165165+ */166166+ scu_enable(scu_base);167167+ wakeup_secondary();148168}
···1111#include <linux/kernel.h>1212#include <linux/errno.h>1313#include <linux/smp.h>1414-#include <linux/completion.h>15141615#include <asm/cacheflush.h>17161817extern volatile int pen_release;1919-2020-static DECLARE_COMPLETION(cpu_killed);21182219static inline void cpu_enter_lowpower(void)2320{···3134 " bic %0, %0, #0x20\n"3235 " mcr p15, 0, %0, c1, c0, 1\n"3336 " mrc p15, 0, %0, c1, c0, 0\n"3434- " bic %0, %0, #0x04\n"3737+ " bic %0, %0, %2\n"3538 " mcr p15, 0, %0, c1, c0, 0\n"3639 : "=&r" (v)3737- : "r" (0)4040+ : "r" (0), "Ir" (CR_C)3841 : "cc");3942}4043···4346 unsigned int v;44474548 asm volatile( "mrc p15, 0, %0, c1, c0, 0\n"4646- " orr %0, %0, #0x04\n"4949+ " orr %0, %0, %1\n"4750 " mcr p15, 0, %0, c1, c0, 0\n"4851 " mrc p15, 0, %0, c1, c0, 1\n"4952 " orr %0, %0, #0x20\n"5053 " mcr p15, 0, %0, c1, c0, 1\n"5154 : "=&r" (v)5252- :5555+ : "Ir" (CR_C)5356 : "cc");5457}55585656-static inline void platform_do_lowpower(unsigned int cpu)5959+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)5760{5861 /*5962 * there is no power-control hardware on this platform, so all···7780 }78817982 /*8080- * getting here, means that we have come out of WFI without8383+ * Getting here, means that we have come out of WFI without8184 * having been woken up - this shouldn't happen8285 *8383- * The trouble is, letting people know about this is not really8484- * possible, since we are currently running incoherently, and8585- * therefore cannot safely call printk() or anything else8686+ * Just note it happening - when we're woken, we can report8787+ * its occurrence.8688 */8787-#ifdef DEBUG8888- printk("CPU%u: spurious wakeup call\n", cpu);8989-#endif8989+ (*spurious)++;9090 }9191}92929393int platform_cpu_kill(unsigned int cpu)9494{9595- return wait_for_completion_timeout(&cpu_killed, 5000);9595+ return 1;9696}97979898/*···99105 */100106void platform_cpu_die(unsigned int cpu)101107{102102-#ifdef DEBUG103103- unsigned int this_cpu = 
hard_smp_processor_id();104104-105105- if (cpu != this_cpu) {106106- printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",107107- this_cpu, cpu);108108- BUG();109109- }110110-#endif111111-112112- printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);113113- complete(&cpu_killed);108108+ int spurious = 0;114109115110 /*116111 * we're ready for shutdown now, so do it117112 */118113 cpu_enter_lowpower();119119- platform_do_lowpower(cpu);114114+ platform_do_lowpower(cpu, &spurious);120115121116 /*122117 * bring this CPU back into the world of cache123118 * coherency, and then restore interrupts124119 */125120 cpu_leave_lowpower();121121+122122+ if (spurious)123123+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);126124}127125128126int platform_cpu_disable(unsigned int cpu)
+2-3
arch/arm/mach-realview/include/mach/smp.h
···22#define ASMARM_ARCH_SMP_H3344#include <asm/hardware/gic.h>55-#include <asm/smp_mpidr.h>6576/*87 * We use IRQ1 as the IPI98 */1010-static inline void smp_cross_call(const struct cpumask *mask)99+static inline void smp_cross_call(const struct cpumask *mask, int ipi)1110{1212- gic_raise_softirq(mask, 1);1111+ gic_raise_softirq(mask, ipi);1312}14131514#endif
+37-79
arch/arm/mach-realview/platsmp.c
···1919#include <asm/cacheflush.h>2020#include <mach/hardware.h>2121#include <asm/mach-types.h>2222-#include <asm/localtimer.h>2322#include <asm/unified.h>24232524#include <mach/board-eb.h>···3637 */3738volatile int __cpuinitdata pen_release = -1;38394040+/*4141+ * Write pen_release in a way that is guaranteed to be visible to all4242+ * observers, irrespective of whether they're taking part in coherency4343+ * or not. This is necessary for the hotplug code to work reliably.4444+ */4545+static void write_pen_release(int val)4646+{4747+ pen_release = val;4848+ smp_wmb();4949+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));5050+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));5151+}5252+3953static void __iomem *scu_base_addr(void)4054{4155 if (machine_is_realview_eb_mp())···6250 return (void __iomem *)0;6351}64526565-static inline unsigned int get_core_count(void)6666-{6767- void __iomem *scu_base = scu_base_addr();6868- if (scu_base)6969- return scu_get_core_count(scu_base);7070- return 1;7171-}7272-7353static DEFINE_SPINLOCK(boot_lock);74547555void __cpuinit platform_secondary_init(unsigned int cpu)7656{7777- trace_hardirqs_off();7878-7957 /*8058 * if any interrupts are already enabled for the primary8159 * core (e.g. 
timer irq), then they will not have been enabled···7775 * let the primary processor know we're out of the7876 * pen, then head off into the C entry point7977 */8080- pen_release = -1;8181- smp_wmb();7878+ write_pen_release(-1);82798380 /*8481 * Synchronise with the boot thread.···104103 * Note that "pen_release" is the hardware CPU ID, whereas105104 * "cpu" is Linux's internal ID.106105 */107107- pen_release = cpu;108108- flush_cache_all();106106+ write_pen_release(cpu);109107110108 /*111111- * XXX112112- *113113- * This is a later addition to the booting protocol: the114114- * bootMonitor now puts secondary cores into WFI, so115115- * poke_milo() no longer gets the cores moving; we need116116- * to send a soft interrupt to wake the secondary core.117117- * Use smp_cross_call() for this, since there's little118118- * point duplicating the code here109109+ * Send the secondary CPU a soft interrupt, thereby causing110110+ * the boot monitor to read the system wide flags register,111111+ * and branch to the address found there.119112 */120120- smp_cross_call(cpumask_of(cpu));113113+ smp_cross_call(cpumask_of(cpu), 1);121114122115 timeout = jiffies + (1 * HZ);123116 while (time_before(jiffies, timeout)) {···131136 return pen_release != -1 ? -ENOSYS : 0;132137}133138134134-static void __init poke_milo(void)135135-{136136- /* nobody is to be released from the pen yet */137137- pen_release = -1;138138-139139- /*140140- * Write the address of secondary startup into the system-wide flags141141- * register. 
The BootMonitor waits for this register to become142142- * non-zero.143143- */144144- __raw_writel(BSYM(virt_to_phys(realview_secondary_startup)),145145- __io_address(REALVIEW_SYS_FLAGSSET));146146-147147- mb();148148-}149149-150139/*151140 * Initialise the CPU possible map early - this describes the CPUs152141 * which may be present or become present in the system.153142 */154143void __init smp_init_cpus(void)155144{156156- unsigned int i, ncores = get_core_count();145145+ void __iomem *scu_base = scu_base_addr();146146+ unsigned int i, ncores;157147158158- for (i = 0; i < ncores; i++)159159- set_cpu_possible(i, true);160160-}161161-162162-void __init smp_prepare_cpus(unsigned int max_cpus)163163-{164164- unsigned int ncores = get_core_count();165165- unsigned int cpu = smp_processor_id();166166- int i;148148+ ncores = scu_base ? scu_get_core_count(scu_base) : 1;167149168150 /* sanity check */169169- if (ncores == 0) {170170- printk(KERN_ERR171171- "Realview: strange CM count of 0? Default to 1\n");172172-173173- ncores = 1;174174- }175175-176151 if (ncores > NR_CPUS) {177152 printk(KERN_WARNING178153 "Realview: no. of cores (%d) greater than configured "···151186 ncores = NR_CPUS;152187 }153188154154- smp_store_cpu_info(cpu);189189+ for (i = 0; i < ncores; i++)190190+ set_cpu_possible(i, true);191191+}155192156156- /*157157- * are we trying to boot more cores than exist?158158- */159159- if (max_cpus > ncores)160160- max_cpus = ncores;193193+void __init platform_smp_prepare_cpus(unsigned int max_cpus)194194+{195195+ int i;161196162197 /*163198 * Initialise the present map, which describes the set of CPUs···166201 for (i = 0; i < max_cpus; i++)167202 set_cpu_present(i, true);168203169169- /*170170- * Initialise the SCU if there are more than one CPU and let171171- * them know where to start. 
Note that, on modern versions of172172- * MILO, the "poke" doesn't actually do anything until each173173- * individual core is sent a soft interrupt to get it out of174174- * WFI175175- */176176- if (max_cpus > 1) {177177- /*178178- * Enable the local timer or broadcast device for the179179- * boot CPU, but only if we have more than one CPU.180180- */181181- percpu_timer_setup();204204+ scu_enable(scu_base_addr());182205183183- scu_enable(scu_base_addr());184184- poke_milo();185185- }206206+ /*207207+ * Write the address of secondary startup into the208208+ * system-wide flags register. The BootMonitor waits209209+ * until it receives a soft interrupt, and then the210210+ * secondary CPU branches to this address.211211+ */212212+ __raw_writel(BSYM(virt_to_phys(realview_secondary_startup)),213213+ __io_address(REALVIEW_SYS_FLAGSSET));186214}
+1-1
arch/arm/mach-s3c2412/Kconfig
···5959 Say Y here if you are using the Logitech Jive.60606161config MACH_JIVE_SHOW_BOOTLOADER6262- bool "Allow access to bootloader partitions in MTD"6262+ bool "Allow access to bootloader partitions in MTD (EXPERIMENTAL)"6363 depends on MACH_JIVE && EXPERIMENTAL64646565config MACH_SMDK2413
+15-29
arch/arm/mach-s5pv310/hotplug.c
···1313#include <linux/kernel.h>1414#include <linux/errno.h>1515#include <linux/smp.h>1616-#include <linux/completion.h>17161817#include <asm/cacheflush.h>19182019extern volatile int pen_release;2121-2222-static DECLARE_COMPLETION(cpu_killed);23202421static inline void cpu_enter_lowpower(void)2522{···3033 * Turn off coherency3134 */3235 " mrc p15, 0, %0, c1, c0, 1\n"3333- " bic %0, %0, #0x20\n"3636+ " bic %0, %0, %2\n"3437 " mcr p15, 0, %0, c1, c0, 1\n"3538 " mrc p15, 0, %0, c1, c0, 0\n"3639 " bic %0, %0, #0x04\n"3740 " mcr p15, 0, %0, c1, c0, 0\n"3841 : "=&r" (v)3939- : "r" (0)4242+ : "r" (0), "Ir" (CR_C)4043 : "cc");4144}4245···46494750 asm volatile(4851 "mrc p15, 0, %0, c1, c0, 0\n"4949- " orr %0, %0, #0x04\n"5252+ " orr %0, %0, %1\n"5053 " mcr p15, 0, %0, c1, c0, 0\n"5154 " mrc p15, 0, %0, c1, c0, 1\n"5255 " orr %0, %0, #0x20\n"5356 " mcr p15, 0, %0, c1, c0, 1\n"5457 : "=&r" (v)5555- :5858+ : "Ir" (CR_C)5659 : "cc");5760}58615959-static inline void platform_do_lowpower(unsigned int cpu)6262+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)6063{6164 /*6265 * there is no power-control hardware on this platform, so all···8083 }81848285 /*8383- * getting here, means that we have come out of WFI without8686+ * Getting here, means that we have come out of WFI without8487 * having been woken up - this shouldn't happen8588 *8686- * The trouble is, letting people know about this is not really8787- * possible, since we are currently running incoherently, and8888- * therefore cannot safely call printk() or anything else8989+ * Just note it happening - when we're woken, we can report9090+ * its occurrence.8991 */9090-#ifdef DEBUG9191- printk(KERN_WARN "CPU%u: spurious wakeup call\n", cpu);9292-#endif9292+ (*spurious)++;9393 }9494}95959696int platform_cpu_kill(unsigned int cpu)9797{9898- return wait_for_completion_timeout(&cpu_killed, 5000);9898+ return 1;9999}100100101101/*···102108 */103109void platform_cpu_die(unsigned int cpu)104110{105105-#ifdef 
DEBUG106106- unsigned int this_cpu = hard_smp_processor_id();107107-108108- if (cpu != this_cpu) {109109- printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",110110- this_cpu, cpu);111111- BUG();112112- }113113-#endif114114-115115- printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);116116- complete(&cpu_killed);111111+ int spurious = 0;117112118113 /*119114 * we're ready for shutdown now, so do it120115 */121116 cpu_enter_lowpower();122122- platform_do_lowpower(cpu);117117+ platform_do_lowpower(cpu, &spurious);123118124119 /*125120 * bring this CPU back into the world of cache126121 * coherency, and then restore interrupts127122 */128123 cpu_leave_lowpower();124124+125125+ if (spurious)126126+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);129127}130128131129int platform_cpu_disable(unsigned int cpu)
+2-3
arch/arm/mach-s5pv310/include/mach/smp.h
#define ASM_ARCH_SMP_H __FILE__

#include <asm/hardware/gic.h>

/*
 * Raise a software-generated interrupt (IPI) on the CPUs in @mask via
 * the GIC. The IPI number is supplied by the caller; the platform boot
 * code in this tree passes IPI 1 to wake secondaries out of the
 * boot-monitor holding pen.
 */
static inline void smp_cross_call(const struct cpumask *mask, int ipi)
{
	gic_raise_softirq(mask, ipi);
}

#endif
+23-43
arch/arm/mach-s5pv310/platsmp.c
···2222#include <linux/io.h>23232424#include <asm/cacheflush.h>2525-#include <asm/localtimer.h>2625#include <asm/smp_scu.h>2726#include <asm/unified.h>2827···37383839volatile int __cpuinitdata pen_release = -1;39404141+/*4242+ * Write pen_release in a way that is guaranteed to be visible to all4343+ * observers, irrespective of whether they're taking part in coherency4444+ * or not. This is necessary for the hotplug code to work reliably.4545+ */4646+static void write_pen_release(int val)4747+{4848+ pen_release = val;4949+ smp_wmb();5050+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));5151+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));5252+}5353+4054static void __iomem *scu_base_addr(void)4155{4256 return (void __iomem *)(S5P_VA_SCU);···59476048void __cpuinit platform_secondary_init(unsigned int cpu)6149{6262- trace_hardirqs_off();6363-6450 /*6551 * if any interrupts are already enabled for the primary6652 * core (e.g. timer irq), then they will not have been enabled···7060 * let the primary processor know we're out of the7161 * pen, then head off into the C entry point7262 */7373- pen_release = -1;7474- smp_wmb();6363+ write_pen_release(-1);75647665 /*7766 * Synchronise with the boot thread.···9788 * Note that "pen_release" is the hardware CPU ID, whereas9889 * "cpu" is Linux's internal ID.9990 */100100- pen_release = cpu;101101- __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));102102- outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));9191+ write_pen_release(cpu);1039210493 /*10594 * Send the secondary CPU a soft interrupt, thereby causing10695 * the boot monitor to read the system wide flags register,10796 * and branch to the address found there.10897 */109109- smp_cross_call(cpumask_of(cpu));9898+ smp_cross_call(cpumask_of(cpu), 1);11099111100 timeout = jiffies + (1 * HZ);112101 while (time_before(jiffies, timeout)) {···137130 ncores = scu_base ? 
scu_get_core_count(scu_base) : 1;138131139132 /* sanity check */140140- if (ncores == 0) {141141- printk(KERN_ERR142142- "S5PV310: strange CM count of 0? Default to 1\n");143143-144144- ncores = 1;145145- }146146-147133 if (ncores > NR_CPUS) {148134 printk(KERN_WARNING149135 "S5PV310: no. of cores (%d) greater than configured "···149149 set_cpu_possible(i, true);150150}151151152152-void __init smp_prepare_cpus(unsigned int max_cpus)152152+void __init platform_smp_prepare_cpus(unsigned int max_cpus)153153{154154- unsigned int ncores = num_possible_cpus();155155- unsigned int cpu = smp_processor_id();156154 int i;157157-158158- smp_store_cpu_info(cpu);159159-160160- /* are we trying to boot more cores than exist? */161161- if (max_cpus > ncores)162162- max_cpus = ncores;163155164156 /*165157 * Initialise the present map, which describes the set of CPUs···160168 for (i = 0; i < max_cpus; i++)161169 set_cpu_present(i, true);162170171171+ scu_enable(scu_base_addr());172172+163173 /*164164- * Initialise the SCU if there are more than one CPU and let165165- * them know where to start.174174+ * Write the address of secondary startup into the175175+ * system-wide flags register. The boot monitor waits176176+ * until it receives a soft interrupt, and then the177177+ * secondary CPU branches to this address.166178 */167167- if (max_cpus > 1) {168168- /*169169- * Enable the local timer or broadcast device for the170170- * boot CPU, but only if we have more than one CPU.171171- */172172- percpu_timer_setup();173173-174174- scu_enable(scu_base_addr());175175-176176- /*177177- * Write the address of secondary startup into the178178- * system-wide flags register. The boot monitor waits179179- * until it receives a soft interrupt, and then the180180- * secondary CPU branches to this address.181181- */182179 __raw_writel(BSYM(virt_to_phys(s5pv310_secondary_startup)), S5P_VA_SYSRAM);183183- }184180}
···1111#include <linux/kernel.h>1212#include <linux/errno.h>1313#include <linux/smp.h>1414-#include <linux/completion.h>15141615#include <asm/cacheflush.h>1717-1818-static DECLARE_COMPLETION(cpu_killed);19162017static inline void cpu_enter_lowpower(void)2118{···2629 * Turn off coherency2730 */2831 " mrc p15, 0, %0, c1, c0, 1\n"2929- " bic %0, %0, #0x20\n"3232+ " bic %0, %0, %2\n"3033 " mcr p15, 0, %0, c1, c0, 1\n"3134 " mrc p15, 0, %0, c1, c0, 0\n"3235 " bic %0, %0, #0x04\n"3336 " mcr p15, 0, %0, c1, c0, 0\n"3437 : "=&r" (v)3535- : "r" (0)3838+ : "r" (0), "Ir" (CR_C)3639 : "cc");3740}3841···42454346 asm volatile(4447 "mrc p15, 0, %0, c1, c0, 0\n"4545- " orr %0, %0, #0x04\n"4848+ " orr %0, %0, %1\n"4649 " mcr p15, 0, %0, c1, c0, 0\n"4750 " mrc p15, 0, %0, c1, c0, 1\n"4851 " orr %0, %0, #0x20\n"4952 " mcr p15, 0, %0, c1, c0, 1\n"5053 : "=&r" (v)5151- :5454+ : "Ir" (CR_C)5255 : "cc");5356}54575555-static inline void platform_do_lowpower(unsigned int cpu)5858+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)5659{5760 /*5861 * there is no power-control hardware on this platform, so all···7679 /*}*/77807881 /*7979- * getting here, means that we have come out of WFI without8282+ * Getting here, means that we have come out of WFI without8083 * having been woken up - this shouldn't happen8184 *8282- * The trouble is, letting people know about this is not really8383- * possible, since we are currently running incoherently, and8484- * therefore cannot safely call printk() or anything else8585+ * Just note it happening - when we're woken, we can report8686+ * its occurrence.8587 */8686-#ifdef DEBUG8787- printk(KERN_WARN "CPU%u: spurious wakeup call\n", cpu);8888-#endif8888+ (*spurious)++;8989 }9090}91919292int platform_cpu_kill(unsigned int cpu)9393{9494- return wait_for_completion_timeout(&cpu_killed, 5000);9494+ return 1;9595}96969797/*···98104 */99105void platform_cpu_die(unsigned int cpu)100106{101101-#ifdef DEBUG102102- unsigned int this_cpu = 
hard_smp_processor_id();103103-104104- if (cpu != this_cpu) {105105- printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",106106- this_cpu, cpu);107107- BUG();108108- }109109-#endif110110-111111- printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);112112- complete(&cpu_killed);107107+ int spurious = 0;113108114109 /*115110 * we're ready for shutdown now, so do it116111 */117112 cpu_enter_lowpower();118118- platform_do_lowpower(cpu);113113+ platform_do_lowpower(cpu, &spurious);119114120115 /*121116 * bring this CPU back into the world of cache122117 * coherency, and then restore interrupts123118 */124119 cpu_leave_lowpower();120120+121121+ if (spurious)122122+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);125123}126124127125int platform_cpu_disable(unsigned int cpu)
+2-10
arch/arm/mach-tegra/include/mach/smp.h
#define ASMARM_ARCH_SMP_H

#include <asm/hardware/gic.h>

/*
 * Raise a software-generated interrupt (IPI) on the CPUs in @mask via
 * the GIC. The IPI number to raise is chosen by the caller (previously
 * this was hard-wired to IPI 1).
 */
static inline void smp_cross_call(const struct cpumask *mask, int ipi)
{
	gic_raise_softirq(mask, ipi);
}

#endif
+8-25
arch/arm/mach-tegra/platsmp.c
···2222#include <asm/cacheflush.h>2323#include <mach/hardware.h>2424#include <asm/mach-types.h>2525-#include <asm/localtimer.h>2625#include <asm/smp_scu.h>27262827#include <mach/iomap.h>···40414142void __cpuinit platform_secondary_init(unsigned int cpu)4243{4343- trace_hardirqs_off();4444-4544 /*4645 * if any interrupts are already enabled for the primary4746 * core (e.g. timer irq), then they will not have been enabled···114117{115118 unsigned int i, ncores = scu_get_core_count(scu_base);116119120120+ if (ncores > NR_CPUS) {121121+ printk(KERN_ERR "Tegra: no. of cores (%u) greater than configured (%u), clipping\n",122122+ ncores, NR_CPUS);123123+ ncores = NR_CPUS;124124+ }125125+117126 for (i = 0; i < ncores; i++)118127 cpu_set(i, cpu_possible_map);119128}120129121121-void __init smp_prepare_cpus(unsigned int max_cpus)130130+void __init platform_smp_prepare_cpus(unsigned int max_cpus)122131{123123- unsigned int ncores = scu_get_core_count(scu_base);124124- unsigned int cpu = smp_processor_id();125132 int i;126126-127127- smp_store_cpu_info(cpu);128128-129129- /*130130- * are we trying to boot more cores than exist?131131- */132132- if (max_cpus > ncores)133133- max_cpus = ncores;134133135134 /*136135 * Initialise the present map, which describes the set of CPUs···135142 for (i = 0; i < max_cpus; i++)136143 set_cpu_present(i, true);137144138138- /*139139- * Initialise the SCU if there are more than one CPU and let140140- * them know where to start. Note that, on modern versions of141141- * MILO, the "poke" doesn't actually do anything until each142142- * individual core is sent a soft interrupt to get it out of143143- * WFI144144- */145145- if (max_cpus > 1) {146146- percpu_timer_setup();147147- scu_enable(scu_base);148148- }145145+ scu_enable(scu_base);149146}
···1111#include <linux/kernel.h>1212#include <linux/errno.h>1313#include <linux/smp.h>1414-#include <linux/completion.h>15141615#include <asm/cacheflush.h>17161817extern volatile int pen_release;1919-2020-static DECLARE_COMPLETION(cpu_killed);21182219static inline void platform_do_lowpower(unsigned int cpu)2320{···35383639int platform_cpu_kill(unsigned int cpu)3740{3838- return wait_for_completion_timeout(&cpu_killed, 5000);4141+ return 1;3942}40434144/*···4548 */4649void platform_cpu_die(unsigned int cpu)4750{4848-#ifdef DEBUG4949- unsigned int this_cpu = hard_smp_processor_id();5050-5151- if (cpu != this_cpu) {5252- printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",5353- this_cpu, cpu);5454- BUG();5555- }5656-#endif5757-5858- printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);5959- complete(&cpu_killed);6060-6151 /* directly enter low power state, skipping secure registers */6252 platform_do_lowpower(cpu);6353}
+2-3
arch/arm/mach-ux500/include/mach/smp.h
#define ASMARM_ARCH_SMP_H

#include <asm/hardware/gic.h>

/* This is required to wakeup the secondary core */
extern void u8500_secondary_startup(void);

/*
 * Raise a software-generated interrupt (IPI) on the CPUs in @mask via
 * the GIC; the IPI number is chosen by the caller (the boot path here
 * uses IPI 1 to release secondaries from the holding pen).
 */
static inline void smp_cross_call(const struct cpumask *mask, int ipi)
{
	gic_raise_softirq(mask, ipi);
}
#endif
+28-49
arch/arm/mach-ux500/platsmp.c
···1818#include <linux/io.h>19192020#include <asm/cacheflush.h>2121-#include <asm/localtimer.h>2221#include <asm/smp_scu.h>2322#include <mach/hardware.h>2423···2728 */2829volatile int __cpuinitdata pen_release = -1;29303030-static unsigned int __init get_core_count(void)3131+/*3232+ * Write pen_release in a way that is guaranteed to be visible to all3333+ * observers, irrespective of whether they're taking part in coherency3434+ * or not. This is necessary for the hotplug code to work reliably.3535+ */3636+static void write_pen_release(int val)3137{3232- return scu_get_core_count(__io_address(UX500_SCU_BASE));3838+ pen_release = val;3939+ smp_wmb();4040+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));4141+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));3342}34433544static DEFINE_SPINLOCK(boot_lock);36453746void __cpuinit platform_secondary_init(unsigned int cpu)3847{3939- trace_hardirqs_off();4040-4148 /*4249 * if any interrupts are already enabled for the primary4350 * core (e.g. 
timer irq), then they will not have been enabled···5550 * let the primary processor know we're out of the5651 * pen, then head off into the C entry point5752 */5858- pen_release = -1;5353+ write_pen_release(-1);59546055 /*6156 * Synchronise with the boot thread.···7974 * the holding pen - release it, then wait for it to flag8075 * that it has been released by resetting pen_release.8176 */8282- pen_release = cpu;8383- __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));8484- outer_clean_range(__pa(&pen_release), __pa(&pen_release) + 1);7777+ write_pen_release(cpu);85788686- smp_cross_call(cpumask_of(cpu));7979+ smp_cross_call(cpumask_of(cpu), 1);87808881 timeout = jiffies + (1 * HZ);8982 while (time_before(jiffies, timeout)) {···1009710198static void __init wakeup_secondary(void)10299{103103- /* nobody is to be released from the pen yet */104104- pen_release = -1;105105-106100 /*107101 * write the address of secondary startup into the backup ram register108102 * at offset 0x1FF4, then write the magic number 0xA1FEED01 to the···126126 */127127void __init smp_init_cpus(void)128128{129129- unsigned int i, ncores = get_core_count();129129+ unsigned int i, ncores;130130+131131+ ncores = scu_get_core_count(__io_address(UX500_SCU_BASE));132132+133133+ /* sanity check */134134+ if (ncores > NR_CPUS) {135135+ printk(KERN_WARNING136136+ "U8500: no. of cores (%d) greater than configured "137137+ "maximum of %d - clipping\n",138138+ ncores, NR_CPUS);139139+ ncores = NR_CPUS;140140+ }130141131142 for (i = 0; i < ncores; i++)132143 set_cpu_possible(i, true);133144}134145135135-void __init smp_prepare_cpus(unsigned int max_cpus)146146+void __init platform_smp_prepare_cpus(unsigned int max_cpus)136147{137137- unsigned int ncores = get_core_count();138138- unsigned int cpu = smp_processor_id();139148 int i;140140-141141- /* sanity check */142142- if (ncores == 0) {143143- printk(KERN_ERR144144- "U8500: strange CM count of 0? 
Default to 1\n");145145- ncores = 1;146146- }147147-148148- if (ncores > num_possible_cpus()) {149149- printk(KERN_WARNING150150- "U8500: no. of cores (%d) greater than configured "151151- "maximum of %d - clipping\n",152152- ncores, num_possible_cpus());153153- ncores = num_possible_cpus();154154- }155155-156156- smp_store_cpu_info(cpu);157157-158158- /*159159- * are we trying to boot more cores than exist?160160- */161161- if (max_cpus > ncores)162162- max_cpus = ncores;163149164150 /*165151 * Initialise the present map, which describes the set of CPUs···154168 for (i = 0; i < max_cpus; i++)155169 set_cpu_present(i, true);156170157157- if (max_cpus > 1) {158158- /*159159- * Enable the local timer or broadcast device for the160160- * boot CPU, but only if we have more than one CPU.161161- */162162- percpu_timer_setup();163163- scu_enable(__io_address(UX500_SCU_BASE));164164- wakeup_secondary();165165- }171171+ scu_enable(__io_address(UX500_SCU_BASE));172172+ wakeup_secondary();166173}
+1
arch/arm/mach-versatile/Kconfig
···44config ARCH_VERSATILE_PB55 bool "Support Versatile/PB platform"66 select CPU_ARM926T77+ select MIGHT_HAVE_PCI78 default y89 help910 Include support for the ARM(R) Versatile/PB platform.
/*
 * linux/arch/arm/mach-realview/hotplug.c
 *
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>

/* Holding-pen token, owned by the platform SMP boot code (platsmp.c). */
extern volatile int pen_release;

/*
 * Prepare this CPU for power-down: flush and disable the data cache and
 * take the core out of SMP coherency, so it can safely sit in WFI while
 * incoherent with the rest of the system.
 */
static inline void cpu_enter_lowpower(void)
{
	unsigned int v;

	flush_cache_all();
	asm volatile(
	/* invalidate I-cache; drain write buffer (DSB) */
	"mcr	p15, 0, %1, c7, c5, 0\n"
	"	mcr	p15, 0, %1, c7, c10, 4\n"
	/*
	 * Turn off coherency
	 * NOTE(review): 0x40 is presumably the SMP/coherency bit in this
	 * core's Auxiliary Control Register - confirm against the TRM for
	 * the CPUs fitted to RealView boards.
	 */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	bic	%0, %0, %3\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	/* clear the C (dcache enable) bit in the System Control Register */
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	bic	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	  : "=&r" (v)
	  : "r" (0), "Ir" (CR_C), "Ir" (0x40)
	  : "cc");
}

/*
 * Undo cpu_enter_lowpower(): re-enable the data cache and rejoin SMP
 * coherency (reverse order of the disable sequence above).
 */
static inline void cpu_leave_lowpower(void)
{
	unsigned int v;

	asm volatile(
	"mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");
}

/*
 * Park the CPU in WFI until the boot code writes our CPU number into
 * pen_release. Wakeups that occur while pen_release != cpu are counted
 * in *spurious rather than printed, because we are running incoherently
 * here and cannot safely call printk().
 */
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	/*
	 * there is no power-control hardware on this platform, so all
	 * we can do is put the core into WFI; this is safe as the calling
	 * code will have already disabled interrupts
	 */
	for (;;) {
		/*
		 * here's the WFI (hand-encoded so pre-ARMv7 assemblers
		 * accept it)
		 */
		asm(".word	0xe320f003\n"
		    :
		    :
		    : "memory", "cc");

		if (pen_release == cpu) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}

/*
 * Report whether the given CPU was successfully killed. There is no
 * power-control hardware, so the "kill" always succeeds.
 */
int platform_cpu_kill(unsigned int cpu)
{
	return 1;
}

/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
void platform_cpu_die(unsigned int cpu)
{
	int spurious = 0;

	/*
	 * we're ready for shutdown now, so do it
	 */
	cpu_enter_lowpower();
	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower();

	/* now that printk() is safe again, report any spurious wakeups */
	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}

int platform_cpu_disable(unsigned int cpu)
{
	/*
	 * we don't allow CPU 0 to be shutdown (it is still too special
	 * e.g. clock tick interrupts)
	 */
	return cpu == 0 ? -EPERM : 0;
}
+2-3
arch/arm/mach-vexpress/include/mach/smp.h
#define __MACH_SMP_H

#include <asm/hardware/gic.h>

/*
 * Raise a software-generated interrupt (IPI) on the CPUs in @mask via
 * the GIC. The IPI number is supplied by the caller; the boot path in
 * this tree uses IPI 1 to release secondaries from the holding pen.
 */
static inline void smp_cross_call(const struct cpumask *mask, int ipi)
{
	gic_raise_softirq(mask, ipi);
}
#endif
+26-48
arch/arm/mach-vexpress/platsmp.c
···1717#include <linux/io.h>18181919#include <asm/cacheflush.h>2020-#include <asm/localtimer.h>2120#include <asm/smp_scu.h>2221#include <asm/unified.h>2322···3435 */3536volatile int __cpuinitdata pen_release = -1;36373838+/*3939+ * Write pen_release in a way that is guaranteed to be visible to all4040+ * observers, irrespective of whether they're taking part in coherency4141+ * or not. This is necessary for the hotplug code to work reliably.4242+ */4343+static void write_pen_release(int val)4444+{4545+ pen_release = val;4646+ smp_wmb();4747+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));4848+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));4949+}5050+3751static void __iomem *scu_base_addr(void)3852{3953 return MMIO_P2V(A9_MPCORE_SCU);···56445745void __cpuinit platform_secondary_init(unsigned int cpu)5846{5959- trace_hardirqs_off();6060-6147 /*6248 * if any interrupts are already enabled for the primary6349 * core (e.g. timer irq), then they will not have been enabled···6757 * let the primary processor know we're out of the6858 * pen, then head off into the C entry point6959 */7070- pen_release = -1;7171- smp_wmb();6060+ write_pen_release(-1);72617362 /*7463 * Synchronise with the boot thread.···9283 * since we haven't sent them a soft interrupt, they shouldn't9384 * be there.9485 */9595- pen_release = cpu;9696- __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));9797- outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));8686+ write_pen_release(cpu);98879988 /*10089 * Send the secondary CPU a soft interrupt, thereby causing10190 * the boot monitor to read the system wide flags register,10291 * and branch to the address found there.10392 */104104- smp_cross_call(cpumask_of(cpu));9393+ smp_cross_call(cpumask_of(cpu), 1);1059410695 timeout = jiffies + (1 * HZ);10796 while (time_before(jiffies, timeout)) {···131124 ncores = scu_base ? 
scu_get_core_count(scu_base) : 1;132125133126 /* sanity check */134134- if (ncores == 0) {135135- printk(KERN_ERR136136- "vexpress: strange CM count of 0? Default to 1\n");137137-138138- ncores = 1;139139- }140140-141127 if (ncores > NR_CPUS) {142128 printk(KERN_WARNING143129 "vexpress: no. of cores (%d) greater than configured "···143143 set_cpu_possible(i, true);144144}145145146146-void __init smp_prepare_cpus(unsigned int max_cpus)146146+void __init platform_smp_prepare_cpus(unsigned int max_cpus)147147{148148- unsigned int ncores = num_possible_cpus();149149- unsigned int cpu = smp_processor_id();150148 int i;151151-152152- smp_store_cpu_info(cpu);153153-154154- /*155155- * are we trying to boot more cores than exist?156156- */157157- if (max_cpus > ncores)158158- max_cpus = ncores;159149160150 /*161151 * Initialise the present map, which describes the set of CPUs···154164 for (i = 0; i < max_cpus; i++)155165 set_cpu_present(i, true);156166167167+ scu_enable(scu_base_addr());168168+157169 /*158158- * Initialise the SCU if there are more than one CPU and let159159- * them know where to start.170170+ * Write the address of secondary startup into the171171+ * system-wide flags register. The boot monitor waits172172+ * until it receives a soft interrupt, and then the173173+ * secondary CPU branches to this address.160174 */161161- if (max_cpus > 1) {162162- /*163163- * Enable the local timer or broadcast device for the164164- * boot CPU, but only if we have more than one CPU.165165- */166166- percpu_timer_setup();167167-168168- scu_enable(scu_base_addr());169169-170170- /*171171- * Write the address of secondary startup into the172172- * system-wide flags register. 
The boot monitor waits173173- * until it receives a soft interrupt, and then the174174- * secondary CPU branches to this address.175175- */176176- writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR));177177- writel(BSYM(virt_to_phys(vexpress_secondary_startup)),178178- MMIO_P2V(V2M_SYS_FLAGSSET));179179- }175175+ writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR));176176+ writel(BSYM(virt_to_phys(vexpress_secondary_startup)),177177+ MMIO_P2V(V2M_SYS_FLAGSSET));180178}
···1010 * the Free Software Foundation; either version 2 of the License.1111 */12121313-#include <asm/clkdev.h>1313+#include <linux/clkdev.h>14141515void nuc900_clk_enable(struct clk *clk, int enable);1616void nuc900_subclk_enable(struct clk *clk, int enable);
+35
arch/arm/mm/Kconfig
···599599 help600600 Processor has the CP15 register, which has MPU related registers.601601602602+config CPU_USE_DOMAINS603603+ bool604604+ depends on MMU605605+ default y if !CPU_32v6K606606+ help607607+ This option enables or disables the use of domain switching608608+ via the set_fs() function.609609+602610#603611# CPU supports 36-bit I/O604612#···635627 help636628 Say Y here if you have a CPU with the ThumbEE extension and code to637629 make use of it. Say N for code that can run on CPUs without ThumbEE.630630+631631+config SWP_EMULATE632632+ bool "Emulate SWP/SWPB instructions"633633+ depends on CPU_V7634634+ select HAVE_PROC_CPU if PROC_FS635635+ default y if SMP636636+ help637637+ ARMv6 architecture deprecates use of the SWP/SWPB instructions.638638+ ARMv7 multiprocessing extensions introduce the ability to disable639639+ these instructions, triggering an undefined instruction exception640640+ when executed. Say Y here to enable software emulation of these641641+ instructions for userspace (not kernel) using LDREX/STREX.642642+ Also creates /proc/cpu/swp_emulation for statistics.643643+644644+ In some older versions of glibc [<=2.8] SWP is used during futex645645+ trylock() operations with the assumption that the code will not646646+ be preempted. This invalid assumption may be more likely to fail647647+ with SWP emulation enabled, leading to deadlock of the user648648+ application.649649+650650+ NOTE: when accessing uncached shared regions, LDREX/STREX rely651651+ on an external transaction monitoring block called a global652652+ monitor to maintain update atomicity. If your system does not653653+ implement a global monitor, this option can cause programs that654654+ perform SWP operations to uncached memory to deadlock.655655+656656+ If unsure, say Y.638657639658config CPU_BIG_ENDIAN640659 bool "Build big-endian kernel"
···204204 /*205205 * Don't allow RAM to be mapped - this causes problems with ARMv6+206206 */207207- if (pfn_valid(pfn)) {208208- printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n"209209- "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"210210- "will fail in the next kernel release. Please fix your driver.\n");211211- WARN_ON(1);212212- }207207+ if (WARN_ON(pfn_valid(pfn)))208208+ return NULL;213209214210 type = get_mem_type(mtype);215211 if (!type)
+3-3
arch/arm/mm/mmu.c
···2424#include <asm/smp_plat.h>2525#include <asm/tlb.h>2626#include <asm/highmem.h>2727+#include <asm/traps.h>27282829#include <asm/mach/arch.h>2930#include <asm/mach/map.h>···915914{916915 struct map_desc map;917916 unsigned long addr;918918- void *vectors;919917920918 /*921919 * Allocate the vector page early.922920 */923923- vectors = early_alloc(PAGE_SIZE);921921+ vectors_page = early_alloc(PAGE_SIZE);924922925923 for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)926924 pmd_clear(pmd_off_k(addr));···959959 * location (0xffff0000). If we aren't using high-vectors, also960960 * create a mapping at the low-vectors virtual address.961961 */962962- map.pfn = __phys_to_pfn(virt_to_phys(vectors));962962+ map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));963963 map.virtual = 0xffff0000;964964 map.length = PAGE_SIZE;965965 map.type = MT_HIGH_VECTORS;
···1010 */1111#include <linux/module.h>1212#include <linux/types.h>1313+#include <linux/cpu.h>1314#include <linux/kernel.h>1515+#include <linux/notifier.h>1416#include <linux/signal.h>1517#include <linux/sched.h>1818+#include <linux/smp.h>1619#include <linux/init.h>17201821#include <asm/cputype.h>···487484 put_cpu();488485}489486490490-#include <linux/smp.h>487487+/*488488+ * VFP hardware can lose all context when a CPU goes offline.489489+ * Safely clear our held state when a CPU has been killed, and490490+ * re-enable access to VFP when the CPU comes back online.491491+ *492492+ * Both CPU_DYING and CPU_STARTING are called on the CPU which493493+ * is being offlined/onlined.494494+ */495495+static int vfp_hotplug(struct notifier_block *b, unsigned long action,496496+ void *hcpu)497497+{498498+ if (action == CPU_DYING || action == CPU_DYING_FROZEN) {499499+ unsigned int cpu = (long)hcpu;500500+ last_VFP_context[cpu] = NULL;501501+ } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)502502+ vfp_enable(NULL);503503+ return NOTIFY_OK;504504+}491505492506/*493507 * VFP support code initialisation.···534514 else if (vfpsid & FPSID_NODOUBLE) {535515 printk("no double precision support\n");536516 } else {517517+ hotcpu_notifier(vfp_hotplug, 0);518518+537519 smp_call_function(vfp_enable, NULL, 1);538520539521 VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */
···4848 return ret;4949}50505151-/*5252- * Returns a clock. Note that we first try to use device id on the bus5353- * and clock name. If this fails, we try to use clock name only.5454- */5555-struct clk *clk_get(struct device *dev, const char *con_id)5656-{5757- const char *dev_id = dev ? dev_name(dev) : NULL;5858-5959- return clk_get_sys(dev_id, con_id);6060-}6161-EXPORT_SYMBOL_GPL(clk_get);6262-6363-void clk_put(struct clk *clk)6464-{6565-}6666-EXPORT_SYMBOL_GPL(clk_put);6751
···147147 clk_put(pclk);148148}149149150150+static int amba_get_enable_vcore(struct amba_device *pcdev)151151+{152152+ struct regulator *vcore = regulator_get(&pcdev->dev, "vcore");153153+ int ret;154154+155155+ pcdev->vcore = vcore;156156+157157+ if (IS_ERR(vcore)) {158158+ /* It is OK not to supply a vcore regulator */159159+ if (PTR_ERR(vcore) == -ENODEV)160160+ return 0;161161+ return PTR_ERR(vcore);162162+ }163163+164164+ ret = regulator_enable(vcore);165165+ if (ret) {166166+ regulator_put(vcore);167167+ pcdev->vcore = ERR_PTR(-ENODEV);168168+ }169169+170170+ return ret;171171+}172172+173173+static void amba_put_disable_vcore(struct amba_device *pcdev)174174+{175175+ struct regulator *vcore = pcdev->vcore;176176+177177+ if (!IS_ERR(vcore)) {178178+ regulator_disable(vcore);179179+ regulator_put(vcore);180180+ }181181+}182182+150183/*151184 * These are the device model conversion veneers; they convert the152185 * device model structures to our more specific structures.···192159 int ret;193160194161 do {162162+ ret = amba_get_enable_vcore(pcdev);163163+ if (ret)164164+ break;165165+195166 ret = amba_get_enable_pclk(pcdev);196167 if (ret)197168 break;···205168 break;206169207170 amba_put_disable_pclk(pcdev);171171+ amba_put_disable_vcore(pcdev);208172 } while (0);209173210174 return ret;···218180 int ret = drv->remove(pcdev);219181220182 amba_put_disable_pclk(pcdev);183183+ amba_put_disable_vcore(pcdev);221184222185 return ret;223186}
···1818#include <linux/device.h>1919#include <linux/err.h>2020#include <linux/resource.h>2121+#include <linux/regulator/consumer.h>21222223#define AMBA_NR_IRQS 22324#define AMBA_CID 0xb105f00d···2928 struct device dev;3029 struct resource res;3130 struct clk *pclk;3131+ struct regulator *vcore;3232 u64 dma_mask;3333 unsigned int periphid;3434 unsigned int irq[AMBA_NR_IRQS];···72707371#define amba_pclk_disable(d) \7472 do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0)7373+7474+#define amba_vcore_enable(d) \7575+ (IS_ERR((d)->vcore) ? 0 : regulator_enable((d)->vcore))7676+7777+#define amba_vcore_disable(d) \7878+ do { if (!IS_ERR((d)->vcore)) regulator_disable((d)->vcore); } while (0)75797680/* Some drivers don't use the struct amba_device */7781#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
+36
include/linux/clkdev.h
···11+/*22+ * include/linux/clkdev.h33+ *44+ * Copyright (C) 2008 Russell King.55+ *66+ * This program is free software; you can redistribute it and/or modify77+ * it under the terms of the GNU General Public License version 2 as88+ * published by the Free Software Foundation.99+ *1010+ * Helper for the clk API to assist looking up a struct clk.1111+ */1212+#ifndef __CLKDEV_H1313+#define __CLKDEV_H1414+1515+#include <asm/clkdev.h>1616+1717+struct clk;1818+struct device;1919+2020+struct clk_lookup {2121+ struct list_head node;2222+ const char *dev_id;2323+ const char *con_id;2424+ struct clk *clk;2525+};2626+2727+struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,2828+ const char *dev_fmt, ...);2929+3030+void clkdev_add(struct clk_lookup *cl);3131+void clkdev_drop(struct clk_lookup *cl);3232+3333+void clkdev_add_table(struct clk_lookup *, size_t);3434+int clk_add_alias(const char *, const char *, char *, struct device *);3535+3636+#endif