/*
 * Meta cache partition manipulation.
 *
 * Copyright 2010 Imagination Technologies Ltd.
 */

#ifndef _METAG_CACHEPART_H_
#define _METAG_CACHEPART_H_

/**
 * get_dcache_size() - Get size of data cache.
 */
unsigned int get_dcache_size(void);

/**
 * get_icache_size() - Get size of code cache.
 */
unsigned int get_icache_size(void);

/**
 * get_global_dcache_size() - Get the thread's global dcache.
 *
 * Returns the size of the current thread's global dcache partition.
 */
unsigned int get_global_dcache_size(void);

/**
 * get_global_icache_size() - Get the thread's global icache.
 *
 * Returns the size of the current thread's global icache partition.
 */
unsigned int get_global_icache_size(void);

/**
 * check_for_cache_aliasing() - Ensure that the bootloader has configured the
 * dcache and icache properly to avoid aliasing
 * @thread_id: Hardware thread ID
 */
void check_for_cache_aliasing(int thread_id);

#endif
+35
arch/metag/include/asm/core_reg.h
···11+#ifndef __ASM_METAG_CORE_REG_H_22+#define __ASM_METAG_CORE_REG_H_33+44+#include <asm/metag_regs.h>55+66+extern void core_reg_write(int unit, int reg, int thread, unsigned int val);77+extern unsigned int core_reg_read(int unit, int reg, int thread);88+99+/*1010+ * These macros allow direct access from C to any register known to the1111+ * assembler. Example candidates are TXTACTCYC, TXIDLECYC, and TXPRIVEXT.1212+ */1313+1414+#define __core_reg_get(reg) ({ \1515+ unsigned int __grvalue; \1616+ asm volatile("MOV %0," #reg \1717+ : "=r" (__grvalue)); \1818+ __grvalue; \1919+})2020+2121+#define __core_reg_set(reg, value) do { \2222+ unsigned int __srvalue = (value); \2323+ asm volatile("MOV " #reg ",%0" \2424+ : \2525+ : "r" (__srvalue)); \2626+} while (0)2727+2828+#define __core_reg_swap(reg, value) do { \2929+ unsigned int __srvalue = (value); \3030+ asm volatile("SWAP " #reg ",%0" \3131+ : "+r" (__srvalue)); \3232+ (value) = __srvalue; \3333+} while (0)3434+3535+#endif
···11+/*22+ * Meta cache partition manipulation.33+ *44+ * Copyright 2010 Imagination Technologies Ltd.55+ */66+77+#include <linux/kernel.h>88+#include <linux/io.h>99+#include <linux/errno.h>1010+#include <asm/processor.h>1111+#include <asm/cachepart.h>1212+#include <asm/metag_isa.h>1313+#include <asm/metag_mem.h>1414+1515+#define SYSC_DCPART(n) (SYSC_DCPART0 + SYSC_xCPARTn_STRIDE * (n))1616+#define SYSC_ICPART(n) (SYSC_ICPART0 + SYSC_xCPARTn_STRIDE * (n))1717+1818+#define CACHE_ASSOCIATIVITY 4 /* 4 way set-assosiative */1919+#define ICACHE 02020+#define DCACHE 12121+2222+/* The CORE_CONFIG2 register is not available on Meta 1 */2323+#ifdef CONFIG_METAG_META212424+unsigned int get_dcache_size(void)2525+{2626+ unsigned int config2 = metag_in32(METAC_CORE_CONFIG2);2727+ return 0x1000 << ((config2 & METAC_CORECFG2_DCSZ_BITS)2828+ >> METAC_CORECFG2_DCSZ_S);2929+}3030+3131+unsigned int get_icache_size(void)3232+{3333+ unsigned int config2 = metag_in32(METAC_CORE_CONFIG2);3434+ return 0x1000 << ((config2 & METAC_CORE_C2ICSZ_BITS)3535+ >> METAC_CORE_C2ICSZ_S);3636+}3737+3838+unsigned int get_global_dcache_size(void)3939+{4040+ unsigned int cpart = metag_in32(SYSC_DCPART(hard_processor_id()));4141+ unsigned int temp = cpart & SYSC_xCPARTG_AND_BITS;4242+ return (get_dcache_size() * ((temp >> SYSC_xCPARTG_AND_S) + 1)) >> 4;4343+}4444+4545+unsigned int get_global_icache_size(void)4646+{4747+ unsigned int cpart = metag_in32(SYSC_ICPART(hard_processor_id()));4848+ unsigned int temp = cpart & SYSC_xCPARTG_AND_BITS;4949+ return (get_icache_size() * ((temp >> SYSC_xCPARTG_AND_S) + 1)) >> 4;5050+}5151+5252+static unsigned int get_thread_cache_size(unsigned int cache, int thread_id)5353+{5454+ unsigned int cache_size;5555+ unsigned int t_cache_part;5656+ unsigned int isEnabled;5757+ unsigned int offset = 0;5858+ isEnabled = (cache == DCACHE ? 
metag_in32(MMCU_DCACHE_CTRL_ADDR) & 0x1 :5959+ metag_in32(MMCU_ICACHE_CTRL_ADDR) & 0x1);6060+ if (!isEnabled)6161+ return 0;6262+#if PAGE_OFFSET >= LINGLOBAL_BASE6363+ /* Checking for global cache */6464+ cache_size = (cache == DCACHE ? get_global_dache_size() :6565+ get_global_icache_size());6666+ offset = 8;6767+#else6868+ cache_size = (cache == DCACHE ? get_dcache_size() :6969+ get_icache_size());7070+#endif7171+ t_cache_part = (cache == DCACHE ?7272+ (metag_in32(SYSC_DCPART(thread_id)) >> offset) & 0xF :7373+ (metag_in32(SYSC_ICPART(thread_id)) >> offset) & 0xF);7474+ switch (t_cache_part) {7575+ case 0xF:7676+ return cache_size;7777+ case 0x7:7878+ return cache_size / 2;7979+ case 0x3:8080+ return cache_size / 4;8181+ case 0x1:8282+ return cache_size / 8;8383+ case 0:8484+ return cache_size / 16;8585+ }8686+ return -1;8787+}8888+8989+void check_for_cache_aliasing(int thread_id)9090+{9191+ unsigned int thread_cache_size;9292+ unsigned int cache_type;9393+ for (cache_type = ICACHE; cache_type <= DCACHE; cache_type++) {9494+ thread_cache_size =9595+ get_thread_cache_size(cache_type, thread_id);9696+ if (thread_cache_size < 0)9797+ pr_emerg("Can't read %s cache size", \9898+ cache_type ? "DCACHE" : "ICACHE");9999+ else if (thread_cache_size == 0)100100+ /* Cache is off. No need to check for aliasing */101101+ continue;102102+ if (thread_cache_size / CACHE_ASSOCIATIVITY > PAGE_SIZE) {103103+ pr_emerg("Cache aliasing detected in %s on Thread %d",104104+ cache_type ? "DCACHE" : "ICACHE", thread_id);105105+ pr_warn("Total %s size: %u bytes",106106+ cache_type ? "DCACHE" : "ICACHE ",107107+ cache_type ? get_dcache_size()108108+ : get_icache_size());109109+ pr_warn("Thread %s size: %d bytes",110110+ cache_type ? "CACHE" : "ICACHE",111111+ thread_cache_size);112112+ pr_warn("Page Size: %lu bytes", PAGE_SIZE);113113+ }114114+ }115115+}116116+117117+#else118118+119119+void check_for_cache_aliasing(int thread_id)120120+{121121+ return;122122+}123123+124124+#endif
+117
arch/metag/kernel/core_reg.c
···11+/*22+ * Support for reading and writing Meta core internal registers.33+ *44+ * Copyright (C) 2011 Imagination Technologies Ltd.55+ *66+ */77+88+#include <linux/delay.h>99+#include <linux/export.h>1010+1111+#include <asm/core_reg.h>1212+#include <asm/global_lock.h>1313+#include <asm/hwthread.h>1414+#include <asm/io.h>1515+#include <asm/metag_mem.h>1616+#include <asm/metag_regs.h>1717+1818+#define UNIT_BIT_MASK TXUXXRXRQ_UXX_BITS1919+#define REG_BIT_MASK TXUXXRXRQ_RX_BITS2020+#define THREAD_BIT_MASK TXUXXRXRQ_TX_BITS2121+2222+#define UNIT_SHIFTS TXUXXRXRQ_UXX_S2323+#define REG_SHIFTS TXUXXRXRQ_RX_S2424+#define THREAD_SHIFTS TXUXXRXRQ_TX_S2525+2626+#define UNIT_VAL(x) (((x) << UNIT_SHIFTS) & UNIT_BIT_MASK)2727+#define REG_VAL(x) (((x) << REG_SHIFTS) & REG_BIT_MASK)2828+#define THREAD_VAL(x) (((x) << THREAD_SHIFTS) & THREAD_BIT_MASK)2929+3030+/*3131+ * core_reg_write() - modify the content of a register in a core unit.3232+ * @unit: The unit to be modified.3333+ * @reg: Register number within the unit.3434+ * @thread: The thread we want to access.3535+ * @val: The new value to write.3636+ *3737+ * Check asm/metag_regs.h for a list/defines of supported units (ie: TXUPC_ID,3838+ * TXUTR_ID, etc), and regnums within the units (ie: TXMASKI_REGNUM,3939+ * TXPOLLI_REGNUM, etc).4040+ */4141+void core_reg_write(int unit, int reg, int thread, unsigned int val)4242+{4343+ unsigned long flags;4444+4545+ /* TXUCT_ID has its own memory mapped registers */4646+ if (unit == TXUCT_ID) {4747+ void __iomem *cu_reg = __CU_addr(thread, reg);4848+ metag_out32(val, cu_reg);4949+ return;5050+ }5151+5252+ __global_lock2(flags);5353+5454+ /* wait for ready */5555+ while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))5656+ udelay(10);5757+5858+ /* set the value to write */5959+ metag_out32(val, TXUXXRXDT);6060+6161+ /* set the register to write */6262+ val = UNIT_VAL(unit) | REG_VAL(reg) | THREAD_VAL(thread);6363+ metag_out32(val, TXUXXRXRQ);6464+6565+ /* wait for finish */6666+ while 
(!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))6767+ udelay(10);6868+6969+ __global_unlock2(flags);7070+}7171+EXPORT_SYMBOL(core_reg_write);7272+7373+/*7474+ * core_reg_read() - read the content of a register in a core unit.7575+ * @unit: The unit to be modified.7676+ * @reg: Register number within the unit.7777+ * @thread: The thread we want to access.7878+ *7979+ * Check asm/metag_regs.h for a list/defines of supported units (ie: TXUPC_ID,8080+ * TXUTR_ID, etc), and regnums within the units (ie: TXMASKI_REGNUM,8181+ * TXPOLLI_REGNUM, etc).8282+ */8383+unsigned int core_reg_read(int unit, int reg, int thread)8484+{8585+ unsigned long flags;8686+ unsigned int val;8787+8888+ /* TXUCT_ID has its own memory mapped registers */8989+ if (unit == TXUCT_ID) {9090+ void __iomem *cu_reg = __CU_addr(thread, reg);9191+ val = metag_in32(cu_reg);9292+ return val;9393+ }9494+9595+ __global_lock2(flags);9696+9797+ /* wait for ready */9898+ while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))9999+ udelay(10);100100+101101+ /* set the register to read */102102+ val = (UNIT_VAL(unit) | REG_VAL(reg) | THREAD_VAL(thread) |103103+ TXUXXRXRQ_RDnWR_BIT);104104+ metag_out32(val, TXUXXRXRQ);105105+106106+ /* wait for finish */107107+ while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))108108+ udelay(10);109109+110110+ /* read the register value */111111+ val = metag_in32(TXUXXRXDT);112112+113113+ __global_unlock2(flags);114114+115115+ return val;116116+}117117+EXPORT_SYMBOL(core_reg_read);
···11+/*22+ * Copyright (C) 2009,2010,2011 Imagination Technologies Ltd.33+ *44+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.55+ *66+ * This program is free software; you can redistribute it and/or modify77+ * it under the terms of the GNU General Public License version 2 as88+ * published by the Free Software Foundation.99+ */1010+#include <linux/atomic.h>1111+#include <linux/delay.h>1212+#include <linux/init.h>1313+#include <linux/spinlock.h>1414+#include <linux/sched.h>1515+#include <linux/interrupt.h>1616+#include <linux/cache.h>1717+#include <linux/profile.h>1818+#include <linux/errno.h>1919+#include <linux/mm.h>2020+#include <linux/err.h>2121+#include <linux/cpu.h>2222+#include <linux/smp.h>2323+#include <linux/seq_file.h>2424+#include <linux/irq.h>2525+#include <linux/bootmem.h>2626+2727+#include <asm/cacheflush.h>2828+#include <asm/cachepart.h>2929+#include <asm/core_reg.h>3030+#include <asm/cpu.h>3131+#include <asm/mmu_context.h>3232+#include <asm/pgtable.h>3333+#include <asm/pgalloc.h>3434+#include <asm/processor.h>3535+#include <asm/setup.h>3636+#include <asm/tlbflush.h>3737+#include <asm/hwthread.h>3838+#include <asm/traps.h>3939+4040+DECLARE_PER_CPU(PTBI, pTBI);4141+4242+void *secondary_data_stack;4343+4444+/*4545+ * structures for inter-processor calls4646+ * - A collection of single bit ipi messages.4747+ */4848+struct ipi_data {4949+ spinlock_t lock;5050+ unsigned long ipi_count;5151+ unsigned long bits;5252+};5353+5454+static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {5555+ .lock = __SPIN_LOCK_UNLOCKED(ipi_data.lock),5656+};5757+5858+static DEFINE_SPINLOCK(boot_lock);5959+6060+/*6161+ * "thread" is assumed to be a valid Meta hardware thread ID.6262+ */6363+int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)6464+{6565+ u32 val;6666+6767+ /*6868+ * set synchronisation state between this boot processor6969+ * and the secondary one7070+ */7171+ spin_lock(&boot_lock);7272+7373+ core_reg_write(TXUPC_ID, 0, thread, 
(unsigned int)secondary_startup);7474+ core_reg_write(TXUPC_ID, 1, thread, 0);7575+7676+ /*7777+ * Give the thread privilege (PSTAT) and clear potentially problematic7878+ * bits in the process (namely ISTAT, CBMarker, CBMarkerI, LSM_STEP).7979+ */8080+ core_reg_write(TXUCT_ID, TXSTATUS_REGNUM, thread, TXSTATUS_PSTAT_BIT);8181+8282+ /* Clear the minim enable bit. */8383+ val = core_reg_read(TXUCT_ID, TXPRIVEXT_REGNUM, thread);8484+ core_reg_write(TXUCT_ID, TXPRIVEXT_REGNUM, thread, val & ~0x80);8585+8686+ /*8787+ * set the ThreadEnable bit (0x1) in the TXENABLE register8888+ * for the specified thread - off it goes!8989+ */9090+ val = core_reg_read(TXUCT_ID, TXENABLE_REGNUM, thread);9191+ core_reg_write(TXUCT_ID, TXENABLE_REGNUM, thread, val | 0x1);9292+9393+ /*9494+ * now the secondary core is starting up let it run its9595+ * calibrations, then wait for it to finish9696+ */9797+ spin_unlock(&boot_lock);9898+9999+ return 0;100100+}101101+102102+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)103103+{104104+ unsigned int thread = cpu_2_hwthread_id[cpu];105105+ int ret;106106+107107+ load_pgd(swapper_pg_dir, thread);108108+109109+ flush_tlb_all();110110+111111+ /*112112+ * Tell the secondary CPU where to find its idle thread's stack.113113+ */114114+ secondary_data_stack = task_stack_page(idle);115115+116116+ wmb();117117+118118+ /*119119+ * Now bring the CPU into our world.120120+ */121121+ ret = boot_secondary(thread, idle);122122+ if (ret == 0) {123123+ unsigned long timeout;124124+125125+ /*126126+ * CPU was successfully started, wait for it127127+ * to come online or time out.128128+ */129129+ timeout = jiffies + HZ;130130+ while (time_before(jiffies, timeout)) {131131+ if (cpu_online(cpu))132132+ break;133133+134134+ udelay(10);135135+ barrier();136136+ }137137+138138+ if (!cpu_online(cpu))139139+ ret = -EIO;140140+ }141141+142142+ secondary_data_stack = NULL;143143+144144+ if (ret) {145145+ pr_crit("CPU%u: processor failed to boot\n", 
cpu);146146+147147+ /*148148+ * FIXME: We need to clean up the new idle thread. --rmk149149+ */150150+ }151151+152152+ return ret;153153+}154154+155155+#ifdef CONFIG_HOTPLUG_CPU156156+static DECLARE_COMPLETION(cpu_killed);157157+158158+/*159159+ * __cpu_disable runs on the processor to be shutdown.160160+ */161161+int __cpuexit __cpu_disable(void)162162+{163163+ unsigned int cpu = smp_processor_id();164164+ struct task_struct *p;165165+166166+ /*167167+ * Take this CPU offline. Once we clear this, we can't return,168168+ * and we must not schedule until we're ready to give up the cpu.169169+ */170170+ set_cpu_online(cpu, false);171171+172172+ /*173173+ * OK - migrate IRQs away from this CPU174174+ */175175+ migrate_irqs();176176+177177+ /*178178+ * Flush user cache and TLB mappings, and then remove this CPU179179+ * from the vm mask set of all processes.180180+ */181181+ flush_cache_all();182182+ local_flush_tlb_all();183183+184184+ read_lock(&tasklist_lock);185185+ for_each_process(p) {186186+ if (p->mm)187187+ cpumask_clear_cpu(cpu, mm_cpumask(p->mm));188188+ }189189+ read_unlock(&tasklist_lock);190190+191191+ return 0;192192+}193193+194194+/*195195+ * called on the thread which is asking for a CPU to be shutdown -196196+ * waits until shutdown has completed, or it is timed out.197197+ */198198+void __cpuexit __cpu_die(unsigned int cpu)199199+{200200+ if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1)))201201+ pr_err("CPU%u: unable to kill\n", cpu);202202+}203203+204204+/*205205+ * Called from the idle thread for the CPU which has been shutdown.206206+ *207207+ * Note that we do not return from this function. 
If this cpu is208208+ * brought online again it will need to run secondary_startup().209209+ */210210+void __cpuexit cpu_die(void)211211+{212212+ local_irq_disable();213213+ idle_task_exit();214214+215215+ complete(&cpu_killed);216216+217217+ asm ("XOR TXENABLE, D0Re0,D0Re0\n");218218+}219219+#endif /* CONFIG_HOTPLUG_CPU */220220+221221+/*222222+ * Called by both boot and secondaries to move global data into223223+ * per-processor storage.224224+ */225225+void __cpuinit smp_store_cpu_info(unsigned int cpuid)226226+{227227+ struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid);228228+229229+ cpu_info->loops_per_jiffy = loops_per_jiffy;230230+}231231+232232+/*233233+ * This is the secondary CPU boot entry. We're using this CPUs234234+ * idle thread stack and the global page tables.235235+ */236236+asmlinkage void secondary_start_kernel(void)237237+{238238+ struct mm_struct *mm = &init_mm;239239+ unsigned int cpu = smp_processor_id();240240+241241+ /*242242+ * All kernel threads share the same mm context; grab a243243+ * reference and switch to it.244244+ */245245+ atomic_inc(&mm->mm_users);246246+ atomic_inc(&mm->mm_count);247247+ current->active_mm = mm;248248+ cpumask_set_cpu(cpu, mm_cpumask(mm));249249+ enter_lazy_tlb(mm, current);250250+ local_flush_tlb_all();251251+252252+ /*253253+ * TODO: Some day it might be useful for each Linux CPU to254254+ * have its own TBI structure. 
That would allow each Linux CPU255255+ * to run different interrupt handlers for the same IRQ256256+ * number.257257+ *258258+ * For now, simply copying the pointer to the boot CPU's TBI259259+ * structure is sufficient because we always want to run the260260+ * same interrupt handler whatever CPU takes the interrupt.261261+ */262262+ per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT);263263+264264+ if (!per_cpu(pTBI, cpu))265265+ panic("No TBI found!");266266+267267+ per_cpu_trap_init(cpu);268268+269269+ preempt_disable();270270+271271+ setup_txprivext();272272+273273+ /*274274+ * Enable local interrupts.275275+ */276276+ tbi_startup_interrupt(TBID_SIGNUM_TRT);277277+ notify_cpu_starting(cpu);278278+ local_irq_enable();279279+280280+ pr_info("CPU%u (thread %u): Booted secondary processor\n",281281+ cpu, cpu_2_hwthread_id[cpu]);282282+283283+ calibrate_delay();284284+ smp_store_cpu_info(cpu);285285+286286+ /*287287+ * OK, now it's safe to let the boot CPU continue288288+ */289289+ set_cpu_online(cpu, true);290290+291291+ /*292292+ * Check for cache aliasing.293293+ * Preemption is disabled294294+ */295295+ check_for_cache_aliasing(cpu);296296+297297+ /*298298+ * OK, it's off to the idle thread for us299299+ */300300+ cpu_idle();301301+}302302+303303+void __init smp_cpus_done(unsigned int max_cpus)304304+{305305+ int cpu;306306+ unsigned long bogosum = 0;307307+308308+ for_each_online_cpu(cpu)309309+ bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;310310+311311+ pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",312312+ num_online_cpus(),313313+ bogosum / (500000/HZ),314314+ (bogosum / (5000/HZ)) % 100);315315+}316316+317317+void __init smp_prepare_cpus(unsigned int max_cpus)318318+{319319+ unsigned int cpu = smp_processor_id();320320+321321+ init_new_context(current, &init_mm);322322+ current_thread_info()->cpu = cpu;323323+324324+ smp_store_cpu_info(cpu);325325+ init_cpu_present(cpu_possible_mask);326326+}327327+328328+void __init 
smp_prepare_boot_cpu(void)329329+{330330+ unsigned int cpu = smp_processor_id();331331+332332+ per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT);333333+334334+ if (!per_cpu(pTBI, cpu))335335+ panic("No TBI found!");336336+}337337+338338+static void smp_cross_call(cpumask_t callmap, enum ipi_msg_type msg);339339+340340+static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)341341+{342342+ unsigned long flags;343343+ unsigned int cpu;344344+ cpumask_t map;345345+346346+ cpumask_clear(&map);347347+ local_irq_save(flags);348348+349349+ for_each_cpu(cpu, mask) {350350+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);351351+352352+ spin_lock(&ipi->lock);353353+354354+ /*355355+ * KICK interrupts are queued in hardware so we'll get356356+ * multiple interrupts if we call smp_cross_call()357357+ * multiple times for one msg. The problem is that we358358+ * only have one bit for each message - we can't queue359359+ * them in software.360360+ *361361+ * The first time through ipi_handler() we'll clear362362+ * the msg bit, having done all the work. But when we363363+ * return we'll get _another_ interrupt (and another,364364+ * and another until we've handled all the queued365365+ * KICKs). 
Running ipi_handler() when there's no work366366+ * to do is bad because that's how kick handler367367+ * chaining detects who the KICK was intended for.368368+ * See arch/metag/kernel/kick.c for more details.369369+ *370370+ * So only add 'cpu' to 'map' if we haven't already371371+ * queued a KICK interrupt for 'msg'.372372+ */373373+ if (!(ipi->bits & (1 << msg))) {374374+ ipi->bits |= 1 << msg;375375+ cpumask_set_cpu(cpu, &map);376376+ }377377+378378+ spin_unlock(&ipi->lock);379379+ }380380+381381+ /*382382+ * Call the platform specific cross-CPU call function.383383+ */384384+ smp_cross_call(map, msg);385385+386386+ local_irq_restore(flags);387387+}388388+389389+void arch_send_call_function_ipi_mask(const struct cpumask *mask)390390+{391391+ send_ipi_message(mask, IPI_CALL_FUNC);392392+}393393+394394+void arch_send_call_function_single_ipi(int cpu)395395+{396396+ send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);397397+}398398+399399+void show_ipi_list(struct seq_file *p)400400+{401401+ unsigned int cpu;402402+403403+ seq_puts(p, "IPI:");404404+405405+ for_each_present_cpu(cpu)406406+ seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);407407+408408+ seq_putc(p, '\n');409409+}410410+411411+static DEFINE_SPINLOCK(stop_lock);412412+413413+/*414414+ * Main handler for inter-processor interrupts415415+ *416416+ * For Meta, the ipimask now only identifies a single417417+ * category of IPI (Bit 1 IPIs have been replaced by a418418+ * different mechanism):419419+ *420420+ * Bit 0 - Inter-processor function call421421+ */422422+static int do_IPI(struct pt_regs *regs)423423+{424424+ unsigned int cpu = smp_processor_id();425425+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);426426+ struct pt_regs *old_regs = set_irq_regs(regs);427427+ unsigned long msgs, nextmsg;428428+ int handled = 0;429429+430430+ ipi->ipi_count++;431431+432432+ spin_lock(&ipi->lock);433433+ msgs = ipi->bits;434434+ nextmsg = msgs & -msgs;435435+ ipi->bits &= ~nextmsg;436436+ 
spin_unlock(&ipi->lock);437437+438438+ if (nextmsg) {439439+ handled = 1;440440+441441+ nextmsg = ffz(~nextmsg);442442+ switch (nextmsg) {443443+ case IPI_RESCHEDULE:444444+ scheduler_ipi();445445+ break;446446+447447+ case IPI_CALL_FUNC:448448+ generic_smp_call_function_interrupt();449449+ break;450450+451451+ case IPI_CALL_FUNC_SINGLE:452452+ generic_smp_call_function_single_interrupt();453453+ break;454454+455455+ default:456456+ pr_crit("CPU%u: Unknown IPI message 0x%lx\n",457457+ cpu, nextmsg);458458+ break;459459+ }460460+ }461461+462462+ set_irq_regs(old_regs);463463+464464+ return handled;465465+}466466+467467+void smp_send_reschedule(int cpu)468468+{469469+ send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);470470+}471471+472472+static void stop_this_cpu(void *data)473473+{474474+ unsigned int cpu = smp_processor_id();475475+476476+ if (system_state == SYSTEM_BOOTING ||477477+ system_state == SYSTEM_RUNNING) {478478+ spin_lock(&stop_lock);479479+ pr_crit("CPU%u: stopping\n", cpu);480480+ dump_stack();481481+ spin_unlock(&stop_lock);482482+ }483483+484484+ set_cpu_online(cpu, false);485485+486486+ local_irq_disable();487487+488488+ hard_processor_halt(HALT_OK);489489+}490490+491491+void smp_send_stop(void)492492+{493493+ smp_call_function(stop_this_cpu, NULL, 0);494494+}495495+496496+/*497497+ * not supported here498498+ */499499+int setup_profiling_timer(unsigned int multiplier)500500+{501501+ return -EINVAL;502502+}503503+504504+/*505505+ * We use KICKs for inter-processor interrupts.506506+ *507507+ * For every CPU in "callmap" the IPI data must already have been508508+ * stored in that CPU's "ipi_data" member prior to calling this509509+ * function.510510+ */511511+static void kick_raise_softirq(cpumask_t callmap, unsigned int irq)512512+{513513+ int cpu;514514+515515+ for_each_cpu(cpu, &callmap) {516516+ unsigned int thread;517517+518518+ thread = cpu_2_hwthread_id[cpu];519519+520520+ BUG_ON(thread == BAD_HWTHREAD_ID);521521+522522+ metag_out32(1, 
T0KICKI + (thread * TnXKICK_STRIDE));523523+ }524524+}525525+526526+static TBIRES ipi_handler(TBIRES State, int SigNum, int Triggers,527527+ int Inst, PTBI pTBI, int *handled)528528+{529529+ *handled = do_IPI((struct pt_regs *)State.Sig.pCtx);530530+531531+ return State;532532+}533533+534534+static struct kick_irq_handler ipi_irq = {535535+ .func = ipi_handler,536536+};537537+538538+static void smp_cross_call(cpumask_t callmap, enum ipi_msg_type msg)539539+{540540+ kick_raise_softirq(callmap, 1);541541+}542542+543543+static inline unsigned int get_core_count(void)544544+{545545+ int i;546546+ unsigned int ret = 0;547547+548548+ for (i = 0; i < CONFIG_NR_CPUS; i++) {549549+ if (core_reg_read(TXUCT_ID, TXENABLE_REGNUM, i))550550+ ret++;551551+ }552552+553553+ return ret;554554+}555555+556556+/*557557+ * Initialise the CPU possible map early - this describes the CPUs558558+ * which may be present or become present in the system.559559+ */560560+void __init smp_init_cpus(void)561561+{562562+ unsigned int i, ncores = get_core_count();563563+564564+ /* If no hwthread_map early param was set use default mapping */565565+ for (i = 0; i < NR_CPUS; i++)566566+ if (cpu_2_hwthread_id[i] == BAD_HWTHREAD_ID) {567567+ cpu_2_hwthread_id[i] = i;568568+ hwthread_id_2_cpu[i] = i;569569+ }570570+571571+ for (i = 0; i < ncores; i++)572572+ set_cpu_possible(i, true);573573+574574+ kick_register_func(&ipi_irq);575575+}
+77
arch/metag/kernel/topology.c
···11+/*22+ * Copyright (C) 2007 Paul Mundt33+ * Copyright (C) 2010 Imagination Technolohies Ltd.44+ *55+ * This file is subject to the terms and conditions of the GNU General Public66+ * License. See the file "COPYING" in the main directory of this archive77+ * for more details.88+ */99+#include <linux/cpu.h>1010+#include <linux/cpumask.h>1111+#include <linux/init.h>1212+#include <linux/percpu.h>1313+#include <linux/node.h>1414+#include <linux/nodemask.h>1515+#include <linux/topology.h>1616+1717+#include <asm/cpu.h>1818+1919+DEFINE_PER_CPU(struct cpuinfo_metag, cpu_data);2020+2121+cpumask_t cpu_core_map[NR_CPUS];2222+2323+static cpumask_t cpu_coregroup_map(unsigned int cpu)2424+{2525+ return *cpu_possible_mask;2626+}2727+2828+const struct cpumask *cpu_coregroup_mask(unsigned int cpu)2929+{3030+ return &cpu_core_map[cpu];3131+}3232+3333+int arch_update_cpu_topology(void)3434+{3535+ unsigned int cpu;3636+3737+ for_each_possible_cpu(cpu)3838+ cpu_core_map[cpu] = cpu_coregroup_map(cpu);3939+4040+ return 0;4141+}4242+4343+static int __init topology_init(void)4444+{4545+ int i, ret;4646+4747+#ifdef CONFIG_NEED_MULTIPLE_NODES4848+ for_each_online_node(i)4949+ register_one_node(i);5050+#endif5151+5252+ for_each_present_cpu(i) {5353+ struct cpuinfo_metag *cpuinfo = &per_cpu(cpu_data, i);5454+#ifdef CONFIG_HOTPLUG_CPU5555+ cpuinfo->cpu.hotpluggable = 1;5656+#endif5757+ ret = register_cpu(&cpuinfo->cpu, i);5858+ if (unlikely(ret))5959+ pr_warn("%s: register_cpu %d failed (%d)\n",6060+ __func__, i, ret);6161+ }6262+6363+#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP)6464+ /*6565+ * In the UP case, make sure the CPU association is still6666+ * registered under each node. Without this, sysfs fails6767+ * to make the connection between nodes other than node06868+ * and cpu0.6969+ */7070+ for_each_online_node(i)7171+ if (i != numa_node_id())7272+ register_cpu_under_node(raw_smp_processor_id(), i);7373+#endif7474+7575+ return 0;7676+}7777+subsys_initcall(topology_init);