
ARC: SMP support

ARC common code to enable an SMP system + ISS provided SMP extensions.

ARC700 natively lacks SMP support, hence some of the core features are
only enabled if SoCs have the necessary h/w pixie-dust. This
includes:
-Inter Processor Interrupts (IPI)
-Cache coherency
-load-locked/store-conditional
...

The low level exception handling would be completely broken in SMP
because we don't have hardware assisted stack switching. Thus a fair bit
of this code repurposes the MMU_SCRATCH reg in the event handler
prologues to keep them re-entrant.
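As an illustrative aside (a C model of the asm pattern in the entry.h
hunk below; prologue_freeup_reg is a hypothetical name), the prologue
free-up of a scratch reg differs between UP and SMP roughly as:

    /* UP: a single global save slot suffices - only one CPU exists */
    static unsigned long ex_saved_reg1;

    static inline void prologue_freeup_reg(unsigned long reg)
    {
    #ifdef CONFIG_SMP
        /* per-CPU scratch aux reg: another core taking the same
         * exception concurrently can't clobber it */
        write_aux_reg(ARC_REG_SCRATCH_DATA0, reg);
    #else
        ex_saved_reg1 = reg;
    #endif
    }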

Many thanks to Rajeshwar Ranga for his initial "major" contributions to
the SMP port (back in 2008), and to Noam Camus and Gilad Ben-Yossef for
help with resurrecting it in the 3.2 kernel (2012).

Note that this platform code again follows the singleton design pattern
- so multiple SMP platforms won't build at the moment; this deficiency
is addressed in subsequent patches within this series.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Rajeshwar Ranga <rajeshwar.ranga@gmail.com>
Cc: Noam Camus <noamc@ezchip.com>
Cc: Gilad Ben-Yossef <gilad@benyossef.com>

+960 -2
+38 -1
arch/arc/Kconfig
··· 116 116 help 117 117 Build kernel for Big Endian Mode of ARC CPU 118 118 119 + config SMP 120 + bool "Symmetric Multi-Processing (Incomplete)" 121 + default n 122 + select USE_GENERIC_SMP_HELPERS 123 + help 124 + This enables support for systems with more than one CPU. If you have 125 + a system with only one CPU, like most personal computers, say N. If 126 + you have a system with more than one CPU, say Y. 127 + 128 + if SMP 129 + 130 + config ARC_HAS_COH_CACHES 131 + def_bool n 132 + 133 + config ARC_HAS_COH_LLSC 134 + def_bool n 135 + 136 + config ARC_HAS_COH_RTSC 137 + def_bool n 138 + 139 + config ARC_HAS_REENTRANT_IRQ_LV2 140 + def_bool n 141 + 142 + endif 143 + 144 + config NR_CPUS 145 + int "Maximum number of CPUs (2-32)" 146 + range 2 32 147 + depends on SMP 148 + default "2" 149 + 119 150 menuconfig ARC_CACHE 120 151 bool "Enable Cache Support" 121 152 default y 153 + # if SMP, cache enabled ONLY if ARC implementation has cache coherency 154 + depends on !SMP || ARC_HAS_COH_CACHES 122 155 123 156 if ARC_CACHE 124 157 ··· 246 213 default n 247 214 # Timer HAS to be high priority, for any other high priority config 248 215 select ARC_IRQ3_LV2 216 + # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy 217 + depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2 249 218 250 219 if ARC_COMPACT_IRQ_LEVELS 251 220 ··· 296 261 bool "Insn: RTSC (64-bit r/o cycle counter)" 297 262 default y 298 263 depends on ARC_CPU_REL_4_10 264 + # if SMP, enable RTSC only if counter is coherent across cores 265 + depends on !SMP || ARC_HAS_COH_RTSC 299 266 300 267 endmenu # "ARC CPU Configuration" 301 268 ··· 346 309 347 310 config ARC_DBG_TLB_PARANOIA 348 311 bool "Paranoia Checks in Low Level TLB Handlers" 349 - depends on ARC_DBG 312 + depends on ARC_DBG && !SMP 350 313 default n 351 314 352 315 config ARC_DBG_TLB_MISS_COUNT
+3
arch/arc/Makefile
··· 133 133 # Thus forcing all exten calls in this file to be long calls 134 134 export CFLAGS_decompress_inflate.o = -mmedium-calls 135 135 export CFLAGS_initramfs.o = -mmedium-calls 136 + ifdef CONFIG_SMP 137 + export CFLAGS_core.o = -mmedium-calls 138 + endif
+49
arch/arc/include/asm/entry.h
··· 389 389 * to be saved again on kernel mode stack, as part of ptregs. 390 390 *-------------------------------------------------------------*/ 391 391 .macro EXCPN_PROLOG_FREEUP_REG reg 392 + #ifdef CONFIG_SMP 393 + sr \reg, [ARC_REG_SCRATCH_DATA0] 394 + #else 392 395 st \reg, [@ex_saved_reg1] 396 + #endif 393 397 .endm 394 398 395 399 .macro EXCPN_PROLOG_RESTORE_REG reg 400 + #ifdef CONFIG_SMP 401 + lr \reg, [ARC_REG_SCRATCH_DATA0] 402 + #else 396 403 ld \reg, [@ex_saved_reg1] 404 + #endif 397 405 .endm 398 406 399 407 /*-------------------------------------------------------------- ··· 516 508 /* restore original r9 , saved in int1_saved_reg 517 509 * It will be saved on stack in macro: SAVE_CALLER_SAVED 518 510 */ 511 + #ifdef CONFIG_SMP 512 + lr r9, [ARC_REG_SCRATCH_DATA0] 513 + #else 519 514 ld r9, [@int1_saved_reg] 515 + #endif 520 516 521 517 /* now we are ready to save the remaining context :) */ 522 518 st orig_r8_IS_IRQ1, [sp, 8] /* Event Type */ ··· 651 639 bmsk \reg, \reg, 7 652 640 .endm 653 641 642 + #ifdef CONFIG_SMP 643 + 644 + /*------------------------------------------------- 645 + * Retrieve the current running task on this CPU 646 + * 1. Determine curr CPU id. 647 + * 2. Use it to index into _current_task[ ] 648 + */ 649 + .macro GET_CURR_TASK_ON_CPU reg 650 + GET_CPU_ID \reg 651 + ld.as \reg, [@_current_task, \reg] 652 + .endm 653 + 654 + /*------------------------------------------------- 655 + * Save a new task as the "current" task on this CPU 656 + * 1. Determine curr CPU id. 657 + * 2. Use it to index into _current_task[ ] 658 + * 659 + * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS) 660 + * because ST r0, [r1, offset] can ONLY have s9 @offset 661 + * while LD can take s9 (4 byte insn) or LIMM (8 byte insn) 662 + */ 663 + 664 + .macro SET_CURR_TASK_ON_CPU tsk, tmp 665 + GET_CPU_ID \tmp 666 + add2 \tmp, @_current_task, \tmp 667 + st \tsk, [\tmp] 668 + #ifdef CONFIG_ARC_CURR_IN_REG 669 + mov r25, \tsk 670 + #endif 671 + 672 + .endm 673 + 674 + 675 + #else /* Uniprocessor implementation of macros */ 676 + 654 677 .macro GET_CURR_TASK_ON_CPU reg 655 678 ld \reg, [@_current_task] 656 679 .endm ··· 696 649 mov r25, \tsk 697 650 #endif 698 651 .endm 652 + 653 + #endif /* SMP / UNI */ 699 654 700 655 /* ------------------------------------------------------------------ 701 656 * Get the ptr to some field of Current Task at @off in task struct
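For readers less fluent in ARC asm, a rough C equivalent of the SMP
variants above (a sketch; the helper names are hypothetical, and the
cpu-id living in bits [15:8] of the IDENTITY aux reg follows the
GET_CPU_ID usage elsewhere in this patch):

    extern struct task_struct *_current_task[];  /* one slot per CPU */

    static inline struct task_struct *get_curr_task_on_cpu(void)
    {
        unsigned int cpu = (read_aux_reg(0x4 /* IDENTITY */) >> 8) & 0xff;

        return _current_task[cpu];  /* LD.AS scales the index by 4 */
    }

    static inline void set_curr_task_on_cpu(struct task_struct *tsk)
    {
        unsigned int cpu = (read_aux_reg(0x4 /* IDENTITY */) >> 8) & 0xff;

        _current_task[cpu] = tsk;   /* ADD2 + ST, since ST only takes
                                     * a short (s9) offset */
    }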
+4
arch/arc/include/asm/mmu_context.h
··· 147 147 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 148 148 struct task_struct *tsk) 149 149 { 150 + #ifndef CONFIG_SMP 150 151 /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */ 151 152 write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd); 153 + #endif 152 154 153 155 /* 154 156 * Get a new ASID if task doesn't have a valid one. Possible when ··· 199 197 200 198 static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) 201 199 { 200 + #ifndef CONFIG_SMP 202 201 write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd); 202 + #endif 203 203 204 204 /* Unconditionally get a new ASID */ 205 205 get_new_mmu_context(next);
+9
arch/arc/include/asm/mutex.h
··· 6 6 * published by the Free Software Foundation. 7 7 */ 8 8 9 + /* 10 + * xchg() based mutex fast path maintains a state of 0 or 1, as opposed to 11 + * atomic dec based which can "count" any number of lock contenders. 12 + * This ideally needs to be fixed in core, but for now we switch to the dec version. 13 + */ 14 + #if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2) 15 + #include <asm-generic/mutex-dec.h> 16 + #else 9 17 #include <asm-generic/mutex-xchg.h> 18 + #endif
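The difference, sketched against the asm-generic fast paths of that era
(shapes modelled on asm-generic/mutex-xchg.h and mutex-dec.h; treat as
a paraphrase, not a verbatim quote): the xchg flavour can only represent
unlocked (1) or locked (0), so with more than two CPUs a second waiter
arriving while the count is already 0 leaves no trace, whereas the dec
flavour lets the count go negative and thus "counts" contenders.

    /* mutex-xchg.h flavour: state collapses to 0 or 1 */
    static inline void fastpath_lock_xchg(atomic_t *count,
                                          void (*fail_fn)(atomic_t *))
    {
        if (unlikely(atomic_xchg(count, 0) != 1))
            fail_fn(count);                 /* contended: slow path */
    }

    /* mutex-dec.h flavour: negative count records waiters */
    static inline void fastpath_lock_dec(atomic_t *count,
                                         void (*fail_fn)(atomic_t *))
    {
        if (unlikely(atomic_dec_return(count) < 0))
            fail_fn(count);
    }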
+4
arch/arc/include/asm/pgtable.h
··· 354 354 * Thus use this macro only when you are certain that "current" is current 355 355 * e.g. when dealing with signal frame setup code etc 356 356 */ 357 + #ifndef CONFIG_SMP 357 358 #define pgd_offset_fast(mm, addr) \ 358 359 ({ \ 359 360 pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0); \ 360 361 pgd_base + pgd_index(addr); \ 361 362 }) 363 + #else 364 + #define pgd_offset_fast(mm, addr) pgd_offset(mm, addr) 365 + #endif 362 366 363 367 extern void paging_init(void); 364 368 extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
+8
arch/arc/include/asm/processor.h
··· 58 58 /* Prepare to copy thread state - unlazy all lazy status */ 59 59 #define prepare_to_copy(tsk) do { } while (0) 60 60 61 + /* 62 + * A lot of busy-wait loops in SMP are based on non-volatile data which would 63 + * otherwise get optimised away by gcc 64 + */ 65 + #ifdef CONFIG_SMP 66 + #define cpu_relax() __asm__ __volatile__ ("" : : : "memory") 67 + #else 61 68 #define cpu_relax() do { } while (0) 69 + #endif 62 70 63 71 #define copy_segments(tsk, mm) do { } while (0) 64 72 #define release_segments(mm) do { } while (0)
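For instance, in a wait loop like this sketch (secondary_up is a
hypothetical flag set by another CPU), gcc could otherwise load the flag
once and spin forever; the "memory" clobber forces a re-load on every
iteration:

    static int secondary_up;

    static void wait_for_secondary(void)
    {
        while (!secondary_up)
            cpu_relax();    /* compiler barrier: re-read secondary_up */
    }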
+107
arch/arc/include/asm/smp.h
··· 9 9 #ifndef __ASM_ARC_SMP_H 10 10 #define __ASM_ARC_SMP_H 11 11 12 + #ifdef CONFIG_SMP 13 + 14 + #include <linux/types.h> 15 + #include <linux/init.h> 16 + #include <linux/threads.h> 17 + 18 + #define raw_smp_processor_id() (current_thread_info()->cpu) 19 + 20 + /* including cpumask.h leads to cyclic deps hence this Forward declaration */ 21 + struct cpumask; 22 + 23 + /* 24 + * APIs provided by arch SMP code to generic code 25 + */ 26 + extern void arch_send_call_function_single_ipi(int cpu); 27 + extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 28 + 29 + /* 30 + * APIs provided by arch SMP code to rest of arch code 31 + */ 32 + extern void __init smp_init_cpus(void); 33 + extern void __init first_lines_of_secondary(void); 34 + 35 + /* 36 + * API expected BY platform smp code (FROM arch smp code) 37 + * 38 + * smp_ipi_irq_setup: 39 + * Takes @cpu and @irq to which the arch-common ISR is hooked up 40 + */ 41 + extern int smp_ipi_irq_setup(int cpu, int irq); 42 + 43 + /* 44 + * APIs expected FROM platform smp code 45 + * 46 + * arc_platform_smp_cpuinfo: 47 + * returns a string containing info for /proc/cpuinfo 48 + * 49 + * arc_platform_smp_init_cpu: 50 + * Called from start_kernel_secondary to do any CPU local setup 51 + * such as starting a timer, setting up IPI etc 52 + * 53 + * arc_platform_smp_wait_to_boot: 54 + * Called from early bootup code for non-Master CPUs to "park" them 55 + * 56 + * arc_platform_smp_wakeup_cpu: 57 + * Called from __cpu_up (Master CPU) to kick start another one 58 + * 59 + * arc_platform_ipi_send: 60 + * Takes @cpumask to which IPI(s) would be sent. 61 + * The actual msg-id/buffer is managed in arch-common code 62 + * 63 + * arc_platform_ipi_clear: 64 + * Takes @cpu which got IPI at @irq to do any IPI clearing 65 + */ 66 + extern const char *arc_platform_smp_cpuinfo(void); 67 + extern void arc_platform_smp_init_cpu(void); 68 + extern void arc_platform_smp_wait_to_boot(int cpu); 69 + extern void arc_platform_smp_wakeup_cpu(int cpu, unsigned long pc); 70 + extern void arc_platform_ipi_send(const struct cpumask *callmap); 71 + extern void arc_platform_ipi_clear(int cpu, int irq); 72 + 73 + #endif /* CONFIG_SMP */ 74 + 12 75 /* 13 76 * ARC700 doesn't support atomic Read-Modify-Write ops. 14 77 * Originally Interrupts had to be disabled around code to gaurantee atomicity. ··· 81 18 * 82 19 * (1) These insn were introduced only in 4.10 release. So for older released 83 20 * support needed. 21 + * 22 + * (2) In an SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be 23 + * guaranteed by the platform (not something which core handles). 24 + * Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ 25 + * disabling for atomicity. 26 + * 27 + * However exported spinlock API is not usable due to cyclic hdr deps 28 + * (even after system.h disintegration upstream) 29 + * asm/bitops.h -> linux/spinlock.h -> linux/preempt.h 30 + * -> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h 31 + * 32 + * So the workaround is to use the lowest level arch spinlock API. 
33 + * The exported spinlock API is smart enough to be a NOP for !CONFIG_SMP, 34 + * but the same is not true for the ARCH backend, hence the need for 2 variants 84 35 */ 85 36 #ifndef CONFIG_ARC_HAS_LLSC 86 37 87 38 #include <linux/irqflags.h> 39 + #ifdef CONFIG_SMP 40 + 41 + #include <asm/spinlock.h> 42 + 43 + extern arch_spinlock_t smp_atomic_ops_lock; 44 + extern arch_spinlock_t smp_bitops_lock; 45 + 46 + #define atomic_ops_lock(flags) do { \ 47 + local_irq_save(flags); \ 48 + arch_spin_lock(&smp_atomic_ops_lock); \ 49 + } while (0) 50 + 51 + #define atomic_ops_unlock(flags) do { \ 52 + arch_spin_unlock(&smp_atomic_ops_lock); \ 53 + local_irq_restore(flags); \ 54 + } while (0) 55 + 56 + #define bitops_lock(flags) do { \ 57 + local_irq_save(flags); \ 58 + arch_spin_lock(&smp_bitops_lock); \ 59 + } while (0) 60 + 61 + #define bitops_unlock(flags) do { \ 62 + arch_spin_unlock(&smp_bitops_lock); \ 63 + local_irq_restore(flags); \ 64 + } while (0) 65 + 66 + #else /* !CONFIG_SMP */ 88 67 89 68 #define atomic_ops_lock(flags) local_irq_save(flags) 90 69 #define atomic_ops_unlock(flags) local_irq_restore(flags) 91 70 92 71 #define bitops_lock(flags) local_irq_save(flags) 93 72 #define bitops_unlock(flags) local_irq_restore(flags) 73 + 74 + #endif /* !CONFIG_SMP */ 94 75 95 76 #endif /* !CONFIG_ARC_HAS_LLSC */ 96 77
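Typical use of these helpers, modelled on the !LLSC atomics in
arch/arc/include/asm/atomic.h (a minimal sketch):

    static inline void atomic_set(atomic_t *v, int i)
    {
        unsigned long flags;

        /* SMP: irq-off + arch spinlock; UP: irq-off alone suffices */
        atomic_ops_lock(flags);
        v->counter = i;
        atomic_ops_unlock(flags);
    }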
+1
arch/arc/kernel/Makefile
··· 13 13 obj-y += devtree.o 14 14 15 15 obj-$(CONFIG_MODULES) += arcksyms.o module.o 16 + obj-$(CONFIG_SMP) += smp.o 16 17 17 18 obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o 18 19 CFLAGS_fpu.o += -mdpfp
+11
arch/arc/kernel/ctx_sw.c
··· 58 58 * For SMP extra work to get to &_current_task[cpu] 59 59 * (open coded SET_CURR_TASK_ON_CPU) 60 60 */ 61 + #ifndef CONFIG_SMP 61 62 "st %2, [@_current_task] \n\t" 63 + #else 64 + "lr r24, [identity] \n\t" 65 + "lsr r24, r24, 8 \n\t" 66 + "bmsk r24, r24, 7 \n\t" 67 + "add2 r24, @_current_task, r24 \n\t" 68 + "st %2, [r24] \n\t" 69 + #endif 70 + #ifdef CONFIG_ARC_CURR_IN_REG 71 + "mov r25, %2 \n\t" 72 + #endif 62 73 63 74 /* get ksp of incoming task from tsk->thread.ksp */ 64 75 "ld.as sp, [%2, %1] \n\t"
+4
arch/arc/kernel/entry.S
··· 232 232 ARC_ENTRY handle_interrupt_level1 233 233 234 234 /* free up r9 as scratchpad */ 235 + #ifdef CONFIG_SMP 236 + sr r9, [ARC_REG_SCRATCH_DATA0] 237 + #else 235 238 st r9, [@int1_saved_reg] 239 + #endif 236 240 237 241 ;Which mode (user/kernel) was the system in when intr occured 238 242 lr r9, [status32_l1]
+33
arch/arc/kernel/head.S
··· 27 27 ; Don't clobber r0-r4 yet. It might have bootloader provided info 28 28 ;------------------------------------------------------------------- 29 29 30 + #ifdef CONFIG_SMP 31 + ; Only Boot (Master) proceeds. Others wait in platform dependent way 32 + ; IDENTITY Reg [ 3 2 1 0 ] 33 + ; (cpu-id) ^^^ => Zero for UP ARC700 34 + ; => #Core-ID if SMP (Master 0) 35 + GET_CPU_ID r5 36 + cmp r5, 0 37 + jnz arc_platform_smp_wait_to_boot 38 + #endif 30 39 ; Clear BSS before updating any globals 31 40 ; XXX: use ZOL here 32 41 mov r5, __bss_start ··· 85 76 GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output) 86 77 87 78 j start_kernel ; "C" entry point 79 + 80 + #ifdef CONFIG_SMP 81 + ;---------------------------------------------------------------- 82 + ; First lines of code run by secondary before jumping to 'C' 83 + ;---------------------------------------------------------------- 84 + .section .init.text, "ax",@progbits 85 + .type first_lines_of_secondary, @function 86 + .globl first_lines_of_secondary 87 + 88 + first_lines_of_secondary: 89 + 90 + ; setup per-cpu idle task as "current" on this CPU 91 + ld r0, [@secondary_idle_tsk] 92 + SET_CURR_TASK_ON_CPU r0, r1 93 + 94 + ; setup stack (fp, sp) 95 + mov fp, 0 96 + 97 + ; set its stack base to tsk->thread_info bottom 98 + GET_TSK_STACK_BASE r0, sp 99 + 100 + j start_kernel_secondary 101 + 102 + #endif
+5
arch/arc/kernel/irq.c
··· 124 124 { 125 125 init_onchip_IRQ(); 126 126 plat_init_IRQ(); 127 + 128 + #ifdef CONFIG_SMP 129 + /* Master CPU can initialize its side of IPI */ 130 + arc_platform_smp_init_cpu(); 131 + #endif 127 132 } 128 133 129 134 /*
+4
arch/arc/kernel/setup.c
··· 86 86 87 87 setup_processor(); 88 88 89 + #ifdef CONFIG_SMP 90 + smp_init_cpus(); 91 + #endif 92 + 89 93 setup_arch_memory(); 90 94 91 95 unflatten_device_tree();
+320
arch/arc/kernel/smp.c
··· 1 + /* 2 + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * RajeshwarR: Dec 11, 2007 9 + * -- Added support for Inter Processor Interrupts 10 + * 11 + * Vineetg: Nov 1st, 2007 12 + * -- Initial Write (Borrowed heavily from ARM) 13 + */ 14 + 15 + #include <linux/module.h> 16 + #include <linux/init.h> 17 + #include <linux/spinlock.h> 18 + #include <linux/sched.h> 19 + #include <linux/interrupt.h> 20 + #include <linux/profile.h> 21 + #include <linux/errno.h> 22 + #include <linux/err.h> 23 + #include <linux/mm.h> 24 + #include <linux/cpu.h> 25 + #include <linux/smp.h> 26 + #include <linux/irq.h> 27 + #include <linux/delay.h> 28 + #include <linux/atomic.h> 29 + #include <linux/percpu.h> 30 + #include <linux/cpumask.h> 31 + #include <linux/spinlock_types.h> 32 + #include <linux/reboot.h> 33 + #include <asm/processor.h> 34 + #include <asm/setup.h> 35 + 36 + arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED; 37 + arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED; 38 + 39 + /* XXX: per cpu ? Only needed once in early secondary boot */ 40 + struct task_struct *secondary_idle_tsk; 41 + 42 + /* Called from start_kernel */ 43 + void __init smp_prepare_boot_cpu(void) 44 + { 45 + } 46 + 47 + /* 48 + * Initialise the CPU possible map early - this describes the CPUs 49 + * which may be present or become present in the system. 50 + */ 51 + void __init smp_init_cpus(void) 52 + { 53 + unsigned int i; 54 + 55 + for (i = 0; i < NR_CPUS; i++) 56 + set_cpu_possible(i, true); 57 + } 58 + 59 + /* called from init ( ) => process 1 */ 60 + void __init smp_prepare_cpus(unsigned int max_cpus) 61 + { 62 + int i; 63 + 64 + /* 65 + * Initialise the present map, which describes the set of CPUs 66 + * actually populated at the present time. 67 + */ 68 + for (i = 0; i < max_cpus; i++) 69 + set_cpu_present(i, true); 70 + } 71 + 72 + void __init smp_cpus_done(unsigned int max_cpus) 73 + { 74 + 75 + } 76 + 77 + /* 78 + * After power-up, a non Master CPU needs to wait for Master to kick start it 79 + * 80 + * The default implementation halts 81 + * 82 + * This relies on platform specific support allowing Master to directly set 83 + * this CPU's PC (to be @first_lines_of_secondary()) and kick start it. 84 + * 85 + * In the absence of such h/w assist, platforms can override this function 86 + * - make this function busy-spin on a token, eventually set by Master 87 + * (from arc_platform_smp_wakeup_cpu()) 88 + * - Once token is available, jump to @first_lines_of_secondary 89 + * (using inline asm). 90 + * 91 + * Alert: can NOT use stack here as it has not been determined/setup for CPU. 
92 + * If it turns out to be elaborate, it's better to code it in assembly 93 + * 94 + */ 95 + void __attribute__((weak)) arc_platform_smp_wait_to_boot(int cpu) 96 + { 97 + /* 98 + * As a hack for debugging - since debugger will single-step over the 99 + * FLAG insn - wrap the halt itself in a self loop 100 + */ 101 + __asm__ __volatile__( 102 + "1: \n" 103 + " flag 1 \n" 104 + " b 1b \n"); 105 + } 106 + 107 + /* 108 + * The very first "C" code executed by secondary 109 + * Called from asm stub in head.S 110 + * "current"/R25 already setup by low level boot code 111 + */ 112 + void __cpuinit start_kernel_secondary(void) 113 + { 114 + struct mm_struct *mm = &init_mm; 115 + unsigned int cpu = smp_processor_id(); 116 + 117 + /* MMU, Caches, Vector Table, Interrupts etc */ 118 + setup_processor(); 119 + 120 + atomic_inc(&mm->mm_users); 121 + atomic_inc(&mm->mm_count); 122 + current->active_mm = mm; 123 + 124 + notify_cpu_starting(cpu); 125 + set_cpu_online(cpu, true); 126 + 127 + pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu); 128 + 129 + arc_platform_smp_init_cpu(); 130 + 131 + arc_local_timer_setup(cpu); 132 + 133 + local_irq_enable(); 134 + preempt_disable(); 135 + cpu_idle(); 136 + } 137 + 138 + /* 139 + * Called from kernel_init( ) -> smp_init( ) - for each CPU 140 + * 141 + * At this point, Secondary Processor is "HALT"ed: 142 + * -It booted, but was halted in head.S 143 + * -It was configured to halt-on-reset 144 + * So need to wake it up. 145 + * 146 + * Essential requirements being where to run from (PC) and stack (SP) 147 + */ 148 + int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 149 + { 150 + unsigned long wait_till; 151 + 152 + secondary_idle_tsk = idle; 153 + 154 + pr_info("Idle Task [%d] %p\n", cpu, idle); 155 + pr_info("Trying to bring up CPU%u ...\n", cpu); 156 + 157 + arc_platform_smp_wakeup_cpu(cpu, 158 + (unsigned long)first_lines_of_secondary); 159 + 160 + /* wait for 1 sec after kicking the secondary */ 161 + wait_till = jiffies + HZ; 162 + while (time_before(jiffies, wait_till)) { 163 + if (cpu_online(cpu)) 164 + break; 165 + } 166 + 167 + if (!cpu_online(cpu)) { 168 + pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu); 169 + return -1; 170 + } 171 + 172 + secondary_idle_tsk = NULL; 173 + 174 + return 0; 175 + } 176 + 177 + /* 178 + * not supported here 179 + */ 180 + int __init setup_profiling_timer(unsigned int multiplier) 181 + { 182 + return -EINVAL; 183 + } 184 + 185 + /*****************************************************************************/ 186 + /* Inter Processor Interrupt Handling */ 187 + /*****************************************************************************/ 188 + 189 + /* 190 + * structures for inter-processor calls 191 + * A Collection of single bit ipi messages 192 + * 193 + */ 194 + 195 + /* 196 + * TODO_rajesh investigate tlb message types. 
197 + * IPI Timer not needed because each ARC has an individual Interrupting Timer 198 + */ 199 + enum ipi_msg_type { 200 + IPI_NOP = 0, 201 + IPI_RESCHEDULE = 1, 202 + IPI_CALL_FUNC, 203 + IPI_CALL_FUNC_SINGLE, 204 + IPI_CPU_STOP 205 + }; 206 + 207 + struct ipi_data { 208 + unsigned long bits; 209 + }; 210 + 211 + static DEFINE_PER_CPU(struct ipi_data, ipi_data); 212 + 213 + static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg) 214 + { 215 + unsigned long flags; 216 + unsigned int cpu; 217 + 218 + local_irq_save(flags); 219 + 220 + for_each_cpu(cpu, callmap) { 221 + struct ipi_data *ipi = &per_cpu(ipi_data, cpu); 222 + set_bit(msg, &ipi->bits); 223 + } 224 + 225 + /* Call the platform specific cross-CPU call function */ 226 + arc_platform_ipi_send(callmap); 227 + 228 + local_irq_restore(flags); 229 + } 230 + 231 + void smp_send_reschedule(int cpu) 232 + { 233 + ipi_send_msg(cpumask_of(cpu), IPI_RESCHEDULE); 234 + } 235 + 236 + void smp_send_stop(void) 237 + { 238 + struct cpumask targets; 239 + cpumask_copy(&targets, cpu_online_mask); 240 + cpumask_clear_cpu(smp_processor_id(), &targets); 241 + ipi_send_msg(&targets, IPI_CPU_STOP); 242 + } 243 + 244 + void arch_send_call_function_single_ipi(int cpu) 245 + { 246 + ipi_send_msg(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); 247 + } 248 + 249 + void arch_send_call_function_ipi_mask(const struct cpumask *mask) 250 + { 251 + ipi_send_msg(mask, IPI_CALL_FUNC); 252 + } 253 + 254 + /* 255 + * ipi_cpu_stop - handle IPI from smp_send_stop() 256 + */ 257 + static void ipi_cpu_stop(unsigned int cpu) 258 + { 259 + machine_halt(); 260 + } 261 + 262 + static inline void __do_IPI(unsigned long *ops, struct ipi_data *ipi, int cpu) 263 + { 264 + unsigned long msg = 0; 265 + 266 + do { 267 + msg = find_next_bit(ops, BITS_PER_LONG, msg+1); 268 + 269 + switch (msg) { 270 + case IPI_RESCHEDULE: 271 + scheduler_ipi(); 272 + break; 273 + 274 + case IPI_CALL_FUNC: 275 + generic_smp_call_function_interrupt(); 276 + break; 277 + 278 + case IPI_CALL_FUNC_SINGLE: 279 + generic_smp_call_function_single_interrupt(); 280 + break; 281 + 282 + case IPI_CPU_STOP: 283 + ipi_cpu_stop(cpu); 284 + break; 285 + } 286 + } while (msg < BITS_PER_LONG); 287 + 288 + } 289 + 290 + /* 291 + * arch-common ISR to handle inter-processor interrupts 292 + * Has hooks for platform specific IPI 293 + */ 294 + irqreturn_t do_IPI(int irq, void *dev_id) 295 + { 296 + int cpu = smp_processor_id(); 297 + struct ipi_data *ipi = &per_cpu(ipi_data, cpu); 298 + unsigned long ops; 299 + 300 + arc_platform_ipi_clear(cpu, irq); 301 + 302 + /* 303 + * XXX: is this loop really needed 304 + * And do we need to move ipi_clear inside 305 + */ 306 + while ((ops = xchg(&ipi->bits, 0)) != 0) 307 + __do_IPI(&ops, ipi, cpu); 308 + 309 + return IRQ_HANDLED; 310 + } 311 + 312 + /* 313 + * API called by platform code to hookup arch-common ISR to their IPI IRQ 314 + */ 315 + static DEFINE_PER_CPU(int, ipi_dev); 316 + int smp_ipi_irq_setup(int cpu, int irq) 317 + { 318 + int *dev_id = &per_cpu(ipi_dev, smp_processor_id()); 319 + return request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev_id); 320 + }
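Tracing one message end-to-end through the code above (a hypothetical
walk-through, e.g. a reschedule aimed at CPU 1):

    void trace_ipi_example(void)    /* illustrative only */
    {
        struct ipi_data *ipi = &per_cpu(ipi_data, 1);
        unsigned long ops;

        /* sender (any CPU): what smp_send_reschedule(1) boils down to */
        set_bit(IPI_RESCHEDULE, &ipi->bits);    /* mailbox bits = 0b10 */
        arc_platform_ipi_send(cpumask_of(1));   /* raise CPU1's IPI irq */

        /* receiver (CPU1, in do_IPI): drain atomically, then dispatch */
        ops = xchg(&ipi->bits, 0);              /* ops = 0b10 */
        __do_IPI(&ops, ipi, 1);                 /* bit 1 -> scheduler_ipi() */
    }

Note that __do_IPI() starts find_next_bit() at msg+1, so bit 0 is never
dispatched - which is presumably why IPI_NOP sits at 0.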
+6
arch/arc/mm/tlb.c
··· 474 474 475 475 /* Enable the MMU */ 476 476 write_aux_reg(ARC_REG_PID, MMU_ENABLE); 477 + 478 + /* In smp we use this reg for interrupt 1 scratch */ 479 + #ifndef CONFIG_SMP 480 + /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */ 481 + write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir); 482 + #endif 477 483 } 478 484 479 485 /*
+38
arch/arc/mm/tlbex.S
··· 57 57 .global ex_saved_reg1 58 58 .align 1 << L1_CACHE_SHIFT ; IMP: Must be Cache Line aligned 59 59 .type ex_saved_reg1, @object 60 + #ifdef CONFIG_SMP 61 + .size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT) 62 + ex_saved_reg1: 63 + .zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT) 64 + #else 60 65 .size ex_saved_reg1, 16 61 66 ex_saved_reg1: 62 67 .zero 16 68 + #endif 63 69 64 70 ;============================================================================ 65 71 ; Troubleshooting Stuff ··· 122 116 123 117 lr r2, [efa] 124 118 119 + #ifndef CONFIG_SMP 125 120 lr r1, [ARC_REG_SCRATCH_DATA0] ; current pgd 121 + #else 122 + GET_CURR_TASK_ON_CPU r1 123 + ld r1, [r1, TASK_ACT_MM] 124 + ld r1, [r1, MM_PGD] 125 + #endif 126 126 127 127 lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD 128 128 ld.as r1, [r1, r0] ; PGD entry corresp to faulting addr ··· 204 192 ; ".size ex_saved_reg1, 16" 205 193 ; [All of this dance is to avoid stack switching for each TLB Miss, since we 206 194 ; only need to save only a handful of regs, as opposed to complete reg file] 195 + ; 196 + ; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST 197 + ; core reg as it will not be SMP safe. 198 + ; Thus scratch AUX reg is used (and no longer used to cache task PGD). 199 + ; To save the rest of the 3 regs - per cpu, the global is made "per-cpu". 200 + ; Epilogue thus has to locate the "per-cpu" storage for regs. 201 + ; To avoid cache line bouncing the per-cpu global is aligned/sized per 202 + ; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence 203 + ; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)" 207 204 208 205 ; As simple as that.... 209 206 210 207 .macro TLBMISS_FREEUP_REGS 208 + #ifdef CONFIG_SMP 209 + sr r0, [ARC_REG_SCRATCH_DATA0] ; free up r0 to code with 210 + GET_CPU_ID r0 ; get to per cpu scratch mem, 211 + lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu 212 + add r0, @ex_saved_reg1, r0 213 + #else 211 214 st r0, [@ex_saved_reg1] 212 215 mov_s r0, @ex_saved_reg1 216 + #endif 213 217 st_s r1, [r0, 4] 214 218 st_s r2, [r0, 8] 215 219 st_s r3, [r0, 12] ··· 238 210 239 211 ;----------------------------------------------------------------- 240 212 .macro TLBMISS_RESTORE_REGS 213 + #ifdef CONFIG_SMP 214 + GET_CPU_ID r0 ; get to per cpu scratch mem 215 + lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide 216 + add r0, @ex_saved_reg1, r0 217 + ld_s r3, [r0,12] 218 + ld_s r2, [r0, 8] 219 + ld_s r1, [r0, 4] 220 + lr r0, [ARC_REG_SCRATCH_DATA0] 221 + #else 241 222 mov_s r0, @ex_saved_reg1 242 223 ld_s r3, [r0,12] 243 224 ld_s r2, [r0, 8] 244 225 ld_s r1, [r0, 4] 245 226 ld_s r0, [r0] 227 + #endif 246 228 .endm 247 229 248 230 .section .text, "ax",@progbits ;Fast Path Code, candidate for ICCM
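The per-cpu save-slot arithmetic above, in C terms (a sketch):

    extern char ex_saved_reg1[];    /* the per-cpu save area above */

    /* one cache-line-sized slot per CPU, to avoid line bouncing */
    char *slot = ex_saved_reg1 + (cpu << L1_CACHE_SHIFT);

    /* r0 itself parks in ARC_REG_SCRATCH_DATA0; r1, r2, r3 land at
     * slot + 4, slot + 8 and slot + 12 respectively */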
+14
arch/arc/plat-arcfpga/Kconfig
··· 13 13 14 14 config ARC_BOARD_ANGEL4 15 15 bool "ARC Angel4" 16 + select ISS_SMP_EXTN if SMP 16 17 help 17 18 ARC Angel4 FPGA Ref Platform (Xilinx Virtex Based) 18 19 ··· 21 20 bool "ML509" 22 21 help 23 22 ARC ML509 FPGA Ref Platform (Xilinx Virtex-5 Based) 23 + 24 + config ISS_SMP_EXTN 25 + bool "ARC SMP Extensions (ISS Models only)" 26 + default n 27 + depends on SMP 28 + select ARC_HAS_COH_RTSC 29 + help 30 + SMP Extensions to ARC700, in a "simulation only" Model, supported in 31 + ARC ISS (Instruction Set Simulator). 32 + The SMP extensions include: 33 + -IDU (Interrupt Distribution Unit) 34 + -XTL (To enable CPU start/stop/set-PC for another CPU) 35 + It doesn't provide coherent Caches and/or Atomic Ops (LLOCK/SCOND) 24 36 25 37 endchoice 26 38
+1
arch/arc/plat-arcfpga/Makefile
··· 7 7 # 8 8 9 9 obj-y := platform.o irq.o 10 + obj-$(CONFIG_SMP) += smp.o
+9 -1
arch/arc/plat-arcfpga/include/plat/irq.h
··· 12 12 #ifndef __PLAT_IRQ_H 13 13 #define __PLAT_IRQ_H 14 14 15 - #define NR_IRQS 16 15 + #ifdef CONFIG_SMP 16 + #define NR_IRQS 32 17 + #else 18 + #define NR_IRQS 16 19 + #endif 16 20 17 21 #define UART0_IRQ 5 18 22 #define UART1_IRQ 10 ··· 27 23 #define IDE_IRQ 13 28 24 #define PCI_IRQ 14 29 25 #define PS2_IRQ 15 26 + 27 + #ifdef CONFIG_SMP 28 + #define IDU_INTERRUPT_0 16 29 + #endif 30 30 31 31 #endif
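The doubled NR_IRQS reflects the ISS model's map: IDU common interrupts
arrive as each CPU's private IRQs 16 and up, and the 1:1 IPI wiring in
the plat smp code below puts CPU n's IPI on private IRQ
IDU_INTERRUPT_0 + n. A hypothetical helper to make that concrete:

    static inline int ipi_irq_of(int cpu)   /* illustrative only */
    {
        return IDU_INTERRUPT_0 + cpu;       /* e.g. cpu 2 -> IRQ 18 */
    }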
+115
arch/arc/plat-arcfpga/include/plat/smp.h
··· 1 + /* 2 + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * Rajeshwar Ranga: Interrupt Distribution Unit API's 9 + */ 10 + 11 + #ifndef __PLAT_ARCFPGA_SMP_H 12 + #define __PLAT_ARCFPGA_SMP_H 13 + 14 + #ifdef CONFIG_SMP 15 + 16 + #include <linux/types.h> 17 + #include <asm/arcregs.h> 18 + 19 + #define ARC_AUX_IDU_REG_CMD 0x2000 20 + #define ARC_AUX_IDU_REG_PARAM 0x2001 21 + 22 + #define ARC_AUX_XTL_REG_CMD 0x2002 23 + #define ARC_AUX_XTL_REG_PARAM 0x2003 24 + 25 + #define ARC_REG_MP_BCR 0x2021 26 + 27 + #define ARC_XTL_CMD_WRITE_PC 0x04 28 + #define ARC_XTL_CMD_CLEAR_HALT 0x02 29 + 30 + /* 31 + * Build Configuration Register which identifies the sub-components 32 + */ 33 + struct bcr_mp { 34 + #ifdef CONFIG_CPU_BIG_ENDIAN 35 + unsigned int mp_arch:16, pad:5, sdu:1, idu:1, scu:1, ver:8; 36 + #else 37 + unsigned int ver:8, scu:1, idu:1, sdu:1, pad:5, mp_arch:16; 38 + #endif 39 + }; 40 + 41 + /* IDU supports 256 common interrupts */ 42 + #define NR_IDU_IRQS 256 43 + 44 + /* 45 + * The Aux Regs layout is the same bit-by-bit in both BE/LE modes. 46 + * However when cast as a bitfield encoded "C" struct, gcc treats it as 47 + * memory, generating different code for BE/LE, requiring structure adj (see 48 + * include/asm/arcregs.h) 49 + * 50 + * However when manually "carving" the value for an Aux, no special handling 51 + * of BE is needed because of the property described above 52 + */ 53 + #define IDU_SET_COMMAND(irq, cmd) \ 54 + do { \ 55 + uint32_t __val; \ 56 + __val = (((irq & 0xFF) << 8) | (cmd & 0xFF)); \ 57 + write_aux_reg(ARC_AUX_IDU_REG_CMD, __val); \ 58 + } while (0) 59 + 60 + #define IDU_SET_PARAM(par) write_aux_reg(ARC_AUX_IDU_REG_PARAM, par) 61 + #define IDU_GET_PARAM() read_aux_reg(ARC_AUX_IDU_REG_PARAM) 62 + 63 + /* IDU Commands */ 64 + #define IDU_DISABLE 0x00 65 + #define IDU_ENABLE 0x01 66 + #define IDU_IRQ_CLEAR 0x02 67 + #define IDU_IRQ_ASSERT 0x03 68 + #define IDU_IRQ_WMODE 0x04 69 + #define IDU_IRQ_STATUS 0x05 70 + #define IDU_IRQ_ACK 0x06 71 + #define IDU_IRQ_PEND 0x07 72 + #define IDU_IRQ_RMODE 0x08 73 + #define IDU_IRQ_WBITMASK 0x09 74 + #define IDU_IRQ_RBITMASK 0x0A 75 + 76 + #define idu_enable() IDU_SET_COMMAND(0, IDU_ENABLE) 77 + #define idu_disable() IDU_SET_COMMAND(0, IDU_DISABLE) 78 + 79 + #define idu_irq_assert(irq) IDU_SET_COMMAND((irq), IDU_IRQ_ASSERT) 80 + #define idu_irq_clear(irq) IDU_SET_COMMAND((irq), IDU_IRQ_CLEAR) 81 + 82 + /* IDU Interrupt Mode - Destination Encoding */ 83 + #define IDU_IRQ_MOD_DISABLE 0x00 84 + #define IDU_IRQ_MOD_ROUND_RECP 0x01 85 + #define IDU_IRQ_MOD_TCPU_FIRSTRECP 0x02 86 + #define IDU_IRQ_MOD_TCPU_ALLRECP 0x03 87 + 88 + /* IDU Interrupt Mode - Triggering Mode */ 89 + #define IDU_IRQ_MODE_LEVEL_TRIG 0x00 90 + #define IDU_IRQ_MODE_PULSE_TRIG 0x01 91 + 92 + #define IDU_IRQ_MODE_PARAM(dest_mode, trig_mode) \ 93 + (((trig_mode & 0x01) << 15) | (dest_mode & 0xFF)) 94 + 95 + struct idu_irq_config { 96 + uint8_t irq; 97 + uint8_t dest_mode; 98 + uint8_t trig_mode; 99 + }; 100 + 101 + struct idu_irq_status { 102 + uint8_t irq; 103 + bool enabled; 104 + bool status; 105 + bool ack; 106 + bool pend; 107 + uint8_t next_rr; 108 + }; 109 + 110 + extern void idu_irq_set_tgtcpu(uint8_t irq, uint32_t mask); 111 + extern void idu_irq_set_mode(uint8_t irq, uint8_t dest_mode, uint8_t trig_mode); 112 + 113 + 
#endif /* CONFIG_SMP */ 114 + 115 + #endif
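A worked encoding example for the command interface above: asserting
common IRQ 2, as the IPI send path does, expands per IDU_SET_COMMAND to

    idu_irq_assert(2);
    /* = IDU_SET_COMMAND(2, IDU_IRQ_ASSERT), i.e. it writes
     * ((2 & 0xFF) << 8) | (0x03 & 0xFF) = 0x0203 to ARC_AUX_IDU_REG_CMD */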
+10
arch/arc/plat-arcfpga/irq.c
··· 9 9 */ 10 10 11 11 #include <linux/interrupt.h> 12 + #include <asm/irq.h> 12 13 13 14 void __init plat_init_IRQ(void) 14 15 { 16 + /* 17 + * SMP Hack because UART IRQ hardwired to cpu0 (boot-cpu) but if the 18 + * request_irq() comes from any other CPU, the low level IRQ unmasking 19 + * essential for getting Interrupts won't be enabled on cpu0, locking 20 + * up the UART state machine. 21 + */ 22 + #ifdef CONFIG_SMP 23 + arch_unmask_irq(UART0_IRQ); 24 + #endif 15 25 }
+167
arch/arc/plat-arcfpga/smp.c
··· 1 + /* 2 + * ARC700 Simulation-only Extensions for SMP 3 + * 4 + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + * 10 + * Vineet Gupta - 2012 : split off arch common and plat specific SMP 11 + * Rajeshwar Ranga - 2007 : Interrupt Distribution Unit API's 12 + */ 13 + 14 + #include <linux/smp.h> 15 + #include <asm/irq.h> 16 + #include <plat/smp.h> 17 + 18 + static char smp_cpuinfo_buf[128]; 19 + 20 + /* 21 + *------------------------------------------------------------------- 22 + * Platform specific callbacks expected by arch SMP code 23 + *------------------------------------------------------------------- 24 + */ 25 + 26 + const char *arc_platform_smp_cpuinfo(void) 27 + { 28 + #define IS_AVAIL1(var, str) ((var) ? str : "") 29 + 30 + struct bcr_mp mp; 31 + 32 + READ_BCR(ARC_REG_MP_BCR, mp); 33 + 34 + sprintf(smp_cpuinfo_buf, "Extn [700-SMP]: v%d, arch(%d) %s %s %s\n", 35 + mp.ver, mp.mp_arch, IS_AVAIL1(mp.scu, "SCU"), 36 + IS_AVAIL1(mp.idu, "IDU"), IS_AVAIL1(mp.sdu, "SDU")); 37 + 38 + return smp_cpuinfo_buf; 39 + } 40 + 41 + /* 42 + * Master kick starting another CPU 43 + */ 44 + void arc_platform_smp_wakeup_cpu(int cpu, unsigned long pc) 45 + { 46 + /* setup the start PC */ 47 + write_aux_reg(ARC_AUX_XTL_REG_PARAM, pc); 48 + 49 + /* Trigger WRITE_PC cmd for this cpu */ 50 + write_aux_reg(ARC_AUX_XTL_REG_CMD, 51 + (ARC_XTL_CMD_WRITE_PC | (cpu << 8))); 52 + 53 + /* Take the cpu out of Halt */ 54 + write_aux_reg(ARC_AUX_XTL_REG_CMD, 55 + (ARC_XTL_CMD_CLEAR_HALT | (cpu << 8))); 56 + 57 + } 58 + 59 + /* 60 + * Any SMP specific init any CPU does when it comes up. 61 + * Here we setup the CPU to enable Inter-Processor-Interrupts 62 + * Called for each CPU 63 + * -Master : init_IRQ() 64 + * -Other(s) : start_kernel_secondary() 65 + */ 66 + void arc_platform_smp_init_cpu(void) 67 + { 68 + int cpu = smp_processor_id(); 69 + 70 + /* Check if CPU is configured for more than 16 interrupts */ 71 + if (NR_IRQS <= 16 || get_hw_config_num_irq() <= 16) 72 + panic("[arcfpga] IRQ system can't support IDU IPI\n"); 73 + 74 + idu_disable(); 75 + 76 + /**************************************************************** 77 + * IDU provides a set of Common IRQs, each of which can be dynamically 78 + * attached to (1|many|all) CPUs. 79 + * The Common IRQs [0-15] are mapped as CPU pvt [16-31] 80 + * 81 + * Here we use a simple 1:1 mapping: 82 + * A CPU 'x' is wired to Common IRQ 'x'. 83 + * So an IDU ASSERT on IRQ 'x' will trigger an Interrupt on CPU 'x', 84 + * which makes up our simple IPI plumbing. 85 + * 86 + * TBD: Have a dedicated multicast IRQ for sending IPIs to all CPUs 87 + * w/o having to do one-at-a-time 88 + ******************************************************************/ 89 + 90 + /* 91 + * Claim an IRQ which would trigger IPI on this CPU. 92 + * In IDU parlance it involves setting up a cpu bitmask for the IRQ 93 + * The bitmap here contains only 1 CPU (self). 
94 + */ 95 + idu_irq_set_tgtcpu(cpu, 0x1 << cpu); 96 + 97 + /* Set the IRQ destination to use the bitmask above */ 98 + idu_irq_set_mode(cpu, 7, /* XXX: IDU_IRQ_MOD_TCPU_ALLRECP: ISS bug */ 99 + IDU_IRQ_MODE_PULSE_TRIG); 100 + 101 + idu_enable(); 102 + 103 + /* Attach the arch-common IPI ISR to our IDU IRQ */ 104 + smp_ipi_irq_setup(cpu, IDU_INTERRUPT_0 + cpu); 105 + } 106 + 107 + void arc_platform_ipi_send(const struct cpumask *callmap) 108 + { 109 + unsigned int cpu; 110 + 111 + for_each_cpu(cpu, callmap) 112 + idu_irq_assert(cpu); 113 + } 114 + 115 + void arc_platform_ipi_clear(int cpu, int irq) 116 + { 117 + idu_irq_clear(IDU_INTERRUPT_0 + cpu); 118 + } 119 + 120 + /* 121 + *------------------------------------------------------------------- 122 + * Low level Platform IPI Providers 123 + *------------------------------------------------------------------- 124 + */ 125 + 126 + /* Set the Mode for the Common IRQ */ 127 + void idu_irq_set_mode(uint8_t irq, uint8_t dest_mode, uint8_t trig_mode) 128 + { 129 + uint32_t par = IDU_IRQ_MODE_PARAM(dest_mode, trig_mode); 130 + 131 + IDU_SET_PARAM(par); 132 + IDU_SET_COMMAND(irq, IDU_IRQ_WMODE); 133 + } 134 + 135 + /* Set the target cpu Bitmask for Common IRQ */ 136 + void idu_irq_set_tgtcpu(uint8_t irq, uint32_t mask) 137 + { 138 + IDU_SET_PARAM(mask); 139 + IDU_SET_COMMAND(irq, IDU_IRQ_WBITMASK); 140 + } 141 + 142 + /* Get the Interrupt Acknowledged status for IRQ (as CPU Bitmask) */ 143 + bool idu_irq_get_ack(uint8_t irq) 144 + { 145 + uint32_t val; 146 + 147 + IDU_SET_COMMAND(irq, IDU_IRQ_ACK); 148 + val = IDU_GET_PARAM(); 149 + 150 + return val & (1 << irq); 151 + } 152 + 153 + /* 154 + * Get the Interrupt Pending status for IRQ (as CPU Bitmask) 155 + * -Pending means CPU has not yet noticed the IRQ (e.g. disabled) 156 + * -After Interrupt has been taken, the IPI explicitly needs to be 157 + * cleared, to be acknowledged. 158 + */ 159 + bool idu_irq_get_pend(uint8_t irq) 160 + { 161 + uint32_t val; 162 + 163 + IDU_SET_COMMAND(irq, IDU_IRQ_PEND); 164 + val = IDU_GET_PARAM(); 165 + 166 + return val & (1 << irq); 167 + }
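For reference, the mode parameter programmed by idu_irq_set_mode()
packs per IDU_IRQ_MODE_PARAM; e.g. pulse-triggered, all-recipients mode
would encode as (a sketch; arc_platform_smp_init_cpu() above actually
passes dest mode 7 to dodge the noted ISS bug):

    uint32_t par = IDU_IRQ_MODE_PARAM(IDU_IRQ_MOD_TCPU_ALLRECP,
                                      IDU_IRQ_MODE_PULSE_TRIG);
    /* = (0x01 << 15) | 0x03 = 0x8003; written via IDU_SET_PARAM(par),
     * then latched by IDU_SET_COMMAND(irq, IDU_IRQ_WMODE) */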