Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

CRIS: remove SMP code

The CRIS SMP code cannot be built since there is not (and appears never to
have been) a CONFIG_SMP Kconfig option in arch/cris/. Remove it.

Signed-off-by: Rabin Vincent <rabin@rab.in>
Signed-off-by: Jesper Nilsson <jespern@axis.com>

Authored by Rabin Vincent and committed by Jesper Nilsson
47a8f6fb 06aca924

+1 -638
-1
arch/cris/Kconfig
··· 46 46 select ARCH_WANT_IPC_PARSE_VERSION 47 47 select GENERIC_IRQ_SHOW 48 48 select GENERIC_IOMAP 49 - select GENERIC_SMP_IDLE_THREAD if ETRAX_ARCH_V32 50 49 select GENERIC_CMOS_UPDATE 51 50 select MODULES_USE_ELF_RELA 52 51 select CLONE_BACKWARDS2
-1
arch/cris/arch-v32/kernel/Makefile
··· 9 9 process.o ptrace.o setup.o signal.o traps.o time.o \ 10 10 cache.o cacheflush.o 11 11 12 - obj-$(CONFIG_SMP) += smp.o 13 12 obj-$(CONFIG_ETRAX_KGDB) += kgdb.o kgdb_asm.o 14 13 obj-$(CONFIG_ETRAX_FAST_TIMER) += fasttimer.o 15 14 obj-$(CONFIG_MODULES) += crisksyms.o
-32
arch/cris/arch-v32/kernel/head.S
··· 52 52 53 53 GIO_INIT 54 54 55 - #ifdef CONFIG_SMP 56 - secondary_cpu_entry: /* Entry point for secondary CPUs */ 57 - di 58 - #endif 59 - 60 55 ;; Setup and enable the MMU. Use same configuration for both the data 61 56 ;; and the instruction MMU. 62 57 ;; ··· 159 164 nop 160 165 nop 161 166 162 - #ifdef CONFIG_SMP 163 - ;; Read CPU ID 164 - move 0, $srs 165 - nop 166 - nop 167 - nop 168 - move $s12, $r0 169 - cmpq 0, $r0 170 - beq master_cpu 171 - nop 172 - slave_cpu: 173 - ; Time to boot-up. Get stack location provided by master CPU. 174 - move.d smp_init_current_idle_thread, $r1 175 - move.d [$r1], $sp 176 - add.d 8192, $sp 177 - move.d ebp_start, $r0 ; Defined in linker-script. 178 - move $r0, $ebp 179 - jsr smp_callin 180 - nop 181 - master_cpu: 182 - /* Set up entry point for secondary CPUs. The boot ROM has set up 183 - * EBP at start of internal memory. The CPU will get there 184 - * later when we issue an IPI to them... */ 185 - move.d MEM_INTMEM_START + IPI_INTR_VECT * 4, $r0 186 - move.d secondary_cpu_entry, $r1 187 - move.d $r1, [$r0] 188 - #endif 189 167 ; Check if starting from DRAM (network->RAM boot or unpacked 190 168 ; compressed kernel), or directly from flash. 191 169 lapcq ., $r0
-3
arch/cris/arch-v32/kernel/irq.c
··· 58 58 static unsigned long irq_regs[NR_CPUS] = 59 59 { 60 60 regi_irq, 61 - #ifdef CONFIG_SMP 62 - regi_irq2, 63 - #endif 64 61 }; 65 62 66 63 #if NR_REAL_IRQS > 32
-5
arch/cris/arch-v32/kernel/setup.c
··· 63 63 64 64 info = &cpinfo[ARRAY_SIZE(cpinfo) - 1]; 65 65 66 - #ifdef CONFIG_SMP 67 - if (!cpu_online(cpu)) 68 - return 0; 69 - #endif 70 - 71 66 revision = rdvr(); 72 67 73 68 for (i = 0; i < ARRAY_SIZE(cpinfo); i++) {
-358
arch/cris/arch-v32/kernel/smp.c
··· 1 - #include <linux/types.h> 2 - #include <asm/delay.h> 3 - #include <irq.h> 4 - #include <hwregs/intr_vect.h> 5 - #include <hwregs/intr_vect_defs.h> 6 - #include <asm/tlbflush.h> 7 - #include <asm/mmu_context.h> 8 - #include <hwregs/asm/mmu_defs_asm.h> 9 - #include <hwregs/supp_reg.h> 10 - #include <linux/atomic.h> 11 - 12 - #include <linux/err.h> 13 - #include <linux/init.h> 14 - #include <linux/timex.h> 15 - #include <linux/sched.h> 16 - #include <linux/kernel.h> 17 - #include <linux/cpumask.h> 18 - #include <linux/interrupt.h> 19 - #include <linux/module.h> 20 - 21 - #define IPI_SCHEDULE 1 22 - #define IPI_CALL 2 23 - #define IPI_FLUSH_TLB 4 24 - #define IPI_BOOT 8 25 - 26 - #define FLUSH_ALL (void*)0xffffffff 27 - 28 - /* Vector of locks used for various atomic operations */ 29 - spinlock_t cris_atomic_locks[] = { 30 - [0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks) 31 - }; 32 - 33 - /* CPU masks */ 34 - cpumask_t phys_cpu_present_map = CPU_MASK_NONE; 35 - EXPORT_SYMBOL(phys_cpu_present_map); 36 - 37 - /* Variables used during SMP boot */ 38 - volatile int cpu_now_booting = 0; 39 - volatile struct thread_info *smp_init_current_idle_thread; 40 - 41 - /* Variables used during IPI */ 42 - static DEFINE_SPINLOCK(call_lock); 43 - static DEFINE_SPINLOCK(tlbstate_lock); 44 - 45 - struct call_data_struct { 46 - void (*func) (void *info); 47 - void *info; 48 - int wait; 49 - }; 50 - 51 - static struct call_data_struct * call_data; 52 - 53 - static struct mm_struct* flush_mm; 54 - static struct vm_area_struct* flush_vma; 55 - static unsigned long flush_addr; 56 - 57 - /* Mode registers */ 58 - static unsigned long irq_regs[NR_CPUS] = { 59 - regi_irq, 60 - regi_irq2 61 - }; 62 - 63 - static irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id); 64 - static int send_ipi(int vector, int wait, cpumask_t cpu_mask); 65 - static struct irqaction irq_ipi = { 66 - .handler = crisv32_ipi_interrupt, 67 - .flags = 0, 68 - .name = "ipi", 69 - }; 70 - 71 - 
extern void cris_mmu_init(void); 72 - extern void cris_timer_init(void); 73 - 74 - /* SMP initialization */ 75 - void __init smp_prepare_cpus(unsigned int max_cpus) 76 - { 77 - int i; 78 - 79 - /* From now on we can expect IPIs so set them up */ 80 - setup_irq(IPI_INTR_VECT, &irq_ipi); 81 - 82 - /* Mark all possible CPUs as present */ 83 - for (i = 0; i < max_cpus; i++) 84 - cpumask_set_cpu(i, &phys_cpu_present_map); 85 - } 86 - 87 - void smp_prepare_boot_cpu(void) 88 - { 89 - /* PGD pointer has moved after per_cpu initialization so 90 - * update the MMU. 91 - */ 92 - pgd_t **pgd; 93 - pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id()); 94 - 95 - SUPP_BANK_SEL(1); 96 - SUPP_REG_WR(RW_MM_TLB_PGD, pgd); 97 - SUPP_BANK_SEL(2); 98 - SUPP_REG_WR(RW_MM_TLB_PGD, pgd); 99 - 100 - set_cpu_online(0, true); 101 - cpumask_set_cpu(0, &phys_cpu_present_map); 102 - set_cpu_possible(0, true); 103 - } 104 - 105 - void __init smp_cpus_done(unsigned int max_cpus) 106 - { 107 - } 108 - 109 - /* Bring one cpu online.*/ 110 - static int __init 111 - smp_boot_one_cpu(int cpuid, struct task_struct idle) 112 - { 113 - unsigned timeout; 114 - cpumask_t cpu_mask; 115 - 116 - cpumask_clear(&cpu_mask); 117 - task_thread_info(idle)->cpu = cpuid; 118 - 119 - /* Information to the CPU that is about to boot */ 120 - smp_init_current_idle_thread = task_thread_info(idle); 121 - cpu_now_booting = cpuid; 122 - 123 - /* Kick it */ 124 - set_cpu_online(cpuid, true); 125 - cpumask_set_cpu(cpuid, &cpu_mask); 126 - send_ipi(IPI_BOOT, 0, cpu_mask); 127 - set_cpu_online(cpuid, false); 128 - 129 - /* Wait for CPU to come online */ 130 - for (timeout = 0; timeout < 10000; timeout++) { 131 - if(cpu_online(cpuid)) { 132 - cpu_now_booting = 0; 133 - smp_init_current_idle_thread = NULL; 134 - return 0; /* CPU online */ 135 - } 136 - udelay(100); 137 - barrier(); 138 - } 139 - 140 - printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid); 141 - return -1; 142 - } 143 - 144 - /* Secondary CPUs starts using C here. 
Here we need to setup CPU 145 - * specific stuff such as the local timer and the MMU. */ 146 - void __init smp_callin(void) 147 - { 148 - int cpu = cpu_now_booting; 149 - reg_intr_vect_rw_mask vect_mask = {0}; 150 - 151 - /* Initialise the idle task for this CPU */ 152 - atomic_inc(&init_mm.mm_count); 153 - current->active_mm = &init_mm; 154 - 155 - /* Set up MMU */ 156 - cris_mmu_init(); 157 - __flush_tlb_all(); 158 - 159 - /* Setup local timer. */ 160 - cris_timer_init(); 161 - 162 - /* Enable IRQ and idle */ 163 - REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask); 164 - crisv32_unmask_irq(IPI_INTR_VECT); 165 - crisv32_unmask_irq(TIMER0_INTR_VECT); 166 - preempt_disable(); 167 - notify_cpu_starting(cpu); 168 - local_irq_enable(); 169 - 170 - set_cpu_online(cpu, true); 171 - cpu_startup_entry(CPUHP_ONLINE); 172 - } 173 - 174 - /* Stop execution on this CPU.*/ 175 - void stop_this_cpu(void* dummy) 176 - { 177 - local_irq_disable(); 178 - asm volatile("halt"); 179 - } 180 - 181 - /* Other calls */ 182 - void smp_send_stop(void) 183 - { 184 - smp_call_function(stop_this_cpu, NULL, 0); 185 - } 186 - 187 - int setup_profiling_timer(unsigned int multiplier) 188 - { 189 - return -EINVAL; 190 - } 191 - 192 - 193 - /* cache_decay_ticks is used by the scheduler to decide if a process 194 - * is "hot" on one CPU. A higher value means a higher penalty to move 195 - * a process to another CPU. Our cache is rather small so we report 196 - * 1 tick. 197 - */ 198 - unsigned long cache_decay_ticks = 1; 199 - 200 - int __cpu_up(unsigned int cpu, struct task_struct *tidle) 201 - { 202 - smp_boot_one_cpu(cpu, tidle); 203 - return cpu_online(cpu) ? 
0 : -ENOSYS; 204 - } 205 - 206 - void smp_send_reschedule(int cpu) 207 - { 208 - cpumask_t cpu_mask; 209 - cpumask_clear(&cpu_mask); 210 - cpumask_set_cpu(cpu, &cpu_mask); 211 - send_ipi(IPI_SCHEDULE, 0, cpu_mask); 212 - } 213 - 214 - /* TLB flushing 215 - * 216 - * Flush needs to be done on the local CPU and on any other CPU that 217 - * may have the same mapping. The mm->cpu_vm_mask is used to keep track 218 - * of which CPUs that a specific process has been executed on. 219 - */ 220 - void flush_tlb_common(struct mm_struct* mm, struct vm_area_struct* vma, unsigned long addr) 221 - { 222 - unsigned long flags; 223 - cpumask_t cpu_mask; 224 - 225 - spin_lock_irqsave(&tlbstate_lock, flags); 226 - cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm)); 227 - cpumask_clear_cpu(smp_processor_id(), &cpu_mask); 228 - flush_mm = mm; 229 - flush_vma = vma; 230 - flush_addr = addr; 231 - send_ipi(IPI_FLUSH_TLB, 1, cpu_mask); 232 - spin_unlock_irqrestore(&tlbstate_lock, flags); 233 - } 234 - 235 - void flush_tlb_all(void) 236 - { 237 - __flush_tlb_all(); 238 - flush_tlb_common(FLUSH_ALL, FLUSH_ALL, 0); 239 - } 240 - 241 - void flush_tlb_mm(struct mm_struct *mm) 242 - { 243 - __flush_tlb_mm(mm); 244 - flush_tlb_common(mm, FLUSH_ALL, 0); 245 - /* No more mappings in other CPUs */ 246 - cpumask_clear(mm_cpumask(mm)); 247 - cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); 248 - } 249 - 250 - void flush_tlb_page(struct vm_area_struct *vma, 251 - unsigned long addr) 252 - { 253 - __flush_tlb_page(vma, addr); 254 - flush_tlb_common(vma->vm_mm, vma, addr); 255 - } 256 - 257 - /* Inter processor interrupts 258 - * 259 - * The IPIs are used for: 260 - * * Force a schedule on a CPU 261 - * * FLush TLB on other CPUs 262 - * * Call a function on other CPUs 263 - */ 264 - 265 - int send_ipi(int vector, int wait, cpumask_t cpu_mask) 266 - { 267 - int i = 0; 268 - reg_intr_vect_rw_ipi ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi); 269 - int ret = 0; 270 - 271 - /* Calculate CPUs 
to send to. */ 272 - cpumask_and(&cpu_mask, &cpu_mask, cpu_online_mask); 273 - 274 - /* Send the IPI. */ 275 - for_each_cpu(i, &cpu_mask) 276 - { 277 - ipi.vector |= vector; 278 - REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi); 279 - } 280 - 281 - /* Wait for IPI to finish on other CPUS */ 282 - if (wait) { 283 - for_each_cpu(i, &cpu_mask) { 284 - int j; 285 - for (j = 0 ; j < 1000; j++) { 286 - ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi); 287 - if (!ipi.vector) 288 - break; 289 - udelay(100); 290 - } 291 - 292 - /* Timeout? */ 293 - if (ipi.vector) { 294 - printk("SMP call timeout from %d to %d\n", smp_processor_id(), i); 295 - ret = -ETIMEDOUT; 296 - dump_stack(); 297 - } 298 - } 299 - } 300 - return ret; 301 - } 302 - 303 - /* 304 - * You must not call this function with disabled interrupts or from a 305 - * hardware interrupt handler or from a bottom half handler. 306 - */ 307 - int smp_call_function(void (*func)(void *info), void *info, int wait) 308 - { 309 - cpumask_t cpu_mask; 310 - struct call_data_struct data; 311 - int ret; 312 - 313 - cpumask_setall(&cpu_mask); 314 - cpumask_clear_cpu(smp_processor_id(), &cpu_mask); 315 - 316 - WARN_ON(irqs_disabled()); 317 - 318 - data.func = func; 319 - data.info = info; 320 - data.wait = wait; 321 - 322 - spin_lock(&call_lock); 323 - call_data = &data; 324 - ret = send_ipi(IPI_CALL, wait, cpu_mask); 325 - spin_unlock(&call_lock); 326 - 327 - return ret; 328 - } 329 - 330 - irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id) 331 - { 332 - void (*func) (void *info) = call_data->func; 333 - void *info = call_data->info; 334 - reg_intr_vect_rw_ipi ipi; 335 - 336 - ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi); 337 - 338 - if (ipi.vector & IPI_SCHEDULE) { 339 - scheduler_ipi(); 340 - } 341 - if (ipi.vector & IPI_CALL) { 342 - func(info); 343 - } 344 - if (ipi.vector & IPI_FLUSH_TLB) { 345 - if (flush_mm == FLUSH_ALL) 346 - __flush_tlb_all(); 347 - else if (flush_vma == FLUSH_ALL) 348 - 
__flush_tlb_mm(flush_mm); 349 - else 350 - __flush_tlb_page(flush_vma, flush_addr); 351 - } 352 - 353 - ipi.vector = 0; 354 - REG_WR(intr_vect, irq_regs[smp_processor_id()], rw_ipi, ipi); 355 - 356 - return IRQ_HANDLED; 357 - } 358 -
-3
arch/cris/arch-v32/kernel/time.c
··· 60 60 unsigned long timer_regs[NR_CPUS] = 61 61 { 62 62 regi_timer0, 63 - #ifdef CONFIG_SMP 64 - regi_timer2 65 - #endif 66 63 }; 67 64 68 65 extern int set_rtc_mmss(unsigned long nowtime);
+1 -1
arch/cris/arch-v32/lib/Makefile
··· 3 3 # 4 4 5 5 lib-y = checksum.o checksumcopy.o string.o usercopy.o memset.o \ 6 - csumcpfruser.o spinlock.o delay.o strcmp.o 6 + csumcpfruser.o delay.o strcmp.o 7 7
-40
arch/cris/arch-v32/lib/spinlock.S
··· 1 - ;; Core of the spinlock implementation 2 - ;; 3 - ;; Copyright (C) 2004 Axis Communications AB. 4 - ;; 5 - ;; Author: Mikael Starvik 6 - 7 - 8 - .global cris_spin_lock 9 - .type cris_spin_lock,@function 10 - .global cris_spin_trylock 11 - .type cris_spin_trylock,@function 12 - 13 - .text 14 - 15 - cris_spin_lock: 16 - clearf p 17 - 1: test.b [$r10] 18 - beq 1b 19 - clearf p 20 - ax 21 - clear.b [$r10] 22 - bcs 1b 23 - clearf p 24 - ret 25 - nop 26 - 27 - .size cris_spin_lock, . - cris_spin_lock 28 - 29 - cris_spin_trylock: 30 - clearf p 31 - 1: move.b [$r10], $r11 32 - ax 33 - clear.b [$r10] 34 - bcs 1b 35 - clearf p 36 - ret 37 - movu.b $r11,$r10 38 - 39 - .size cris_spin_trylock, . - cris_spin_trylock 40 -
-11
arch/cris/arch-v32/mm/init.c
··· 40 40 */ 41 41 per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd; 42 42 43 - #ifdef CONFIG_SMP 44 - { 45 - pgd_t **pgd; 46 - pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id()); 47 - SUPP_BANK_SEL(1); 48 - SUPP_REG_WR(RW_MM_TLB_PGD, pgd); 49 - SUPP_BANK_SEL(2); 50 - SUPP_REG_WR(RW_MM_TLB_PGD, pgd); 51 - } 52 - #endif 53 - 54 43 /* Initialise the TLB. Function found in tlb.c. */ 55 44 tlb_init(); 56 45
-4
arch/cris/arch-v32/mm/mmu.S
··· 115 115 move.d $r0, [$r1] ; last_refill_cause = rw_mm_cause 116 116 117 117 3: ; Probably not in a loop, continue normal processing 118 - #ifdef CONFIG_SMP 119 - move $s7, $acr ; PGD 120 - #else 121 118 move.d current_pgd, $acr ; PGD 122 - #endif 123 119 ; Look up PMD in PGD 124 120 lsrq 24, $r0 ; Get PMD index into PGD (bit 24-31) 125 121 move.d [$acr], $acr ; PGD for the current process
-28
arch/cris/include/arch-v32/arch/atomic.h
··· 1 1 #ifndef __ASM_CRIS_ARCH_ATOMIC__ 2 2 #define __ASM_CRIS_ARCH_ATOMIC__ 3 3 4 - #include <linux/spinlock_types.h> 5 - 6 - extern void cris_spin_unlock(void *l, int val); 7 - extern void cris_spin_lock(void *l); 8 - extern int cris_spin_trylock(void* l); 9 - 10 - #ifndef CONFIG_SMP 11 4 #define cris_atomic_save(addr, flags) local_irq_save(flags); 12 5 #define cris_atomic_restore(addr, flags) local_irq_restore(flags); 13 - #else 14 - 15 - extern spinlock_t cris_atomic_locks[]; 16 - #define LOCK_COUNT 128 17 - #define HASH_ADDR(a) (((int)a) & 127) 18 - 19 - #define cris_atomic_save(addr, flags) \ 20 - local_irq_save(flags); \ 21 - cris_spin_lock((void *)&cris_atomic_locks[HASH_ADDR(addr)].raw_lock.slock); 22 - 23 - #define cris_atomic_restore(addr, flags) \ 24 - { \ 25 - spinlock_t *lock = (void*)&cris_atomic_locks[HASH_ADDR(addr)]; \ 26 - __asm__ volatile ("move.d %1,%0" \ 27 - : "=m" (lock->raw_lock.slock) \ 28 - : "r" (1) \ 29 - : "memory"); \ 30 - local_irq_restore(flags); \ 31 - } 32 - 33 - #endif 34 6 35 7 #endif 36 8
-131
arch/cris/include/arch-v32/arch/spinlock.h
··· 1 - #ifndef __ASM_ARCH_SPINLOCK_H 2 - #define __ASM_ARCH_SPINLOCK_H 3 - 4 - #include <linux/spinlock_types.h> 5 - 6 - #define RW_LOCK_BIAS 0x01000000 7 - 8 - extern void cris_spin_unlock(void *l, int val); 9 - extern void cris_spin_lock(void *l); 10 - extern int cris_spin_trylock(void *l); 11 - 12 - static inline int arch_spin_is_locked(arch_spinlock_t *x) 13 - { 14 - return *(volatile signed char *)(&(x)->slock) <= 0; 15 - } 16 - 17 - static inline void arch_spin_unlock(arch_spinlock_t *lock) 18 - { 19 - __asm__ volatile ("move.d %1,%0" \ 20 - : "=m" (lock->slock) \ 21 - : "r" (1) \ 22 - : "memory"); 23 - } 24 - 25 - static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) 26 - { 27 - while (arch_spin_is_locked(lock)) 28 - cpu_relax(); 29 - } 30 - 31 - static inline int arch_spin_trylock(arch_spinlock_t *lock) 32 - { 33 - return cris_spin_trylock((void *)&lock->slock); 34 - } 35 - 36 - static inline void arch_spin_lock(arch_spinlock_t *lock) 37 - { 38 - cris_spin_lock((void *)&lock->slock); 39 - } 40 - 41 - static inline void 42 - arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 43 - { 44 - arch_spin_lock(lock); 45 - } 46 - 47 - /* 48 - * Read-write spinlocks, allowing multiple readers 49 - * but only one writer. 50 - * 51 - * NOTE! it is quite common to have readers in interrupts 52 - * but no interrupt writers. For those circumstances we 53 - * can "mix" irq-safe locks - any writer needs to get a 54 - * irq-safe write-lock, but readers can get non-irqsafe 55 - * read-locks. 
56 - * 57 - */ 58 - 59 - static inline int arch_read_can_lock(arch_rwlock_t *x) 60 - { 61 - return (int)(x)->lock > 0; 62 - } 63 - 64 - static inline int arch_write_can_lock(arch_rwlock_t *x) 65 - { 66 - return (x)->lock == RW_LOCK_BIAS; 67 - } 68 - 69 - static inline void arch_read_lock(arch_rwlock_t *rw) 70 - { 71 - arch_spin_lock(&rw->slock); 72 - while (rw->lock == 0); 73 - rw->lock--; 74 - arch_spin_unlock(&rw->slock); 75 - } 76 - 77 - static inline void arch_write_lock(arch_rwlock_t *rw) 78 - { 79 - arch_spin_lock(&rw->slock); 80 - while (rw->lock != RW_LOCK_BIAS); 81 - rw->lock = 0; 82 - arch_spin_unlock(&rw->slock); 83 - } 84 - 85 - static inline void arch_read_unlock(arch_rwlock_t *rw) 86 - { 87 - arch_spin_lock(&rw->slock); 88 - rw->lock++; 89 - arch_spin_unlock(&rw->slock); 90 - } 91 - 92 - static inline void arch_write_unlock(arch_rwlock_t *rw) 93 - { 94 - arch_spin_lock(&rw->slock); 95 - while (rw->lock != RW_LOCK_BIAS); 96 - rw->lock = RW_LOCK_BIAS; 97 - arch_spin_unlock(&rw->slock); 98 - } 99 - 100 - static inline int arch_read_trylock(arch_rwlock_t *rw) 101 - { 102 - int ret = 0; 103 - arch_spin_lock(&rw->slock); 104 - if (rw->lock != 0) { 105 - rw->lock--; 106 - ret = 1; 107 - } 108 - arch_spin_unlock(&rw->slock); 109 - return ret; 110 - } 111 - 112 - static inline int arch_write_trylock(arch_rwlock_t *rw) 113 - { 114 - int ret = 0; 115 - arch_spin_lock(&rw->slock); 116 - if (rw->lock == RW_LOCK_BIAS) { 117 - rw->lock = 0; 118 - ret = 1; 119 - } 120 - arch_spin_unlock(&rw->slock); 121 - return ret; 122 - } 123 - 124 - #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) 125 - #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) 126 - 127 - #define arch_spin_relax(lock) cpu_relax() 128 - #define arch_read_relax(lock) cpu_relax() 129 - #define arch_write_relax(lock) cpu_relax() 130 - 131 - #endif /* __ASM_ARCH_SPINLOCK_H */
-2
arch/cris/include/asm/cmpxchg.h
··· 46 46 (unsigned long)(n), sizeof(*(ptr)))) 47 47 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) 48 48 49 - #ifndef CONFIG_SMP 50 49 #include <asm-generic/cmpxchg.h> 51 - #endif 52 50 53 51 #endif /* __ASM_CRIS_CMPXCHG__ */
-10
arch/cris/include/asm/smp.h
··· 1 - #ifndef __ASM_SMP_H 2 - #define __ASM_SMP_H 3 - 4 - #include <linux/cpumask.h> 5 - 6 - extern cpumask_t phys_cpu_present_map; 7 - 8 - #define raw_smp_processor_id() (current_thread_info()->cpu) 9 - 10 - #endif
-1
arch/cris/include/asm/spinlock.h
··· 1 - #include <arch/spinlock.h>
-7
arch/cris/include/asm/tlbflush.h
··· 22 22 extern void __flush_tlb_page(struct vm_area_struct *vma, 23 23 unsigned long addr); 24 24 25 - #ifdef CONFIG_SMP 26 - extern void flush_tlb_all(void); 27 - extern void flush_tlb_mm(struct mm_struct *mm); 28 - extern void flush_tlb_page(struct vm_area_struct *vma, 29 - unsigned long addr); 30 - #else 31 25 #define flush_tlb_all __flush_tlb_all 32 26 #define flush_tlb_mm __flush_tlb_mm 33 27 #define flush_tlb_page __flush_tlb_page 34 - #endif 35 28 36 29 static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) 37 30 {