Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Disintegrate asm/system.h for Tile

Disintegrate asm/system.h for Tile.

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>

+309 -273
+1 -1
arch/tile/include/asm/atomic.h
··· 20 20 #ifndef __ASSEMBLY__ 21 21 22 22 #include <linux/compiler.h> 23 - #include <asm/system.h> 23 + #include <linux/types.h> 24 24 25 25 #define ATOMIC_INIT(i) { (i) } 26 26
+1
arch/tile/include/asm/atomic_32.h
··· 17 17 #ifndef _ASM_TILE_ATOMIC_32_H 18 18 #define _ASM_TILE_ATOMIC_32_H 19 19 20 + #include <asm/barrier.h> 20 21 #include <arch/chip.h> 21 22 22 23 #ifndef __ASSEMBLY__
+1
arch/tile/include/asm/atomic_64.h
··· 19 19 20 20 #ifndef __ASSEMBLY__ 21 21 22 + #include <asm/barrier.h> 22 23 #include <arch/spr_def.h> 23 24 24 25 /* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
+148
arch/tile/include/asm/barrier.h
··· 1 + /* 2 + * Copyright 2010 Tilera Corporation. All Rights Reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License 6 + * as published by the Free Software Foundation, version 2. 7 + * 8 + * This program is distributed in the hope that it will be useful, but 9 + * WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 11 + * NON INFRINGEMENT. See the GNU General Public License for 12 + * more details. 13 + */ 14 + 15 + #ifndef _ASM_TILE_BARRIER_H 16 + #define _ASM_TILE_BARRIER_H 17 + 18 + #ifndef __ASSEMBLY__ 19 + 20 + #include <linux/types.h> 21 + #include <arch/chip.h> 22 + #include <arch/spr_def.h> 23 + #include <asm/timex.h> 24 + 25 + /* 26 + * read_barrier_depends - Flush all pending reads that subsequent reads 27 + * depend on. 28 + * 29 + * No data-dependent reads from memory-like regions are ever reordered 30 + * over this barrier. All reads preceding this primitive are guaranteed 31 + * to access memory (but not necessarily other CPUs' caches) before any 32 + * reads following this primitive that depend on the data returned by 33 + * any of the preceding reads. This primitive is much lighter weight than 34 + * rmb() on most CPUs, and is never heavier weight than is 35 + * rmb(). 36 + * 37 + * These ordering constraints are respected by both the local CPU 38 + * and the compiler. 39 + * 40 + * Ordering is not guaranteed by anything other than these primitives, 41 + * not even by data dependencies. See the documentation for 42 + * memory_barrier() for examples and URLs to more information. 
43 + * 44 + * For example, the following code would force ordering (the initial 45 + * value of "a" is zero, "b" is one, and "p" is "&a"): 46 + * 47 + * <programlisting> 48 + * CPU 0 CPU 1 49 + * 50 + * b = 2; 51 + * memory_barrier(); 52 + * p = &b; q = p; 53 + * read_barrier_depends(); 54 + * d = *q; 55 + * </programlisting> 56 + * 57 + * because the read of "*q" depends on the read of "p" and these 58 + * two reads are separated by a read_barrier_depends(). However, 59 + * the following code, with the same initial values for "a" and "b": 60 + * 61 + * <programlisting> 62 + * CPU 0 CPU 1 63 + * 64 + * a = 2; 65 + * memory_barrier(); 66 + * b = 3; y = b; 67 + * read_barrier_depends(); 68 + * x = a; 69 + * </programlisting> 70 + * 71 + * does not enforce ordering, since there is no data dependency between 72 + * the read of "a" and the read of "b". Therefore, on some CPUs, such 73 + * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() 74 + * in cases like this where there are no data dependencies. 75 + */ 76 + #define read_barrier_depends() do { } while (0) 77 + 78 + #define __sync() __insn_mf() 79 + 80 + #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS() 81 + #include <hv/syscall_public.h> 82 + /* 83 + * Issue an uncacheable load to each memory controller, then 84 + * wait until those loads have completed. 85 + */ 86 + static inline void __mb_incoherent(void) 87 + { 88 + long clobber_r10; 89 + asm volatile("swint2" 90 + : "=R10" (clobber_r10) 91 + : "R10" (HV_SYS_fence_incoherent) 92 + : "r0", "r1", "r2", "r3", "r4", 93 + "r5", "r6", "r7", "r8", "r9", 94 + "r11", "r12", "r13", "r14", 95 + "r15", "r16", "r17", "r18", "r19", 96 + "r20", "r21", "r22", "r23", "r24", 97 + "r25", "r26", "r27", "r28", "r29"); 98 + } 99 + #endif 100 + 101 + /* Fence to guarantee visibility of stores to incoherent memory. 
*/ 102 + static inline void 103 + mb_incoherent(void) 104 + { 105 + __insn_mf(); 106 + 107 + #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS() 108 + { 109 + #if CHIP_HAS_TILE_WRITE_PENDING() 110 + const unsigned long WRITE_TIMEOUT_CYCLES = 400; 111 + unsigned long start = get_cycles_low(); 112 + do { 113 + if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0) 114 + return; 115 + } while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES); 116 + #endif /* CHIP_HAS_TILE_WRITE_PENDING() */ 117 + (void) __mb_incoherent(); 118 + } 119 + #endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */ 120 + } 121 + 122 + #define fast_wmb() __sync() 123 + #define fast_rmb() __sync() 124 + #define fast_mb() __sync() 125 + #define fast_iob() mb_incoherent() 126 + 127 + #define wmb() fast_wmb() 128 + #define rmb() fast_rmb() 129 + #define mb() fast_mb() 130 + #define iob() fast_iob() 131 + 132 + #ifdef CONFIG_SMP 133 + #define smp_mb() mb() 134 + #define smp_rmb() rmb() 135 + #define smp_wmb() wmb() 136 + #define smp_read_barrier_depends() read_barrier_depends() 137 + #else 138 + #define smp_mb() barrier() 139 + #define smp_rmb() barrier() 140 + #define smp_wmb() barrier() 141 + #define smp_read_barrier_depends() do { } while (0) 142 + #endif 143 + 144 + #define set_mb(var, value) \ 145 + do { var = value; mb(); } while (0) 146 + 147 + #endif /* !__ASSEMBLY__ */ 148 + #endif /* _ASM_TILE_BARRIER_H */
-1
arch/tile/include/asm/bitops_32.h
··· 17 17 18 18 #include <linux/compiler.h> 19 19 #include <linux/atomic.h> 20 - #include <asm/system.h> 21 20 22 21 /* Tile-specific routines to support <asm/bitops.h>. */ 23 22 unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
-1
arch/tile/include/asm/bitops_64.h
··· 17 17 18 18 #include <linux/compiler.h> 19 19 #include <linux/atomic.h> 20 - #include <asm/system.h> 21 20 22 21 /* See <asm/bitops.h> for API comments. */ 23 22
+10 -1
arch/tile/include/asm/cacheflush.h
··· 20 20 /* Keep includes the same across arches. */ 21 21 #include <linux/mm.h> 22 22 #include <linux/cache.h> 23 - #include <asm/system.h> 24 23 #include <arch/icache.h> 25 24 26 25 /* Caches are physically-indexed and so don't need special treatment */ ··· 150 151 * that is homed with "hash for home". 151 152 */ 152 153 void finv_buffer_remote(void *buffer, size_t size, int hfh); 154 + 155 + /* 156 + * On SMP systems, when the scheduler does migration-cost autodetection, 157 + * it needs a way to flush as much of the CPU's caches as possible: 158 + * 159 + * TODO: fill this in! 160 + */ 161 + static inline void sched_cacheflush(void) 162 + { 163 + } 153 164 154 165 #endif /* _ASM_TILE_CACHEFLUSH_H */
+20
arch/tile/include/asm/exec.h
··· 1 + /* 2 + * Copyright 2010 Tilera Corporation. All Rights Reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License 6 + * as published by the Free Software Foundation, version 2. 7 + * 8 + * This program is distributed in the hope that it will be useful, but 9 + * WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 11 + * NON INFRINGEMENT. See the GNU General Public License for 12 + * more details. 13 + */ 14 + 15 + #ifndef _ASM_TILE_EXEC_H 16 + #define _ASM_TILE_EXEC_H 17 + 18 + #define arch_align_stack(x) (x) 19 + 20 + #endif /* _ASM_TILE_EXEC_H */
-1
arch/tile/include/asm/pgtable.h
··· 29 29 #include <linux/spinlock.h> 30 30 #include <asm/processor.h> 31 31 #include <asm/fixmap.h> 32 - #include <asm/system.h> 33 32 34 33 struct mm_struct; 35 34 struct vm_area_struct;
+22
arch/tile/include/asm/setup.h
··· 31 31 void warn_early_printk(void); 32 32 void __init disable_early_printk(void); 33 33 34 + /* Init-time routine to do tile-specific per-cpu setup. */ 35 + void setup_cpu(int boot); 36 + 37 + /* User-level DMA management functions */ 38 + void grant_dma_mpls(void); 39 + void restrict_dma_mpls(void); 40 + 41 + #ifdef CONFIG_HARDWALL 42 + /* User-level network management functions */ 43 + void reset_network_state(void); 44 + void grant_network_mpls(void); 45 + void restrict_network_mpls(void); 46 + struct task_struct; 47 + int hardwall_deactivate(struct task_struct *task); 48 + 49 + /* Hook hardwall code into changes in affinity. */ 50 + #define arch_set_cpus_allowed(p, new_mask) do { \ 51 + if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \ 52 + hardwall_deactivate(p); \ 53 + } while (0) 54 + #endif 55 + 34 56 #endif /* __KERNEL__ */ 35 57 36 58 #endif /* _ASM_TILE_SETUP_H */
-1
arch/tile/include/asm/spinlock_32.h
··· 19 19 20 20 #include <linux/atomic.h> 21 21 #include <asm/page.h> 22 - #include <asm/system.h> 23 22 #include <linux/compiler.h> 24 23 25 24 /*
+76
arch/tile/include/asm/switch_to.h
··· 1 + /* 2 + * Copyright 2010 Tilera Corporation. All Rights Reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License 6 + * as published by the Free Software Foundation, version 2. 7 + * 8 + * This program is distributed in the hope that it will be useful, but 9 + * WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 11 + * NON INFRINGEMENT. See the GNU General Public License for 12 + * more details. 13 + */ 14 + 15 + #ifndef _ASM_TILE_SWITCH_TO_H 16 + #define _ASM_TILE_SWITCH_TO_H 17 + 18 + #include <arch/sim_def.h> 19 + 20 + /* 21 + * switch_to(n) should switch tasks to task nr n, first 22 + * checking that n isn't the current task, in which case it does nothing. 23 + * The number of callee-saved registers saved on the kernel stack 24 + * is defined here for use in copy_thread() and must agree with __switch_to(). 25 + */ 26 + #define CALLEE_SAVED_FIRST_REG 30 27 + #define CALLEE_SAVED_REGS_COUNT 24 /* r30 to r52, plus an empty to align */ 28 + 29 + #ifndef __ASSEMBLY__ 30 + 31 + struct task_struct; 32 + 33 + /* 34 + * Pause the DMA engine and static network before task switching. 35 + */ 36 + #define prepare_arch_switch(next) _prepare_arch_switch(next) 37 + void _prepare_arch_switch(struct task_struct *next); 38 + 39 + struct task_struct; 40 + #define switch_to(prev, next, last) ((last) = _switch_to((prev), (next))) 41 + extern struct task_struct *_switch_to(struct task_struct *prev, 42 + struct task_struct *next); 43 + 44 + /* Helper function for _switch_to(). */ 45 + extern struct task_struct *__switch_to(struct task_struct *prev, 46 + struct task_struct *next, 47 + unsigned long new_system_save_k_0); 48 + 49 + /* Address that switched-away from tasks are at. 
*/ 50 + extern unsigned long get_switch_to_pc(void); 51 + 52 + /* 53 + * Kernel threads can check to see if they need to migrate their 54 + * stack whenever they return from a context switch; for user 55 + * threads, we defer until they are returning to user-space. 56 + */ 57 + #define finish_arch_switch(prev) do { \ 58 + if (unlikely((prev)->state == TASK_DEAD)) \ 59 + __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | \ 60 + ((prev)->pid << _SIM_CONTROL_OPERATOR_BITS)); \ 61 + __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH | \ 62 + (current->pid << _SIM_CONTROL_OPERATOR_BITS)); \ 63 + if (current->mm == NULL && !kstack_hash && \ 64 + current_thread_info()->homecache_cpu != smp_processor_id()) \ 65 + homecache_migrate_kthread(); \ 66 + } while (0) 67 + 68 + /* Support function for forking a new task. */ 69 + void ret_from_fork(void); 70 + 71 + /* Called from ret_from_fork() when a new process starts up. */ 72 + struct task_struct *sim_notify_fork(struct task_struct *prev); 73 + 74 + #endif /* !__ASSEMBLY__ */ 75 + 76 + #endif /* _ASM_TILE_SWITCH_TO_H */
+4 -261
arch/tile/include/asm/system.h
··· 1 - /* 2 - * Copyright 2010 Tilera Corporation. All Rights Reserved. 3 - * 4 - * This program is free software; you can redistribute it and/or 5 - * modify it under the terms of the GNU General Public License 6 - * as published by the Free Software Foundation, version 2. 7 - * 8 - * This program is distributed in the hope that it will be useful, but 9 - * WITHOUT ANY WARRANTY; without even the implied warranty of 10 - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 11 - * NON INFRINGEMENT. See the GNU General Public License for 12 - * more details. 13 - */ 14 - 15 - #ifndef _ASM_TILE_SYSTEM_H 16 - #define _ASM_TILE_SYSTEM_H 17 - 18 - #ifndef __ASSEMBLY__ 19 - 20 - #include <linux/types.h> 21 - #include <linux/irqflags.h> 22 - 23 - /* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */ 24 - #include <asm/ptrace.h> 25 - 26 - #include <arch/chip.h> 27 - #include <arch/sim_def.h> 28 - #include <arch/spr_def.h> 29 - 30 - /* 31 - * read_barrier_depends - Flush all pending reads that subsequents reads 32 - * depend on. 33 - * 34 - * No data-dependent reads from memory-like regions are ever reordered 35 - * over this barrier. All reads preceding this primitive are guaranteed 36 - * to access memory (but not necessarily other CPUs' caches) before any 37 - * reads following this primitive that depend on the data return by 38 - * any of the preceding reads. This primitive is much lighter weight than 39 - * rmb() on most CPUs, and is never heavier weight than is 40 - * rmb(). 41 - * 42 - * These ordering constraints are respected by both the local CPU 43 - * and the compiler. 44 - * 45 - * Ordering is not guaranteed by anything other than these primitives, 46 - * not even by data dependencies. See the documentation for 47 - * memory_barrier() for examples and URLs to more information. 
48 - * 49 - * For example, the following code would force ordering (the initial 50 - * value of "a" is zero, "b" is one, and "p" is "&a"): 51 - * 52 - * <programlisting> 53 - * CPU 0 CPU 1 54 - * 55 - * b = 2; 56 - * memory_barrier(); 57 - * p = &b; q = p; 58 - * read_barrier_depends(); 59 - * d = *q; 60 - * </programlisting> 61 - * 62 - * because the read of "*q" depends on the read of "p" and these 63 - * two reads are separated by a read_barrier_depends(). However, 64 - * the following code, with the same initial values for "a" and "b": 65 - * 66 - * <programlisting> 67 - * CPU 0 CPU 1 68 - * 69 - * a = 2; 70 - * memory_barrier(); 71 - * b = 3; y = b; 72 - * read_barrier_depends(); 73 - * x = a; 74 - * </programlisting> 75 - * 76 - * does not enforce ordering, since there is no data dependency between 77 - * the read of "a" and the read of "b". Therefore, on some CPUs, such 78 - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() 79 - * in cases like this where there are no data dependencies. 80 - */ 81 - 82 - #define read_barrier_depends() do { } while (0) 83 - 84 - #define __sync() __insn_mf() 85 - 86 - #if CHIP_HAS_SPLIT_CYCLE() 87 - #define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW) 88 - #else 89 - #define get_cycles_low() __insn_mfspr(SPR_CYCLE) /* just get all 64 bits */ 90 - #endif 91 - 92 - #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS() 93 - #include <hv/syscall_public.h> 94 - /* 95 - * Issue an uncacheable load to each memory controller, then 96 - * wait until those loads have completed. 
97 - */ 98 - static inline void __mb_incoherent(void) 99 - { 100 - long clobber_r10; 101 - asm volatile("swint2" 102 - : "=R10" (clobber_r10) 103 - : "R10" (HV_SYS_fence_incoherent) 104 - : "r0", "r1", "r2", "r3", "r4", 105 - "r5", "r6", "r7", "r8", "r9", 106 - "r11", "r12", "r13", "r14", 107 - "r15", "r16", "r17", "r18", "r19", 108 - "r20", "r21", "r22", "r23", "r24", 109 - "r25", "r26", "r27", "r28", "r29"); 110 - } 111 - #endif 112 - 113 - /* Fence to guarantee visibility of stores to incoherent memory. */ 114 - static inline void 115 - mb_incoherent(void) 116 - { 117 - __insn_mf(); 118 - 119 - #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS() 120 - { 121 - #if CHIP_HAS_TILE_WRITE_PENDING() 122 - const unsigned long WRITE_TIMEOUT_CYCLES = 400; 123 - unsigned long start = get_cycles_low(); 124 - do { 125 - if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0) 126 - return; 127 - } while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES); 128 - #endif /* CHIP_HAS_TILE_WRITE_PENDING() */ 129 - (void) __mb_incoherent(); 130 - } 131 - #endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */ 132 - } 133 - 134 - #define fast_wmb() __sync() 135 - #define fast_rmb() __sync() 136 - #define fast_mb() __sync() 137 - #define fast_iob() mb_incoherent() 138 - 139 - #define wmb() fast_wmb() 140 - #define rmb() fast_rmb() 141 - #define mb() fast_mb() 142 - #define iob() fast_iob() 143 - 144 - #ifdef CONFIG_SMP 145 - #define smp_mb() mb() 146 - #define smp_rmb() rmb() 147 - #define smp_wmb() wmb() 148 - #define smp_read_barrier_depends() read_barrier_depends() 149 - #else 150 - #define smp_mb() barrier() 151 - #define smp_rmb() barrier() 152 - #define smp_wmb() barrier() 153 - #define smp_read_barrier_depends() do { } while (0) 154 - #endif 155 - 156 - #define set_mb(var, value) \ 157 - do { var = value; mb(); } while (0) 158 - 159 - /* 160 - * Pause the DMA engine and static network before task switching. 
161 - */ 162 - #define prepare_arch_switch(next) _prepare_arch_switch(next) 163 - void _prepare_arch_switch(struct task_struct *next); 164 - 165 - 166 - /* 167 - * switch_to(n) should switch tasks to task nr n, first 168 - * checking that n isn't the current task, in which case it does nothing. 169 - * The number of callee-saved registers saved on the kernel stack 170 - * is defined here for use in copy_thread() and must agree with __switch_to(). 171 - */ 172 - #endif /* !__ASSEMBLY__ */ 173 - #define CALLEE_SAVED_FIRST_REG 30 174 - #define CALLEE_SAVED_REGS_COUNT 24 /* r30 to r52, plus an empty to align */ 175 - #ifndef __ASSEMBLY__ 176 - struct task_struct; 177 - #define switch_to(prev, next, last) ((last) = _switch_to((prev), (next))) 178 - extern struct task_struct *_switch_to(struct task_struct *prev, 179 - struct task_struct *next); 180 - 181 - /* Helper function for _switch_to(). */ 182 - extern struct task_struct *__switch_to(struct task_struct *prev, 183 - struct task_struct *next, 184 - unsigned long new_system_save_k_0); 185 - 186 - /* Address that switched-away from tasks are at. */ 187 - extern unsigned long get_switch_to_pc(void); 188 - 189 - /* 190 - * On SMP systems, when the scheduler does migration-cost autodetection, 191 - * it needs a way to flush as much of the CPU's caches as possible: 192 - * 193 - * TODO: fill this in! 194 - */ 195 - static inline void sched_cacheflush(void) 196 - { 197 - } 198 - 199 - #define arch_align_stack(x) (x) 200 - 201 - /* 202 - * Is the kernel doing fixups of unaligned accesses? If <0, no kernel 203 - * intervention occurs and SIGBUS is delivered with no data address 204 - * info. If 0, the kernel single-steps the instruction to discover 205 - * the data address to provide with the SIGBUS. If 1, the kernel does 206 - * a fixup. 207 - */ 208 - extern int unaligned_fixup; 209 - 210 - /* Is the kernel printing on each unaligned fixup? 
*/ 211 - extern int unaligned_printk; 212 - 213 - /* Number of unaligned fixups performed */ 214 - extern unsigned int unaligned_fixup_count; 215 - 216 - /* Init-time routine to do tile-specific per-cpu setup. */ 217 - void setup_cpu(int boot); 218 - 219 - /* User-level DMA management functions */ 220 - void grant_dma_mpls(void); 221 - void restrict_dma_mpls(void); 222 - 223 - #ifdef CONFIG_HARDWALL 224 - /* User-level network management functions */ 225 - void reset_network_state(void); 226 - void grant_network_mpls(void); 227 - void restrict_network_mpls(void); 228 - int hardwall_deactivate(struct task_struct *task); 229 - 230 - /* Hook hardwall code into changes in affinity. */ 231 - #define arch_set_cpus_allowed(p, new_mask) do { \ 232 - if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \ 233 - hardwall_deactivate(p); \ 234 - } while (0) 235 - #endif 236 - 237 - /* 238 - * Kernel threads can check to see if they need to migrate their 239 - * stack whenever they return from a context switch; for user 240 - * threads, we defer until they are returning to user-space. 241 - */ 242 - #define finish_arch_switch(prev) do { \ 243 - if (unlikely((prev)->state == TASK_DEAD)) \ 244 - __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | \ 245 - ((prev)->pid << _SIM_CONTROL_OPERATOR_BITS)); \ 246 - __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH | \ 247 - (current->pid << _SIM_CONTROL_OPERATOR_BITS)); \ 248 - if (current->mm == NULL && !kstack_hash && \ 249 - current_thread_info()->homecache_cpu != smp_processor_id()) \ 250 - homecache_migrate_kthread(); \ 251 - } while (0) 252 - 253 - /* Support function for forking a new task. */ 254 - void ret_from_fork(void); 255 - 256 - /* Called from ret_from_fork() when a new process starts up. */ 257 - struct task_struct *sim_notify_fork(struct task_struct *prev); 258 - 259 - #endif /* !__ASSEMBLY__ */ 260 - 261 - #endif /* _ASM_TILE_SYSTEM_H */ 1 + /* FILE TO BE DELETED. DO NOT ADD STUFF HERE! 
*/ 2 + #include <asm/barrier.h> 3 + #include <asm/exec.h> 4 + #include <asm/switch_to.h>
+2
arch/tile/include/asm/timex.h
··· 29 29 30 30 #if CHIP_HAS_SPLIT_CYCLE() 31 31 cycles_t get_cycles(void); 32 + #define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW) 32 33 #else 33 34 static inline cycles_t get_cycles(void) 34 35 { 35 36 return __insn_mfspr(SPR_CYCLE); 36 37 } 38 + #define get_cycles_low() __insn_mfspr(SPR_CYCLE) /* just get all 64 bits */ 37 39 #endif 38 40 39 41 cycles_t get_clock_rate(void);
+15
arch/tile/include/asm/unaligned.h
··· 21 21 #define get_unaligned __get_unaligned_le 22 22 #define put_unaligned __put_unaligned_le 23 23 24 + /* 25 + * Is the kernel doing fixups of unaligned accesses? If <0, no kernel 26 + * intervention occurs and SIGBUS is delivered with no data address 27 + * info. If 0, the kernel single-steps the instruction to discover 28 + * the data address to provide with the SIGBUS. If 1, the kernel does 29 + * a fixup. 30 + */ 31 + extern int unaligned_fixup; 32 + 33 + /* Is the kernel printing on each unaligned fixup? */ 34 + extern int unaligned_printk; 35 + 36 + /* Number of unaligned fixups performed */ 37 + extern unsigned int unaligned_fixup_count; 38 + 24 39 #endif /* _ASM_TILE_UNALIGNED_H */
+1
arch/tile/kernel/early_printk.c
··· 16 16 #include <linux/kernel.h> 17 17 #include <linux/init.h> 18 18 #include <linux/string.h> 19 + #include <linux/irqflags.h> 19 20 #include <asm/setup.h> 20 21 #include <hv/hypervisor.h> 21 22
+1
arch/tile/kernel/proc.c
··· 23 23 #include <linux/sysctl.h> 24 24 #include <linux/hardirq.h> 25 25 #include <linux/mman.h> 26 + #include <asm/unaligned.h> 26 27 #include <asm/pgtable.h> 27 28 #include <asm/processor.h> 28 29 #include <asm/sections.h>
+2 -1
arch/tile/kernel/process.c
··· 27 27 #include <linux/kernel.h> 28 28 #include <linux/tracehook.h> 29 29 #include <linux/signal.h> 30 - #include <asm/system.h> 31 30 #include <asm/stack.h> 32 31 #include <asm/homecache.h> 33 32 #include <asm/syscalls.h> 34 33 #include <asm/traps.h> 34 + #include <asm/setup.h> 35 35 #ifdef CONFIG_HARDWALL 36 36 #include <asm/hardwall.h> 37 37 #endif 38 38 #include <arch/chip.h> 39 39 #include <arch/abi.h> 40 + #include <arch/sim_def.h> 40 41 41 42 42 43 /*
+1 -1
arch/tile/kernel/regs_32.S
··· 13 13 */ 14 14 15 15 #include <linux/linkage.h> 16 - #include <asm/system.h> 17 16 #include <asm/ptrace.h> 18 17 #include <asm/asm-offsets.h> 19 18 #include <arch/spr_def.h> 20 19 #include <asm/processor.h> 20 + #include <asm/switch_to.h> 21 21 22 22 /* 23 23 * See <asm/system.h>; called with prev and next task_struct pointers.
+1 -1
arch/tile/kernel/regs_64.S
··· 13 13 */ 14 14 15 15 #include <linux/linkage.h> 16 - #include <asm/system.h> 17 16 #include <asm/ptrace.h> 18 17 #include <asm/asm-offsets.h> 19 18 #include <arch/spr_def.h> 20 19 #include <asm/processor.h> 20 + #include <asm/switch_to.h> 21 21 22 22 /* 23 23 * See <asm/system.h>; called with prev and next task_struct pointers.
+1
arch/tile/kernel/single_step.c
··· 25 25 #include <linux/types.h> 26 26 #include <linux/err.h> 27 27 #include <asm/cacheflush.h> 28 + #include <asm/unaligned.h> 28 29 #include <arch/abi.h> 29 30 #include <arch/opcode.h> 30 31
+1
arch/tile/kernel/traps.c
··· 21 21 #include <linux/ptrace.h> 22 22 #include <asm/stack.h> 23 23 #include <asm/traps.h> 24 + #include <asm/setup.h> 24 25 25 26 #include <arch/interrupts.h> 26 27 #include <arch/spr_def.h>
+1
arch/tile/mm/elf.c
··· 21 21 #include <asm/pgtable.h> 22 22 #include <asm/pgalloc.h> 23 23 #include <asm/sections.h> 24 + #include <arch/sim_def.h> 24 25 25 26 /* Notify a running simulator, if any, that an exec just occurred. */ 26 27 static void sim_notify_exec(const char *binary_name)
-1
arch/tile/mm/fault.c
··· 35 35 #include <linux/syscalls.h> 36 36 #include <linux/uaccess.h> 37 37 38 - #include <asm/system.h> 39 38 #include <asm/pgalloc.h> 40 39 #include <asm/sections.h> 41 40 #include <asm/traps.h>
-1
arch/tile/mm/init.c
··· 38 38 #include <linux/uaccess.h> 39 39 #include <asm/mmu_context.h> 40 40 #include <asm/processor.h> 41 - #include <asm/system.h> 42 41 #include <asm/pgtable.h> 43 42 #include <asm/pgalloc.h> 44 43 #include <asm/dma.h>
-1
arch/tile/mm/pgtable.c
··· 27 27 #include <linux/vmalloc.h> 28 28 #include <linux/smp.h> 29 29 30 - #include <asm/system.h> 31 30 #include <asm/pgtable.h> 32 31 #include <asm/pgalloc.h> 33 32 #include <asm/fixmap.h>