Merge master.kernel.org:/home/rmk/linux-2.6-arm

* master.kernel.org:/home/rmk/linux-2.6-arm:
ARM: 5974/1: arm/mach-at91 Makefile: remove two blanks.
ARM: 6052/1: kdump: make kexec work in interrupt context
ARM: 6051/1: VFP: preserve the HW context when calling signal handlers
ARM: 6050/1: VFP: fix the SMP versions of vfp_{sync,flush}_hwstate
ARM: 6007/1: fix highmem with VIPT cache and DMA
ARM: 5975/1: AT91 slow-clock suspend: don't wait when turning PLLs off

+251 -68
+14 -1
arch/arm/include/asm/highmem.h
···
 #define kmap_prot		PAGE_KERNEL

-#define flush_cache_kmaps()	flush_cache_all()
+#define flush_cache_kmaps() \
+	do { \
+		if (cache_is_vivt()) \
+			flush_cache_all(); \
+	} while (0)

 extern pte_t *pkmap_page_table;
···
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);

+extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
+extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
+
+/*
+ * The following functions are already defined by <linux/highmem.h>
+ * when CONFIG_HIGHMEM is not set.
+ */
+#ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
 extern void *kmap_atomic(struct page *page, enum km_type type);
 extern void kunmap_atomic(void *kvaddr, enum km_type type);
 extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 extern struct page *kmap_atomic_to_page(const void *ptr);
+#endif

 #endif
+1
arch/arm/include/asm/kmap_types.h
···
 	KM_IRQ1,
 	KM_SOFTIRQ0,
 	KM_SOFTIRQ1,
+	KM_L1_CACHE,
 	KM_L2_CACHE,
 	KM_TYPE_NR
 };
+11 -12
arch/arm/include/asm/ucontext.h
···
 #endif /* CONFIG_IWMMXT */

 #ifdef CONFIG_VFP
-#if __LINUX_ARM_ARCH__ < 6
-/* For ARM pre-v6, we use fstmiax and fldmiax.  This adds one extra
- * word after the registers, and a word of padding at the end for
- * alignment. */
 #define VFP_MAGIC		0x56465001
-#define VFP_STORAGE_SIZE	152
-#else
-#define VFP_MAGIC		0x56465002
-#define VFP_STORAGE_SIZE	144
-#endif

 struct vfp_sigframe
 {
 	unsigned long		magic;
 	unsigned long		size;
-	union vfp_state		storage;
-};
+	struct user_vfp		ufp;
+	struct user_vfp_exc	ufp_exc;
+} __attribute__((__aligned__(8)));
+
+/*
+ *  8 byte for magic and size, 264 byte for ufp, 12 bytes for ufp_exc,
+ *  4 bytes padding.
+ */
+#define VFP_STORAGE_SIZE	sizeof(struct vfp_sigframe)
+
 #endif /* CONFIG_VFP */

 /*
···
 #ifdef CONFIG_IWMMXT
 	struct iwmmxt_sigframe	iwmmxt;
 #endif
-#if 0 && defined CONFIG_VFP /* Not yet saved. */
+#ifdef CONFIG_VFP
 	struct vfp_sigframe	vfp;
 #endif
 	/* Something that isn't a valid magic number for any coprocessor. */
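
The byte counts in that new comment can be sanity-checked outside the kernel. The sketch below is a stand-alone mock (not the kernel headers), using fixed-width types to stand in for 32-bit ARM's unsigned long (4 bytes) and unsigned long long (8 bytes); the forced 8-byte alignment mirrors the EABI alignment of the 64-bit fpregs member.

#include <assert.h>
#include <stdint.h>

/* Mock of struct user_vfp: 32 * 8 = 256 bytes of registers plus a
 * 4-byte FPSCR, padded to 264 by the 8-byte alignment. */
struct mock_user_vfp {
	uint64_t fpregs[32];
	uint32_t fpscr;
} __attribute__((__aligned__(8)));

/* Mock of struct user_vfp_exc: three 4-byte registers, 12 bytes. */
struct mock_user_vfp_exc {
	uint32_t fpexc;
	uint32_t fpinst;
	uint32_t fpinst2;
};

/* Mock of struct vfp_sigframe: 8 (magic + size) + 264 + 12 = 284,
 * rounded up to 288 by the 8-byte alignment of the whole frame. */
struct mock_vfp_sigframe {
	uint32_t magic;
	uint32_t size;
	struct mock_user_vfp ufp;
	struct mock_user_vfp_exc ufp_exc;
} __attribute__((__aligned__(8)));

int main(void)
{
	assert(sizeof(struct mock_user_vfp) == 264);
	assert(sizeof(struct mock_user_vfp_exc) == 12);
	assert(sizeof(struct mock_vfp_sigframe) == 288);	/* VFP_STORAGE_SIZE */
	return 0;
}
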
+11 -1
arch/arm/include/asm/user.h
···

 /*
  * User specific VFP registers. If only VFPv2 is present, registers 16 to 31
- * are ignored by the ptrace system call.
+ * are ignored by the ptrace system call and the signal handler.
  */
 struct user_vfp {
 	unsigned long long fpregs[32];
 	unsigned long fpscr;
+};
+
+/*
+ * VFP exception registers exposed to user space during signal delivery.
+ * Fields not relavant to the current VFP architecture are ignored.
+ */
+struct user_vfp_exc {
+	unsigned long	fpexc;
+	unsigned long	fpinst;
+	unsigned long	fpinst2;
 };

 #endif /* _ARM_USER_H */
+89 -4
arch/arm/kernel/signal.c
···
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 #include <asm/unistd.h>
+#include <asm/vfp.h>

 #include "ptrace.h"
 #include "signal.h"
···

 #endif

+#ifdef CONFIG_VFP
+
+static int preserve_vfp_context(struct vfp_sigframe __user *frame)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *h = &thread->vfpstate.hard;
+	const unsigned long magic = VFP_MAGIC;
+	const unsigned long size = VFP_STORAGE_SIZE;
+	int err = 0;
+
+	vfp_sync_hwstate(thread);
+	__put_user_error(magic, &frame->magic, err);
+	__put_user_error(size, &frame->size, err);
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers see asm/hwcap.h for details.
+	 */
+	err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
+			      sizeof(h->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__put_user_error(h->fpscr, &frame->ufp.fpscr, err);
+
+	/*
+	 * Copy the exception registers.
+	 */
+	__put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
+	__put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+	__put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+	return err ? -EFAULT : 0;
+}
+
+static int restore_vfp_context(struct vfp_sigframe __user *frame)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *h = &thread->vfpstate.hard;
+	unsigned long magic;
+	unsigned long size;
+	unsigned long fpexc;
+	int err = 0;
+
+	__get_user_error(magic, &frame->magic, err);
+	__get_user_error(size, &frame->size, err);
+
+	if (err)
+		return -EFAULT;
+	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+		return -EINVAL;
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers see asm/hwcap.h for details.
+	 */
+	err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
+				sizeof(h->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__get_user_error(h->fpscr, &frame->ufp.fpscr, err);
+
+	/*
+	 * Sanitise and restore the exception registers.
+	 */
+	__get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
+	/* Ensure the VFP is enabled. */
+	fpexc |= FPEXC_EN;
+	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
+	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+	h->fpexc = fpexc;
+
+	__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+	__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+	if (!err)
+		vfp_flush_hwstate(thread);
+
+	return err ? -EFAULT : 0;
+}
+
+#endif
+
 /*
  * Do a signal return; undo the signal stack. These are aligned to 64-bit.
  */
···
 	err |= restore_iwmmxt_context(&aux->iwmmxt);
 #endif
 #ifdef CONFIG_VFP
-//	if (err == 0)
-//		err |= vfp_restore_state(&sf->aux.vfp);
+	if (err == 0)
+		err |= restore_vfp_context(&aux->vfp);
 #endif

 	return err;
···
 	err |= preserve_iwmmxt_context(&aux->iwmmxt);
 #endif
 #ifdef CONFIG_VFP
-//	if (err == 0)
-//		err |= vfp_save_state(&sf->aux.vfp);
+	if (err == 0)
+		err |= preserve_vfp_context(&aux->vfp);
 #endif
 	__put_user_error(0, &aux->end_magic, err);

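
With preserve_vfp_context() in place, a user signal handler installed with SA_SIGINFO can now find the interrupted thread's VFP state in the coprocessor area of the signal frame, keyed by VFP_MAGIC. A minimal user-space sketch for 32-bit ARM follows; it assumes a glibc-style ucontext_t whose uc_regspace[] array lines up with the kernel's aux_sigframe, and the mirror structs and names here are illustrative, not part of this patch.

/* Build on 32-bit ARM: gcc -o vfpsig vfpsig.c */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>

#define VFP_MAGIC	0x56465001	/* value from asm/ucontext.h above */

/* User-side mirror of struct vfp_sigframe (32-bit ARM layout). */
struct uvfp	{ unsigned long long fpregs[32]; unsigned long fpscr; };
struct uvfp_exc	{ unsigned long fpexc, fpinst, fpinst2; };
struct vfp_frame {
	unsigned long magic;
	unsigned long size;
	struct uvfp ufp;
	struct uvfp_exc ufp_exc;
} __attribute__((__aligned__(8)));

static void handler(int sig, siginfo_t *info, void *uc_v)
{
	ucontext_t *uc = uc_v;
	unsigned long *p = uc->uc_regspace;
	unsigned long *end = p + sizeof(uc->uc_regspace) / sizeof(*p);

	(void)sig; (void)info;

	/* Coprocessor frames are (magic, size)-prefixed; 0 marks the end. */
	while (p + 2 <= end && p[0] && p[1]) {
		if (p[0] == VFP_MAGIC) {
			struct vfp_frame *vfp = (struct vfp_frame *)p;
			/* printf() is not async-signal-safe; demo only. */
			printf("FPSCR in signal frame: %#lx\n", vfp->ufp.fpscr);
			break;
		}
		p += p[1] / sizeof(*p);		/* size field is in bytes */
	}
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}

Scanning by magic/size pairs is what makes the frame discoverable without hard-coding offsets; the exact ucontext_t field names may differ between C library versions.
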
+2 -2
arch/arm/mach-at91/Makefile
···
 obj-$(CONFIG_ARCH_AT91SAM9G10)	+= at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91SAM9263)	+= at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91SAM9RL)	+= at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9G20)	+= at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9G45)	+= at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9G20)	+= at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9G45)	+= at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91CAP9)	+= at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT572D940HF)	+= at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91X40)	+= at91x40.o at91x40_time.o
-4
arch/arm/mach-at91/pm_slowclock.S
···
 	orr	r3, r3, #(1 << 29)		/* bit 29 always set */
 	str	r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)]

-	wait_pllalock
-
 	/* Save PLLB setting and disable it */
 	ldr	r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
 	str	r3, .saved_pllbr

 	mov	r3, #AT91_PMC_PLLCOUNT
 	str	r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
-
-	wait_pllblock

 	/* Turn off the main oscillator */
 	ldr	r3, [r1, #(AT91_CKGR_MOR - AT91_PMC)]
+1 -8
arch/arm/mm/copypage-v6.c
···
 	kfrom = kmap_atomic(from, KM_USER0);
 	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (page_address(to) != NULL)
-#endif
-		__cpuc_flush_dcache_area(kto, PAGE_SIZE);
+	__cpuc_flush_dcache_area(kto, PAGE_SIZE);
 	kunmap_atomic(kto, KM_USER1);
 	kunmap_atomic(kfrom, KM_USER0);
 }
+5
arch/arm/mm/dma-mapping.c
···
 			vaddr += offset;
 			op(vaddr, len, dir);
 			kunmap_high(page);
+		} else if (cache_is_vipt()) {
+			pte_t saved_pte;
+			vaddr = kmap_high_l1_vipt(page, &saved_pte);
+			op(vaddr + offset, len, dir);
+			kunmap_high_l1_vipt(page, saved_pte);
 		}
 	} else {
 		vaddr = page_address(page) + offset;
+15 -10
arch/arm/mm/flush.c
···
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
+#include <asm/highmem.h>
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
···
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
-	void *addr = page_address(page);
-
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page. This ensures that data in the physical page is mutually
 	 * coherent with the kernels mapping.
 	 */
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (addr)
-#endif
-		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+	if (!PageHighMem(page)) {
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+	} else {
+		void *addr = kmap_high_get(page);
+		if (addr) {
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high(page);
+		} else if (cache_is_vipt()) {
+			pte_t saved_pte;
+			addr = kmap_high_l1_vipt(page, &saved_pte);
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high_l1_vipt(page, saved_pte);
+		}
+	}

 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
+86 -1
arch/arm/mm/highmem.c
···
 	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

 	if (kvaddr >= (void *)FIXADDR_START) {
-		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+		if (cache_is_vivt())
+			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
···
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include <linux/percpu.h>
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped. Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However unmapped pages may still be cached with a VIPT cache, and
+ * it is not possible to perform cache maintenance on them using physical
+ * addresses unfortunately. So we have no choice but to set up a temporary
+ * virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which are possibly called from interrupt context. As we don't
+ * want to keep interrupt disabled all the time when such maintenance is
+ * taking place, we therefore allow for some reentrancy by preserving and
+ * restoring the previous fixmap entry before the interrupted context is
+ * resumed. If the reentrancy depth is 0 then there is no need to restore
+ * the previous fixmap, and leaving the current one in place allow it to
+ * be reused the next time without a TLB flush (common with DMA).
+ */
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	if (!in_interrupt())
+		preempt_disable();
+
+	raw_local_irq_save(flags);
+	(*depth)++;
+	if (pte_val(*ptep) == pte_val(pte)) {
+		*saved_pte = pte;
+	} else {
+		*saved_pte = *ptep;
+		set_pte_ext(ptep, pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	BUG_ON(pte_val(*ptep) != pte_val(pte));
+	BUG_ON(*depth <= 0);
+
+	raw_local_irq_save(flags);
+	(*depth)--;
+	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+		set_pte_ext(ptep, saved_pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	if (!in_interrupt())
+		preempt_enable();
+}
+
+#endif /* CONFIG_CPU_CACHE_VIPT */
+6 -4
arch/arm/mm/mmu.c
···
 	pgd_t *pgd;
 	int i;

-	if (current->mm && current->mm->pgd)
-		pgd = current->mm->pgd;
-	else
-		pgd = init_mm.pgd;
+	/*
+	 * We need to access to user-mode page tables here. For kernel threads
+	 * we don't have any user-mode mappings so we use the context that we
+	 * "borrowed".
+	 */
+	pgd = current->active_mm->pgd;

 	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
 	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
+10 -21
arch/arm/vfp/vfpmodule.c
···
 static inline void vfp_pm_init(void) { }
 #endif /* CONFIG_PM */

-/*
- * Synchronise the hardware VFP state of a thread other than current with the
- * saved one. This function is used by the ptrace mechanism.
- */
-#ifdef CONFIG_SMP
-void vfp_sync_hwstate(struct thread_info *thread)
-{
-}
-
-void vfp_flush_hwstate(struct thread_info *thread)
-{
-	/*
-	 * On SMP systems, the VFP state is automatically saved at every
-	 * context switch. We mark the thread VFP state as belonging to a
-	 * non-existent CPU so that the saved one will be reloaded when
-	 * needed.
-	 */
-	thread->vfpstate.hard.cpu = NR_CPUS;
-}
-#else
 void vfp_sync_hwstate(struct thread_info *thread)
 {
 	unsigned int cpu = get_cpu();
···
 		last_VFP_context[cpu] = NULL;
 	}

+#ifdef CONFIG_SMP
+	/*
+	 * For SMP we still have to take care of the case where the thread
+	 * migrates to another CPU and then back to the original CPU on which
+	 * the last VFP user is still the same thread. Mark the thread VFP
+	 * state as belonging to a non-existent CPU so that the saved one will
+	 * be reloaded in the above case.
+	 */
+	thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
 	put_cpu();
 }
-#endif

 #include <linux/smp.h>
