Merge tag 'xtensa-20140830' of git://github.com/czankel/xtensa-linux

Pull Xtensa updates from Chris Zankel:
"Xtensa improvements for 3.17:
- support highmem on cores with aliasing data cache. Enable highmem
on kc705 by default
- simplify addition of new core variants (no need to modify Kconfig /
Makefiles)
- improve robustness of unaligned access handler and its interaction
with window overflow/underflow exception handlers
- deprecate atomic and spill registers syscalls
- clean up Kconfig: remove orphan MATH_EMULATION, sort 'select'
statements
- wire up renameat2 syscall.

Various fixes:
- fix address checks in dma_{alloc,free}_coherent (runtime BUG)
- fix access to THREAD_RA/THREAD_SP/THREAD_DS (debug build breakage)
- fix TLBTEMP_BASE_2 region handling in fast_second_level_miss
(runtime unrecoverable exception)
- fix a6 and a7 handling in fast_syscall_xtensa (runtime userspace
register clobbering)
- fix kernel/user jump out of fast_unaligned (potential runtime
   unrecoverable exception)
- replace termios IOCTL code definitions with constants (userspace
build breakage)"

* tag 'xtensa-20140830' of git://github.com/czankel/xtensa-linux: (25 commits)
xtensa: deprecate fast_xtensa and fast_spill_registers syscalls
xtensa: don't allow overflow/underflow on unaligned stack
xtensa: fix a6 and a7 handling in fast_syscall_xtensa
xtensa: allow single-stepping through unaligned load/store
xtensa: move invalid unaligned instruction handler closer to its users
xtensa: make fast_unaligned store restartable
xtensa: add double exception fixup handler for fast_unaligned
xtensa: fix kernel/user jump out of fast_unaligned
xtensa: configure kc705 for highmem
xtensa: support highmem in aliasing cache flushing code
xtensa: support aliasing cache in kmap
xtensa: support aliasing cache in k[un]map_atomic
xtensa: implement clear_user_highpage and copy_user_highpage
xtensa: fix TLBTEMP_BASE_2 region handling in fast_second_level_miss
xtensa: allow fixmap and kmap span more than one page table
xtensa: make fixmap region addressing grow with index
xtensa: fix access to THREAD_RA/THREAD_SP/THREAD_DS
xtensa: add renameat2 syscall
xtensa: fix address checks in dma_{alloc,free}_coherent
xtensa: replace IOCTL code definitions with constants
...

+75 -17
arch/xtensa/Kconfig
··· 4 4 config XTENSA 5 5 def_bool y 6 6 select ARCH_WANT_FRAME_POINTERS 7 - select HAVE_IDE 8 - select GENERIC_ATOMIC64 9 - select GENERIC_CLOCKEVENTS 10 - select VIRT_TO_BUS 11 - select GENERIC_IRQ_SHOW 12 - select GENERIC_SCHED_CLOCK 13 - select MODULES_USE_ELF_RELA 14 - select GENERIC_PCI_IOMAP 15 7 select ARCH_WANT_IPC_PARSE_VERSION 16 8 select ARCH_WANT_OPTIONAL_GPIOLIB 17 9 select BUILDTIME_EXTABLE_SORT 18 10 select CLONE_BACKWARDS 19 - select IRQ_DOMAIN 20 - select HAVE_OPROFILE 11 + select COMMON_CLK 12 + select GENERIC_ATOMIC64 13 + select GENERIC_CLOCKEVENTS 14 + select GENERIC_IRQ_SHOW 15 + select GENERIC_PCI_IOMAP 16 + select GENERIC_SCHED_CLOCK 21 17 select HAVE_FUNCTION_TRACER 22 18 select HAVE_IRQ_TIME_ACCOUNTING 19 + select HAVE_OPROFILE 23 20 select HAVE_PERF_EVENTS 24 - select COMMON_CLK 21 + select IRQ_DOMAIN 22 + select MODULES_USE_ELF_RELA 23 + select VIRT_TO_BUS 25 24 help 26 25 Xtensa processors are 32-bit RISC machines designed by Tensilica 27 26 primarily for embedded systems. These processors are both ··· 61 62 def_bool y 62 63 63 64 config MMU 64 - def_bool n 65 + bool 66 + default n if !XTENSA_VARIANT_CUSTOM 67 + default XTENSA_VARIANT_MMU if XTENSA_VARIANT_CUSTOM 65 68 66 69 config VARIANT_IRQ_SWITCH 67 70 def_bool n ··· 103 102 select VARIANT_IRQ_SWITCH 104 103 select ARCH_REQUIRE_GPIOLIB 105 104 select XTENSA_CALIBRATE_CCOUNT 105 + 106 + config XTENSA_VARIANT_CUSTOM 107 + bool "Custom Xtensa processor configuration" 108 + select MAY_HAVE_SMP 109 + select HAVE_XTENSA_GPIO32 110 + help 111 + Select this variant to use a custom Xtensa processor configuration. 112 + You will be prompted for a processor variant CORENAME. 106 113 endchoice 114 + 115 + config XTENSA_VARIANT_CUSTOM_NAME 116 + string "Xtensa Processor Custom Core Variant Name" 117 + depends on XTENSA_VARIANT_CUSTOM 118 + help 119 + Provide the name of a custom Xtensa processor variant. 120 + This CORENAME selects arch/xtensa/variant/CORENAME. 121 + Dont forget you have to select MMU if you have one. 122 + 123 + config XTENSA_VARIANT_NAME 124 + string 125 + default "dc232b" if XTENSA_VARIANT_DC232B 126 + default "dc233c" if XTENSA_VARIANT_DC233C 127 + default "fsf" if XTENSA_VARIANT_FSF 128 + default "s6000" if XTENSA_VARIANT_S6000 129 + default XTENSA_VARIANT_CUSTOM_NAME if XTENSA_VARIANT_CUSTOM 130 + 131 + config XTENSA_VARIANT_MMU 132 + bool "Core variant has a Full MMU (TLB, Pages, Protection, etc)" 133 + depends on XTENSA_VARIANT_CUSTOM 134 + default y 135 + help 136 + Build a Conventional Kernel with full MMU support, 137 + ie: it supports a TLB with auto-loading, page protection. 107 138 108 139 config XTENSA_UNALIGNED_USER 109 140 bool "Unaligned memory access in use space" ··· 189 156 190 157 Say N if you want to disable CPU hotplug. 191 158 192 - config MATH_EMULATION 193 - bool "Math emulation" 194 - help 195 - Can we use information of configuration file? 196 - 197 159 config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX 198 160 bool "Initialize Xtensa MMU inside the Linux kernel code" 161 + depends on MMU 199 162 default y 200 163 help 201 164 Earlier version initialized the MMU in the exception vector ··· 221 192 222 193 config HIGHMEM 223 194 bool "High Memory Support" 195 + depends on MMU 224 196 help 225 197 Linux can use the full amount of RAM in the system by 226 198 default. However, the default MMUv2 setup only maps the ··· 237 207 N here. 238 208 239 209 If unsure, say Y. 
210 + 211 + config FAST_SYSCALL_XTENSA 212 + bool "Enable fast atomic syscalls" 213 + default n 214 + help 215 + fast_syscall_xtensa is a syscall that can make atomic operations 216 + on UP kernel when processor has no s32c1i support. 217 + 218 + This syscall is deprecated. It may have issues when called with 219 + invalid arguments. It is provided only for backwards compatibility. 220 + Only enable it if your userspace software requires it. 221 + 222 + If unsure, say N. 223 + 224 + config FAST_SYSCALL_SPILL_REGISTERS 225 + bool "Enable spill registers syscall" 226 + default n 227 + help 228 + fast_syscall_spill_registers is a syscall that spills all active 229 + register windows of a calling userspace task onto its stack. 230 + 231 + This syscall is deprecated. It may have issues when called with 232 + invalid arguments. It is provided only for backwards compatibility. 233 + Only enable it if your userspace software requires it. 234 + 235 + If unsure, say N. 240 236 241 237 endmenu 242 238 ··· 306 250 307 251 config XTENSA_PLATFORM_XT2000 308 252 bool "XT2000" 253 + select HAVE_IDE 309 254 help 310 255 XT2000 is the name of Tensilica's feature-rich emulation platform. 311 256 This hardware is capable of running a full Linux distribution. 312 257 313 258 config XTENSA_PLATFORM_S6105 314 259 bool "S6105" 260 + select HAVE_IDE 315 261 select SERIAL_CONSOLE 316 262 select NO_IOPORT_MAP 317 263
+2 -5
arch/xtensa/Makefile
··· 4 4 # for more details. 5 5 # 6 6 # Copyright (C) 2001 - 2005 Tensilica Inc. 7 + # Copyright (C) 2014 Cadence Design Systems Inc. 7 8 # 8 9 # This file is included by the global makefile so that you can add your own 9 10 # architecture-specific flags and dependencies. Remember to do have actions ··· 14 13 # Core configuration. 15 14 # (Use VAR=<xtensa_config> to use another default compiler.) 16 15 17 - variant-$(CONFIG_XTENSA_VARIANT_FSF) := fsf 18 - variant-$(CONFIG_XTENSA_VARIANT_DC232B) := dc232b 19 - variant-$(CONFIG_XTENSA_VARIANT_DC233C) := dc233c 20 - variant-$(CONFIG_XTENSA_VARIANT_S6000) := s6000 21 - variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM) := custom 16 + variant-y := $(patsubst "%",%,$(CONFIG_XTENSA_VARIANT_NAME)) 22 17 23 18 VARIANT = $(variant-y) 24 19 export VARIANT
+4 -1
arch/xtensa/boot/dts/kc705.dts
··· 4 4 5 5 / { 6 6 compatible = "cdns,xtensa-kc705"; 7 + chosen { 8 + bootargs = "earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000"; 9 + }; 7 10 memory@0 { 8 11 device_type = "memory"; 9 - reg = <0x00000000 0x08000000>; 12 + reg = <0x00000000 0x38000000>; 10 13 }; 11 14 };
-1
arch/xtensa/configs/common_defconfig
··· 66 66 CONFIG_MMU=y 67 67 # CONFIG_XTENSA_UNALIGNED_USER is not set 68 68 # CONFIG_PREEMPT is not set 69 - # CONFIG_MATH_EMULATION is not set 70 69 # CONFIG_HIGHMEM is not set 71 70 72 71 #
+1 -2
arch/xtensa/configs/iss_defconfig
··· 146 146 # CONFIG_XTENSA_VARIANT_S6000 is not set 147 147 # CONFIG_XTENSA_UNALIGNED_USER is not set 148 148 # CONFIG_PREEMPT is not set 149 - # CONFIG_MATH_EMULATION is not set 150 149 CONFIG_XTENSA_CALIBRATE_CCOUNT=y 151 150 CONFIG_SERIAL_CONSOLE=y 152 151 CONFIG_XTENSA_ISS_NETWORK=y ··· 307 308 # EEPROM support 308 309 # 309 310 # CONFIG_EEPROM_93CX6 is not set 310 - CONFIG_HAVE_IDE=y 311 + # CONFIG_HAVE_IDE is not set 311 312 # CONFIG_IDE is not set 312 313 313 314 #
-1
arch/xtensa/configs/s6105_defconfig
··· 109 109 CONFIG_XTENSA_VARIANT_S6000=y 110 110 # CONFIG_XTENSA_UNALIGNED_USER is not set 111 111 CONFIG_PREEMPT=y 112 - # CONFIG_MATH_EMULATION is not set 113 112 # CONFIG_HIGHMEM is not set 114 113 CONFIG_XTENSA_CALIBRATE_CCOUNT=y 115 114 CONFIG_SERIAL_CONSOLE=y
+2
arch/xtensa/include/asm/cacheflush.h
··· 37 37 * specials for cache aliasing: 38 38 * 39 39 * __flush_invalidate_dcache_page_alias(vaddr,paddr) 40 + * __invalidate_dcache_page_alias(vaddr,paddr) 40 41 * __invalidate_icache_page_alias(vaddr,paddr) 41 42 */ 42 43 ··· 63 62 64 63 #if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE) 65 64 extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long); 65 + extern void __invalidate_dcache_page_alias(unsigned long, unsigned long); 66 66 #else 67 67 static inline void __flush_invalidate_dcache_page_alias(unsigned long virt, 68 68 unsigned long phys) { }
+26 -4
arch/xtensa/include/asm/fixmap.h
··· 23 23 * Here we define all the compile-time 'special' virtual 24 24 * addresses. The point is to have a constant address at 25 25 * compile time, but to set the physical address only 26 - * in the boot process. We allocate these special addresses 27 - * from the end of the consistent memory region backwards. 26 + * in the boot process. We allocate these special addresses 27 + * from the start of the consistent memory region upwards. 28 28 * Also this lets us do fail-safe vmalloc(), we 29 29 * can guarantee that these special addresses and 30 30 * vmalloc()-ed addresses never overlap. ··· 38 38 #ifdef CONFIG_HIGHMEM 39 39 /* reserved pte's for temporary kernel mappings */ 40 40 FIX_KMAP_BEGIN, 41 - FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, 41 + FIX_KMAP_END = FIX_KMAP_BEGIN + 42 + (KM_TYPE_NR * NR_CPUS * DCACHE_N_COLORS) - 1, 42 43 #endif 43 44 __end_of_fixed_addresses 44 45 }; ··· 48 47 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 49 48 #define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) 50 49 51 - #include <asm-generic/fixmap.h> 50 + #define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT)) 51 + #define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT) 52 + 53 + #ifndef __ASSEMBLY__ 54 + /* 55 + * 'index to address' translation. If anyone tries to use the idx 56 + * directly without translation, we catch the bug with a NULL-deference 57 + * kernel oops. Illegal ranges of incoming indices are caught too. 58 + */ 59 + static __always_inline unsigned long fix_to_virt(const unsigned int idx) 60 + { 61 + BUILD_BUG_ON(idx >= __end_of_fixed_addresses); 62 + return __fix_to_virt(idx); 63 + } 64 + 65 + static inline unsigned long virt_to_fix(const unsigned long vaddr) 66 + { 67 + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 68 + return __virt_to_fix(vaddr); 69 + } 70 + 71 + #endif 52 72 53 73 #define kmap_get_fixmap_pte(vaddr) \ 54 74 pte_offset_kernel( \
+38 -2
arch/xtensa/include/asm/highmem.h
··· 12 12 #ifndef _XTENSA_HIGHMEM_H 13 13 #define _XTENSA_HIGHMEM_H 14 14 15 + #include <linux/wait.h> 15 16 #include <asm/cacheflush.h> 16 17 #include <asm/fixmap.h> 17 18 #include <asm/kmap_types.h> 18 19 #include <asm/pgtable.h> 19 20 20 - #define PKMAP_BASE (FIXADDR_START - PMD_SIZE) 21 - #define LAST_PKMAP PTRS_PER_PTE 21 + #define PKMAP_BASE ((FIXADDR_START - \ 22 + (LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK) 23 + #define LAST_PKMAP (PTRS_PER_PTE * DCACHE_N_COLORS) 22 24 #define LAST_PKMAP_MASK (LAST_PKMAP - 1) 23 25 #define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT) 24 26 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) 25 27 26 28 #define kmap_prot PAGE_KERNEL 29 + 30 + #if DCACHE_WAY_SIZE > PAGE_SIZE 31 + #define get_pkmap_color get_pkmap_color 32 + static inline int get_pkmap_color(struct page *page) 33 + { 34 + return DCACHE_ALIAS(page_to_phys(page)); 35 + } 36 + 37 + extern unsigned int last_pkmap_nr_arr[]; 38 + 39 + static inline unsigned int get_next_pkmap_nr(unsigned int color) 40 + { 41 + last_pkmap_nr_arr[color] = 42 + (last_pkmap_nr_arr[color] + DCACHE_N_COLORS) & LAST_PKMAP_MASK; 43 + return last_pkmap_nr_arr[color] + color; 44 + } 45 + 46 + static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color) 47 + { 48 + return pkmap_nr < DCACHE_N_COLORS; 49 + } 50 + 51 + static inline int get_pkmap_entries_count(unsigned int color) 52 + { 53 + return LAST_PKMAP / DCACHE_N_COLORS; 54 + } 55 + 56 + extern wait_queue_head_t pkmap_map_wait_arr[]; 57 + 58 + static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color) 59 + { 60 + return pkmap_map_wait_arr + color; 61 + } 62 + #endif 27 63 28 64 extern pte_t *pkmap_page_table; 29 65
+12 -2
arch/xtensa/include/asm/page.h
··· 78 78 # define DCACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0) 79 79 #else 80 80 # define DCACHE_ALIAS_ORDER 0 81 + # define DCACHE_ALIAS(a) ((void)(a), 0) 81 82 #endif 83 + #define DCACHE_N_COLORS (1 << DCACHE_ALIAS_ORDER) 82 84 83 85 #if ICACHE_WAY_SIZE > PAGE_SIZE 84 86 # define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT) ··· 136 134 #endif 137 135 138 136 struct page; 137 + struct vm_area_struct; 139 138 extern void clear_page(void *page); 140 139 extern void copy_page(void *to, void *from); 141 140 ··· 146 143 */ 147 144 148 145 #if DCACHE_WAY_SIZE > PAGE_SIZE 149 - extern void clear_user_page(void*, unsigned long, struct page*); 150 - extern void copy_user_page(void*, void*, unsigned long, struct page*); 146 + extern void clear_page_alias(void *vaddr, unsigned long paddr); 147 + extern void copy_page_alias(void *to, void *from, 148 + unsigned long to_paddr, unsigned long from_paddr); 149 + 150 + #define clear_user_highpage clear_user_highpage 151 + void clear_user_highpage(struct page *page, unsigned long vaddr); 152 + #define __HAVE_ARCH_COPY_USER_HIGHPAGE 153 + void copy_user_highpage(struct page *to, struct page *from, 154 + unsigned long vaddr, struct vm_area_struct *vma); 151 155 #else 152 156 # define clear_user_page(page, vaddr, pg) clear_page(page) 153 157 # define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+6 -1
arch/xtensa/include/asm/pgtable.h
··· 67 67 #define VMALLOC_START 0xC0000000 68 68 #define VMALLOC_END 0xC7FEFFFF 69 69 #define TLBTEMP_BASE_1 0xC7FF0000 70 - #define TLBTEMP_BASE_2 0xC7FF8000 70 + #define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE) 71 + #if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE 72 + #define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE) 73 + #else 74 + #define TLBTEMP_SIZE ICACHE_WAY_SIZE 75 + #endif 71 76 72 77 /* 73 78 * For the Xtensa architecture, the PTE layout is as follows:
+5
arch/xtensa/include/asm/uaccess.h
··· 52 52 */ 53 53 .macro get_fs ad, sp 54 54 GET_CURRENT(\ad,\sp) 55 + #if THREAD_CURRENT_DS > 1020 56 + addi \ad, \ad, TASK_THREAD 57 + l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD 58 + #else 55 59 l32i \ad, \ad, THREAD_CURRENT_DS 60 + #endif 56 61 .endm 57 62 58 63 /*
+10 -9
arch/xtensa/include/uapi/asm/ioctls.h
··· 28 28 #define TCSETSW 0x5403 29 29 #define TCSETSF 0x5404 30 30 31 - #define TCGETA _IOR('t', 23, struct termio) 32 - #define TCSETA _IOW('t', 24, struct termio) 33 - #define TCSETAW _IOW('t', 25, struct termio) 34 - #define TCSETAF _IOW('t', 28, struct termio) 31 + #define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */ 32 + #define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */ 33 + #define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */ 34 + #define TCSETAF 0x4012741C /* _IOW('t', 28, struct termio) */ 35 35 36 36 #define TCSBRK _IO('t', 29) 37 37 #define TCXONC _IO('t', 30) 38 38 #define TCFLSH _IO('t', 31) 39 39 40 - #define TIOCSWINSZ _IOW('t', 103, struct winsize) 41 - #define TIOCGWINSZ _IOR('t', 104, struct winsize) 40 + #define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */ 41 + #define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */ 42 42 #define TIOCSTART _IO('t', 110) /* start output, like ^Q */ 43 43 #define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ 44 44 #define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ ··· 88 88 #define TIOCSETD _IOW('T', 35, int) 89 89 #define TIOCGETD _IOR('T', 36, int) 90 90 #define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/ 91 - #define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* For debugging only*/ 92 91 #define TIOCSBRK _IO('T', 39) /* BSD compatibility */ 93 92 #define TIOCCBRK _IO('T', 40) /* BSD compatibility */ 94 93 #define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/ ··· 113 114 #define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. */ 114 115 /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ 115 116 # define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ 116 - #define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config */ 117 - #define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */ 117 + #define TIOCSERGETMULTI 0x80a8545a /* Get multiport config */ 118 + /* _IOR('T', 90, struct serial_multiport_struct) */ 119 + #define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */ 120 + /* _IOW('T', 91, struct serial_multiport_struct) */ 118 121 119 122 #define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */ 120 123 #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+4 -1
arch/xtensa/include/uapi/asm/unistd.h
··· 739 739 #define __NR_sched_getattr 335 740 740 __SYSCALL(335, sys_sched_getattr, 3) 741 741 742 - #define __NR_syscall_count 336 742 + #define __NR_renameat2 336 743 + __SYSCALL(336, sys_renameat2, 5) 744 + 745 + #define __NR_syscall_count 337 743 746 744 747 /* 745 748 * sysxtensa syscall handler
+82 -48
arch/xtensa/kernel/align.S
··· 8 8 * this archive for more details. 9 9 * 10 10 * Copyright (C) 2001 - 2005 Tensilica, Inc. 11 + * Copyright (C) 2014 Cadence Design Systems Inc. 11 12 * 12 13 * Rewritten by Chris Zankel <chris@zankel.net> 13 14 * ··· 175 174 s32i a0, a2, PT_AREG2 176 175 s32i a3, a2, PT_AREG3 177 176 177 + rsr a3, excsave1 178 + movi a4, fast_unaligned_fixup 179 + s32i a4, a3, EXC_TABLE_FIXUP 180 + 178 181 /* Keep value of SAR in a0 */ 179 182 180 183 rsr a0, sar ··· 230 225 addx8 a5, a6, a5 231 226 jx a5 # jump into table 232 227 233 - /* Invalid instruction, CRITICAL! */ 234 - .Linvalid_instruction_load: 235 - j .Linvalid_instruction 236 - 237 228 /* Load: Load memory address. */ 238 229 239 230 .Lload: movi a3, ~3 ··· 273 272 /* Set target register. */ 274 273 275 274 1: 276 - 277 - #if XCHAL_HAVE_LOOPS 278 - rsr a5, lend # check if we reached LEND 279 - bne a7, a5, 1f 280 - rsr a5, lcount # and LCOUNT != 0 281 - beqz a5, 1f 282 - addi a5, a5, -1 # decrement LCOUNT and set 283 - rsr a7, lbeg # set PC to LBEGIN 284 - wsr a5, lcount 285 - #endif 286 - 287 - 1: wsr a7, epc1 # skip load instruction 288 275 extui a4, a4, INSN_T, 4 # extract target register 289 276 movi a5, .Lload_table 290 277 addx8 a4, a4, a5 ··· 315 326 mov a3, a14 ; _j 1f; .align 8 316 327 mov a3, a15 ; _j 1f; .align 8 317 328 329 + /* We cannot handle this exception. */ 330 + 331 + .extern _kernel_exception 332 + .Linvalid_instruction_load: 333 + .Linvalid_instruction_store: 334 + 335 + movi a4, 0 336 + rsr a3, excsave1 337 + s32i a4, a3, EXC_TABLE_FIXUP 338 + 339 + /* Restore a4...a8 and SAR, set SP, and jump to default exception. */ 340 + 341 + l32i a8, a2, PT_AREG8 342 + l32i a7, a2, PT_AREG7 343 + l32i a6, a2, PT_AREG6 344 + l32i a5, a2, PT_AREG5 345 + l32i a4, a2, PT_AREG4 346 + wsr a0, sar 347 + mov a1, a2 348 + 349 + rsr a0, ps 350 + bbsi.l a0, PS_UM_BIT, 2f # jump if user mode 351 + 352 + movi a0, _kernel_exception 353 + jx a0 354 + 355 + 2: movi a0, _user_exception 356 + jx a0 357 + 318 358 1: # a7: instruction pointer, a4: instruction, a3: value 319 359 320 360 movi a6, 0 # mask: ffffffff:00000000 ··· 371 353 /* Get memory address */ 372 354 373 355 1: 374 - #if XCHAL_HAVE_LOOPS 375 - rsr a4, lend # check if we reached LEND 376 - bne a7, a4, 1f 377 - rsr a4, lcount # and LCOUNT != 0 378 - beqz a4, 1f 379 - addi a4, a4, -1 # decrement LCOUNT and set 380 - rsr a7, lbeg # set PC to LBEGIN 381 - wsr a4, lcount 382 - #endif 383 - 384 - 1: wsr a7, epc1 # skip store instruction 385 356 movi a4, ~3 386 357 and a4, a4, a8 # align memory address 387 358 ··· 382 375 #endif 383 376 384 377 __ssa8r a8 385 - __src_b a7, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE) 378 + __src_b a8, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE) 386 379 __src_b a6, a6, a5 # hi-mask 0..0F..F (BE) F..F0..0 (LE) 387 380 #ifdef UNALIGNED_USER_EXCEPTION 388 381 l32e a5, a4, -8 389 382 #else 390 383 l32i a5, a4, 0 # load lower address word 391 384 #endif 392 - and a5, a5, a7 # mask 393 - __sh a7, a3 # shift value 394 - or a5, a5, a7 # or with original value 385 + and a5, a5, a8 # mask 386 + __sh a8, a3 # shift value 387 + or a5, a5, a8 # or with original value 395 388 #ifdef UNALIGNED_USER_EXCEPTION 396 389 s32e a5, a4, -8 397 - l32e a7, a4, -4 390 + l32e a8, a4, -4 398 391 #else 399 392 s32i a5, a4, 0 # store 400 - l32i a7, a4, 4 # same for upper address word 393 + l32i a8, a4, 4 # same for upper address word 401 394 #endif 402 395 __sl a5, a3 403 - and a6, a7, a6 396 + and a6, a8, a6 404 397 or a6, a6, a5 405 398 #ifdef UNALIGNED_USER_EXCEPTION 406 399 s32e 
a6, a4, -4 ··· 408 401 s32i a6, a4, 4 409 402 #endif 410 403 411 - /* Done. restore stack and return */ 412 - 413 404 .Lexit: 405 + #if XCHAL_HAVE_LOOPS 406 + rsr a4, lend # check if we reached LEND 407 + bne a7, a4, 1f 408 + rsr a4, lcount # and LCOUNT != 0 409 + beqz a4, 1f 410 + addi a4, a4, -1 # decrement LCOUNT and set 411 + rsr a7, lbeg # set PC to LBEGIN 412 + wsr a4, lcount 413 + #endif 414 + 415 + 1: wsr a7, epc1 # skip emulated instruction 416 + 417 + /* Update icount if we're single-stepping in userspace. */ 418 + rsr a4, icountlevel 419 + beqz a4, 1f 420 + bgeui a4, LOCKLEVEL + 1, 1f 421 + rsr a4, icount 422 + addi a4, a4, 1 423 + wsr a4, icount 424 + 1: 414 425 movi a4, 0 415 426 rsr a3, excsave1 416 427 s32i a4, a3, EXC_TABLE_FIXUP ··· 449 424 l32i a2, a2, PT_AREG2 450 425 rfe 451 426 452 - /* We cannot handle this exception. */ 427 + ENDPROC(fast_unaligned) 453 428 454 - .extern _kernel_exception 455 - .Linvalid_instruction_store: 456 - .Linvalid_instruction: 429 + ENTRY(fast_unaligned_fixup) 457 430 458 - /* Restore a4...a8 and SAR, set SP, and jump to default exception. */ 431 + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE 432 + wsr a3, excsave1 459 433 460 434 l32i a8, a2, PT_AREG8 461 435 l32i a7, a2, PT_AREG7 462 436 l32i a6, a2, PT_AREG6 463 437 l32i a5, a2, PT_AREG5 464 438 l32i a4, a2, PT_AREG4 439 + l32i a0, a2, PT_AREG2 440 + xsr a0, depc # restore depc and a0 465 441 wsr a0, sar 466 - mov a1, a2 442 + 443 + rsr a0, exccause 444 + s32i a0, a2, PT_DEPC # mark as a regular exception 467 445 468 446 rsr a0, ps 469 - bbsi.l a2, PS_UM_BIT, 1f # jump if user mode 447 + bbsi.l a0, PS_UM_BIT, 1f # jump if user mode 470 448 471 - movi a0, _kernel_exception 449 + rsr a0, exccause 450 + addx4 a0, a0, a3 # find entry in table 451 + l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler 452 + l32i a3, a2, PT_AREG3 453 + jx a0 454 + 1: 455 + rsr a0, exccause 456 + addx4 a0, a0, a3 # find entry in table 457 + l32i a0, a0, EXC_TABLE_FAST_USER # load handler 458 + l32i a3, a2, PT_AREG3 472 459 jx a0 473 460 474 - 1: movi a0, _user_exception 475 - jx a0 476 - 477 - ENDPROC(fast_unaligned) 461 + ENDPROC(fast_unaligned_fixup) 478 462 479 463 #endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
+42 -12
arch/xtensa/kernel/entry.S
··· 986 986 * j done 987 987 */ 988 988 989 + #ifdef CONFIG_FAST_SYSCALL_XTENSA 990 + 989 991 #define TRY \ 990 992 .section __ex_table, "a"; \ 991 993 .word 66f, 67f; \ ··· 1003 1001 movi a7, 4 # sizeof(unsigned int) 1004 1002 access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp 1005 1003 1006 - addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1 1007 - _bgeui a6, SYS_XTENSA_COUNT - 1, .Lill 1008 - _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp 1004 + _bgeui a6, SYS_XTENSA_COUNT, .Lill 1005 + _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp 1009 1006 1010 1007 /* Fall through for ATOMIC_CMP_SWP. */ 1011 1008 ··· 1016 1015 l32i a7, a2, PT_AREG7 # restore a7 1017 1016 l32i a0, a2, PT_AREG0 # restore a0 1018 1017 movi a2, 1 # and return 1 1019 - addi a6, a6, 1 # restore a6 (really necessary?) 1020 1018 rfe 1021 1019 1022 1020 1: l32i a7, a2, PT_AREG7 # restore a7 1023 1021 l32i a0, a2, PT_AREG0 # restore a0 1024 1022 movi a2, 0 # return 0 (note that we cannot set 1025 - addi a6, a6, 1 # restore a6 (really necessary?) 1026 1023 rfe 1027 1024 1028 1025 .Lnswp: /* Atomic set, add, and exg_add. */ 1029 1026 1030 1027 TRY l32i a7, a3, 0 # orig 1028 + addi a6, a6, -SYS_XTENSA_ATOMIC_SET 1031 1029 add a0, a4, a7 # + arg 1032 1030 moveqz a0, a4, a6 # set 1031 + addi a6, a6, SYS_XTENSA_ATOMIC_SET 1033 1032 TRY s32i a0, a3, 0 # write new value 1034 1033 1035 1034 mov a0, a2 1036 1035 mov a2, a7 1037 1036 l32i a7, a0, PT_AREG7 # restore a7 1038 1037 l32i a0, a0, PT_AREG0 # restore a0 1039 - addi a6, a6, 1 # restore a6 (really necessary?) 1040 1038 rfe 1041 1039 1042 1040 CATCH ··· 1044 1044 movi a2, -EFAULT 1045 1045 rfe 1046 1046 1047 - .Lill: l32i a7, a2, PT_AREG0 # restore a7 1047 + .Lill: l32i a7, a2, PT_AREG7 # restore a7 1048 1048 l32i a0, a2, PT_AREG0 # restore a0 1049 1049 movi a2, -EINVAL 1050 1050 rfe 1051 1051 1052 1052 ENDPROC(fast_syscall_xtensa) 1053 + 1054 + #else /* CONFIG_FAST_SYSCALL_XTENSA */ 1055 + 1056 + ENTRY(fast_syscall_xtensa) 1057 + 1058 + l32i a0, a2, PT_AREG0 # restore a0 1059 + movi a2, -ENOSYS 1060 + rfe 1061 + 1062 + ENDPROC(fast_syscall_xtensa) 1063 + 1064 + #endif /* CONFIG_FAST_SYSCALL_XTENSA */ 1053 1065 1054 1066 1055 1067 /* fast_syscall_spill_registers. ··· 1077 1065 * 1078 1066 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler. 1079 1067 */ 1068 + 1069 + #ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS 1080 1070 1081 1071 ENTRY(fast_syscall_spill_registers) 1082 1072 ··· 1414 1400 1415 1401 ENDPROC(fast_syscall_spill_registers_fixup_return) 1416 1402 1403 + #else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */ 1404 + 1405 + ENTRY(fast_syscall_spill_registers) 1406 + 1407 + l32i a0, a2, PT_AREG0 # restore a0 1408 + movi a2, -ENOSYS 1409 + rfe 1410 + 1411 + ENDPROC(fast_syscall_spill_registers) 1412 + 1413 + #endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */ 1414 + 1417 1415 #ifdef CONFIG_MMU 1418 1416 /* 1419 1417 * We should never get here. Bail out! ··· 1591 1565 rsr a0, excvaddr 1592 1566 bltu a0, a3, 2f 1593 1567 1594 - addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT)) 1568 + addi a1, a0, -TLBTEMP_SIZE 1595 1569 bgeu a1, a3, 2f 1596 1570 1597 1571 /* Check if we have to restore an ITLB mapping. 
*/ ··· 1846 1820 1847 1821 entry a1, 16 1848 1822 1849 - mov a10, a2 # preserve 'prev' (a2) 1850 1823 mov a11, a3 # and 'next' (a3) 1851 1824 1852 1825 l32i a4, a2, TASK_THREAD_INFO ··· 1853 1828 1854 1829 save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER 1855 1830 1856 - s32i a0, a10, THREAD_RA # save return address 1857 - s32i a1, a10, THREAD_SP # save stack pointer 1831 + #if THREAD_RA > 1020 || THREAD_SP > 1020 1832 + addi a10, a2, TASK_THREAD 1833 + s32i a0, a10, THREAD_RA - TASK_THREAD # save return address 1834 + s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer 1835 + #else 1836 + s32i a0, a2, THREAD_RA # save return address 1837 + s32i a1, a2, THREAD_SP # save stack pointer 1838 + #endif 1858 1839 1859 1840 /* Disable ints while we manipulate the stack pointer. */ 1860 1841 ··· 1901 1870 load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER 1902 1871 1903 1872 wsr a14, ps 1904 - mov a2, a10 # return 'prev' 1905 1873 rsync 1906 1874 1907 1875 retw
+6 -6
arch/xtensa/kernel/pci-dma.c
··· 49 49 50 50 /* We currently don't support coherent memory outside KSEG */ 51 51 52 - if (ret < XCHAL_KSEG_CACHED_VADDR 53 - || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE) 54 - BUG(); 52 + BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR || 53 + ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); 55 54 56 55 57 56 if (ret != 0) { ··· 67 68 void dma_free_coherent(struct device *hwdev, size_t size, 68 69 void *vaddr, dma_addr_t dma_handle) 69 70 { 70 - long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR; 71 + unsigned long addr = (unsigned long)vaddr + 72 + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; 71 73 72 - if (addr < 0 || addr >= XCHAL_KSEG_SIZE) 73 - BUG(); 74 + BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR || 75 + addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); 74 76 75 77 free_pages(addr, get_order(size)); 76 78 }
+1 -4
arch/xtensa/kernel/traps.c
··· 101 101 #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION 102 102 #ifdef CONFIG_XTENSA_UNALIGNED_USER 103 103 { EXCCAUSE_UNALIGNED, USER, fast_unaligned }, 104 - #else 105 - { EXCCAUSE_UNALIGNED, 0, do_unaligned_user }, 106 104 #endif 105 + { EXCCAUSE_UNALIGNED, 0, do_unaligned_user }, 107 106 { EXCCAUSE_UNALIGNED, KRNL, fast_unaligned }, 108 107 #endif 109 108 #ifdef CONFIG_MMU ··· 263 264 */ 264 265 265 266 #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION 266 - #ifndef CONFIG_XTENSA_UNALIGNED_USER 267 267 void 268 268 do_unaligned_user (struct pt_regs *regs) 269 269 { ··· 283 285 force_sig_info(SIGSEGV, &info, current); 284 286 285 287 } 286 - #endif 287 288 #endif 288 289 289 290 void
+7 -1
arch/xtensa/kernel/vectors.S
··· 454 454 s32i a0, a2, PT_DEPC 455 455 456 456 _DoubleExceptionVector_handle_exception: 457 + addi a0, a0, -EXCCAUSE_UNALIGNED 458 + beqz a0, 2f 457 459 addx4 a0, a0, a3 458 - l32i a0, a0, EXC_TABLE_FAST_USER 460 + l32i a0, a0, EXC_TABLE_FAST_USER + 4 * EXCCAUSE_UNALIGNED 461 + xsr a3, excsave1 462 + jx a0 463 + 2: 464 + movi a0, user_exception 459 465 xsr a3, excsave1 460 466 jx a0 461 467
+2 -2
arch/xtensa/kernel/vmlinux.lds.S
··· 269 269 .UserExceptionVector.literal) 270 270 SECTION_VECTOR (_DoubleExceptionVector_literal, 271 271 .DoubleExceptionVector.literal, 272 - DOUBLEEXC_VECTOR_VADDR - 40, 272 + DOUBLEEXC_VECTOR_VADDR - 48, 273 273 SIZEOF(.UserExceptionVector.text), 274 274 .UserExceptionVector.text) 275 275 SECTION_VECTOR (_DoubleExceptionVector_text, 276 276 .DoubleExceptionVector.text, 277 277 DOUBLEEXC_VECTOR_VADDR, 278 - 40, 278 + 48, 279 279 .DoubleExceptionVector.literal) 280 280 281 281 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
+68 -9
arch/xtensa/mm/cache.c
··· 59 59 * 60 60 */ 61 61 62 - #if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM) 63 - #error "HIGHMEM is not supported on cores with aliasing cache." 64 - #endif 62 + #if (DCACHE_WAY_SIZE > PAGE_SIZE) 63 + static inline void kmap_invalidate_coherent(struct page *page, 64 + unsigned long vaddr) 65 + { 66 + if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { 67 + unsigned long kvaddr; 68 + 69 + if (!PageHighMem(page)) { 70 + kvaddr = (unsigned long)page_to_virt(page); 71 + 72 + __invalidate_dcache_page(kvaddr); 73 + } else { 74 + kvaddr = TLBTEMP_BASE_1 + 75 + (page_to_phys(page) & DCACHE_ALIAS_MASK); 76 + 77 + __invalidate_dcache_page_alias(kvaddr, 78 + page_to_phys(page)); 79 + } 80 + } 81 + } 82 + 83 + static inline void *coherent_kvaddr(struct page *page, unsigned long base, 84 + unsigned long vaddr, unsigned long *paddr) 85 + { 86 + if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { 87 + *paddr = page_to_phys(page); 88 + return (void *)(base + (vaddr & DCACHE_ALIAS_MASK)); 89 + } else { 90 + *paddr = 0; 91 + return page_to_virt(page); 92 + } 93 + } 94 + 95 + void clear_user_highpage(struct page *page, unsigned long vaddr) 96 + { 97 + unsigned long paddr; 98 + void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr); 99 + 100 + pagefault_disable(); 101 + kmap_invalidate_coherent(page, vaddr); 102 + set_bit(PG_arch_1, &page->flags); 103 + clear_page_alias(kvaddr, paddr); 104 + pagefault_enable(); 105 + } 106 + 107 + void copy_user_highpage(struct page *dst, struct page *src, 108 + unsigned long vaddr, struct vm_area_struct *vma) 109 + { 110 + unsigned long dst_paddr, src_paddr; 111 + void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr, 112 + &dst_paddr); 113 + void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr, 114 + &src_paddr); 115 + 116 + pagefault_disable(); 117 + kmap_invalidate_coherent(dst, vaddr); 118 + set_bit(PG_arch_1, &dst->flags); 119 + copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr); 120 + pagefault_enable(); 121 + } 122 + 123 + #endif /* DCACHE_WAY_SIZE > PAGE_SIZE */ 65 124 66 125 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 67 126 ··· 162 103 if (!alias && !mapping) 163 104 return; 164 105 165 - __flush_invalidate_dcache_page((long)page_address(page)); 106 + virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK); 107 + __flush_invalidate_dcache_page_alias(virt, phys); 166 108 167 109 virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK); 168 110 ··· 228 168 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 229 169 230 170 if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) { 231 - 232 - unsigned long paddr = (unsigned long) page_address(page); 233 171 unsigned long phys = page_to_phys(page); 234 - unsigned long tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK); 172 + unsigned long tmp; 235 173 236 - __flush_invalidate_dcache_page(paddr); 237 - 174 + tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK); 175 + __flush_invalidate_dcache_page_alias(tmp, phys); 176 + tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK); 238 177 __flush_invalidate_dcache_page_alias(tmp, phys); 239 178 __invalidate_icache_page_alias(tmp, phys); 240 179
+31 -10
arch/xtensa/mm/highmem.c
··· 14 14 15 15 static pte_t *kmap_pte; 16 16 17 + #if DCACHE_WAY_SIZE > PAGE_SIZE 18 + unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS]; 19 + wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS]; 20 + 21 + static void __init kmap_waitqueues_init(void) 22 + { 23 + unsigned int i; 24 + 25 + for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i) 26 + init_waitqueue_head(pkmap_map_wait_arr + i); 27 + } 28 + #else 29 + static inline void kmap_waitqueues_init(void) 30 + { 31 + } 32 + #endif 33 + 34 + static inline enum fixed_addresses kmap_idx(int type, unsigned long color) 35 + { 36 + return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS + 37 + color; 38 + } 39 + 17 40 void *kmap_atomic(struct page *page) 18 41 { 19 42 enum fixed_addresses idx; 20 43 unsigned long vaddr; 21 - int type; 22 44 23 45 pagefault_disable(); 24 46 if (!PageHighMem(page)) 25 47 return page_address(page); 26 48 27 - type = kmap_atomic_idx_push(); 28 - idx = type + KM_TYPE_NR * smp_processor_id(); 49 + idx = kmap_idx(kmap_atomic_idx_push(), 50 + DCACHE_ALIAS(page_to_phys(page))); 29 51 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 30 52 #ifdef CONFIG_DEBUG_HIGHMEM 31 - BUG_ON(!pte_none(*(kmap_pte - idx))); 53 + BUG_ON(!pte_none(*(kmap_pte + idx))); 32 54 #endif 33 - set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC)); 55 + set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC)); 34 56 35 57 return (void *)vaddr; 36 58 } ··· 60 38 61 39 void __kunmap_atomic(void *kvaddr) 62 40 { 63 - int idx, type; 64 - 65 41 if (kvaddr >= (void *)FIXADDR_START && 66 42 kvaddr < (void *)FIXADDR_TOP) { 67 - type = kmap_atomic_idx(); 68 - idx = type + KM_TYPE_NR * smp_processor_id(); 43 + int idx = kmap_idx(kmap_atomic_idx(), 44 + DCACHE_ALIAS((unsigned long)kvaddr)); 69 45 70 46 /* 71 47 * Force other mappings to Oops if they'll try to access this ··· 71 51 * is a bad idea also, in case the page changes cacheability 72 52 * attributes or becomes a protected page in a hypervisor. 73 53 */ 74 - pte_clear(&init_mm, kvaddr, kmap_pte - idx); 54 + pte_clear(&init_mm, kvaddr, kmap_pte + idx); 75 55 local_flush_tlb_kernel_range((unsigned long)kvaddr, 76 56 (unsigned long)kvaddr + PAGE_SIZE); 77 57 ··· 89 69 /* cache the first kmap pte */ 90 70 kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); 91 71 kmap_pte = kmap_get_fixmap_pte(kmap_vstart); 72 + kmap_waitqueues_init(); 92 73 }
+52 -64
arch/xtensa/mm/misc.S
··· 110 110 #if (DCACHE_WAY_SIZE > PAGE_SIZE) 111 111 112 112 /* 113 - * clear_user_page (void *addr, unsigned long vaddr, struct page *page) 114 - * a2 a3 a4 113 + * clear_page_alias(void *addr, unsigned long paddr) 114 + * a2 a3 115 115 */ 116 116 117 - ENTRY(clear_user_page) 117 + ENTRY(clear_page_alias) 118 118 119 119 entry a1, 32 120 120 121 - /* Mark page dirty and determine alias. */ 121 + /* Skip setting up a temporary DTLB if not aliased low page. */ 122 122 123 - movi a7, (1 << PG_ARCH_1) 124 - l32i a5, a4, PAGE_FLAGS 125 - xor a6, a2, a3 126 - extui a3, a3, PAGE_SHIFT, DCACHE_ALIAS_ORDER 127 - extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER 128 - or a5, a5, a7 129 - slli a3, a3, PAGE_SHIFT 130 - s32i a5, a4, PAGE_FLAGS 123 + movi a5, PAGE_OFFSET 124 + movi a6, 0 125 + beqz a3, 1f 131 126 132 - /* Skip setting up a temporary DTLB if not aliased. */ 127 + /* Setup a temporary DTLB for the addr. */ 133 128 134 - beqz a6, 1f 135 - 136 - /* Invalidate kernel page. */ 137 - 138 - mov a10, a2 139 - call8 __invalidate_dcache_page 140 - 141 - /* Setup a temporary DTLB with the color of the VPN */ 142 - 143 - movi a4, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff 144 - movi a5, TLBTEMP_BASE_1 # virt 145 - add a6, a2, a4 # ppn 146 - add a2, a5, a3 # add 'color' 147 - 129 + addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) 130 + mov a4, a2 148 131 wdtlb a6, a2 149 132 dsync 150 133 ··· 148 165 149 166 /* We need to invalidate the temporary idtlb entry, if any. */ 150 167 151 - 1: addi a2, a2, -PAGE_SIZE 152 - idtlb a2 168 + 1: idtlb a4 153 169 dsync 154 170 155 171 retw 156 172 157 - ENDPROC(clear_user_page) 173 + ENDPROC(clear_page_alias) 158 174 159 175 /* 160 - * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page) 161 - * a2 a3 a4 a5 176 + * copy_page_alias(void *to, void *from, 177 + * a2 a3 178 + * unsigned long to_paddr, unsigned long from_paddr) 179 + * a4 a5 162 180 */ 163 181 164 - ENTRY(copy_user_page) 182 + ENTRY(copy_page_alias) 165 183 166 184 entry a1, 32 167 185 168 - /* Mark page dirty and determine alias for destination. */ 186 + /* Skip setting up a temporary DTLB for destination if not aliased. */ 169 187 170 - movi a8, (1 << PG_ARCH_1) 171 - l32i a9, a5, PAGE_FLAGS 172 - xor a6, a2, a4 173 - xor a7, a3, a4 174 - extui a4, a4, PAGE_SHIFT, DCACHE_ALIAS_ORDER 175 - extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER 176 - extui a7, a7, PAGE_SHIFT, DCACHE_ALIAS_ORDER 177 - or a9, a9, a8 178 - slli a4, a4, PAGE_SHIFT 179 - s32i a9, a5, PAGE_FLAGS 180 - movi a5, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff 188 + movi a6, 0 189 + movi a7, 0 190 + beqz a4, 1f 181 191 182 - beqz a6, 1f 192 + /* Setup a temporary DTLB for destination. */ 183 193 184 - /* Invalidate dcache */ 185 - 186 - mov a10, a2 187 - call8 __invalidate_dcache_page 188 - 189 - /* Setup a temporary DTLB with a matching color. */ 190 - 191 - movi a8, TLBTEMP_BASE_1 # base 192 - add a6, a2, a5 # ppn 193 - add a2, a8, a4 # add 'color' 194 - 194 + addi a6, a4, (PAGE_KERNEL | _PAGE_HW_WRITE) 195 195 wdtlb a6, a2 196 196 dsync 197 197 198 - /* Skip setting up a temporary DTLB for destination if not aliased. */ 198 + /* Skip setting up a temporary DTLB for source if not aliased. */ 199 199 200 - 1: beqz a7, 1f 200 + 1: beqz a5, 1f 201 201 202 - /* Setup a temporary DTLB with a matching color. */ 202 + /* Setup a temporary DTLB for source. 
*/ 203 203 204 - movi a8, TLBTEMP_BASE_2 # base 205 - add a7, a3, a5 # ppn 206 - add a3, a8, a4 204 + addi a7, a5, PAGE_KERNEL 207 205 addi a8, a3, 1 # way1 208 206 209 207 wdtlb a7, a8 ··· 235 271 236 272 retw 237 273 238 - ENDPROC(copy_user_page) 274 + ENDPROC(copy_page_alias) 239 275 240 276 #endif 241 277 ··· 264 300 retw 265 301 266 302 ENDPROC(__flush_invalidate_dcache_page_alias) 303 + 304 + /* 305 + * void __invalidate_dcache_page_alias (addr, phys) 306 + * a2 a3 307 + */ 308 + 309 + ENTRY(__invalidate_dcache_page_alias) 310 + 311 + entry sp, 16 312 + 313 + movi a7, 0 # required for exception handler 314 + addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) 315 + mov a4, a2 316 + wdtlb a6, a2 317 + dsync 318 + 319 + ___invalidate_dcache_page a2 a3 320 + 321 + idtlb a4 322 + dsync 323 + 324 + retw 325 + 326 + ENDPROC(__invalidate_dcache_page_alias) 267 327 #endif 268 328 269 329 ENTRY(__tlbtemp_mapping_itlb)
+22 -16
arch/xtensa/mm/mmu.c
··· 18 18 #include <asm/io.h> 19 19 20 20 #if defined(CONFIG_HIGHMEM) 21 - static void * __init init_pmd(unsigned long vaddr) 21 + static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages) 22 22 { 23 23 pgd_t *pgd = pgd_offset_k(vaddr); 24 24 pmd_t *pmd = pmd_offset(pgd, vaddr); 25 + pte_t *pte; 26 + unsigned long i; 25 27 26 - if (pmd_none(*pmd)) { 27 - unsigned i; 28 - pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE); 28 + n_pages = ALIGN(n_pages, PTRS_PER_PTE); 29 29 30 - for (i = 0; i < 1024; i++) 31 - pte_clear(NULL, 0, pte + i); 30 + pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n", 31 + __func__, vaddr, n_pages); 32 32 33 - set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK)); 34 - BUG_ON(pte != pte_offset_kernel(pmd, 0)); 35 - pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n", 36 - __func__, vaddr, pmd, pte); 37 - return pte; 38 - } else { 39 - return pte_offset_kernel(pmd, 0); 33 + pte = alloc_bootmem_low_pages(n_pages * sizeof(pte_t)); 34 + 35 + for (i = 0; i < n_pages; ++i) 36 + pte_clear(NULL, 0, pte + i); 37 + 38 + for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) { 39 + pte_t *cur_pte = pte + i; 40 + 41 + BUG_ON(!pmd_none(*pmd)); 42 + set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK)); 43 + BUG_ON(cur_pte != pte_offset_kernel(pmd, 0)); 44 + pr_debug("%s: pmd: 0x%p, pte: 0x%p\n", 45 + __func__, pmd, cur_pte); 40 46 } 47 + return pte; 41 48 } 42 49 43 50 static void __init fixedrange_init(void) 44 51 { 45 - BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE); 46 - init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK); 52 + init_pmd(__fix_to_virt(0), __end_of_fixed_addresses); 47 53 } 48 54 #endif 49 55 ··· 58 52 memset(swapper_pg_dir, 0, PAGE_SIZE); 59 53 #ifdef CONFIG_HIGHMEM 60 54 fixedrange_init(); 61 - pkmap_page_table = init_pmd(PKMAP_BASE); 55 + pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP); 62 56 kmap_init(); 63 57 #endif 64 58 }