Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'nios2-v4.1-rc1' of git://git.rocketboards.org/linux-socfpga-next

Pull arch/nios2 updates from Ley Foon Tan:

- update cache management code

- rework the trap handler to use newly defined trap numbers.

- fix a header-check warning.

* tag 'nios2-v4.1-rc1' of git://git.rocketboards.org/linux-socfpga-next:
nios2: rework cache
nios2: Add types.h header required for __u32 type
nios2: rework trap handler
nios2: remove end address checking for initda

+127 -55
-1
arch/nios2/include/asm/Kbuild
··· 46 46 generic-y += sembuf.h 47 47 generic-y += serial.h 48 48 generic-y += shmbuf.h 49 - generic-y += shmparam.h 50 49 generic-y += siginfo.h 51 50 generic-y += signal.h 52 51 generic-y += socket.h
+21
arch/nios2/include/asm/shmparam.h
··· 1 + /* 2 + * Copyright Altera Corporation (C) <2015>. All rights reserved 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms and conditions of the GNU General Public License, 6 + * version 2, as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope it will be useful, but WITHOUT 9 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 + * more details. 12 + * 13 + * You should have received a copy of the GNU General Public License along with 14 + * this program. If not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + #ifndef _ASM_NIOS2_SHMPARAM_H 17 + #define _ASM_NIOS2_SHMPARAM_H 18 + 19 + #define SHMLBA CONFIG_NIOS2_DCACHE_SIZE 20 + 21 + #endif /* _ASM_NIOS2_SHMPARAM_H */
+2
arch/nios2/include/uapi/asm/ptrace.h
··· 14 14 15 15 #ifndef __ASSEMBLY__ 16 16 17 + #include <linux/types.h> 18 + 17 19 /* 18 20 * Register numbers used by 'ptrace' system call interface. 19 21 */
+42 -29
arch/nios2/kernel/entry.S
··· 92 92 93 93 trap_table: 94 94 .word handle_system_call /* 0 */ 95 - .word instruction_trap /* 1 */ 96 - .word instruction_trap /* 2 */ 97 - .word instruction_trap /* 3 */ 98 - .word instruction_trap /* 4 */ 99 - .word instruction_trap /* 5 */ 100 - .word instruction_trap /* 6 */ 101 - .word instruction_trap /* 7 */ 102 - .word instruction_trap /* 8 */ 103 - .word instruction_trap /* 9 */ 104 - .word instruction_trap /* 10 */ 105 - .word instruction_trap /* 11 */ 106 - .word instruction_trap /* 12 */ 107 - .word instruction_trap /* 13 */ 108 - .word instruction_trap /* 14 */ 109 - .word instruction_trap /* 15 */ 110 - .word instruction_trap /* 16 */ 111 - .word instruction_trap /* 17 */ 112 - .word instruction_trap /* 18 */ 113 - .word instruction_trap /* 19 */ 114 - .word instruction_trap /* 20 */ 115 - .word instruction_trap /* 21 */ 116 - .word instruction_trap /* 22 */ 117 - .word instruction_trap /* 23 */ 118 - .word instruction_trap /* 24 */ 119 - .word instruction_trap /* 25 */ 120 - .word instruction_trap /* 26 */ 121 - .word instruction_trap /* 27 */ 122 - .word instruction_trap /* 28 */ 123 - .word instruction_trap /* 29 */ 95 + .word handle_trap_1 /* 1 */ 96 + .word handle_trap_2 /* 2 */ 97 + .word handle_trap_3 /* 3 */ 98 + .word handle_trap_reserved /* 4 */ 99 + .word handle_trap_reserved /* 5 */ 100 + .word handle_trap_reserved /* 6 */ 101 + .word handle_trap_reserved /* 7 */ 102 + .word handle_trap_reserved /* 8 */ 103 + .word handle_trap_reserved /* 9 */ 104 + .word handle_trap_reserved /* 10 */ 105 + .word handle_trap_reserved /* 11 */ 106 + .word handle_trap_reserved /* 12 */ 107 + .word handle_trap_reserved /* 13 */ 108 + .word handle_trap_reserved /* 14 */ 109 + .word handle_trap_reserved /* 15 */ 110 + .word handle_trap_reserved /* 16 */ 111 + .word handle_trap_reserved /* 17 */ 112 + .word handle_trap_reserved /* 18 */ 113 + .word handle_trap_reserved /* 19 */ 114 + .word handle_trap_reserved /* 20 */ 115 + .word handle_trap_reserved /* 21 
*/ 116 + .word handle_trap_reserved /* 22 */ 117 + .word handle_trap_reserved /* 23 */ 118 + .word handle_trap_reserved /* 24 */ 119 + .word handle_trap_reserved /* 25 */ 120 + .word handle_trap_reserved /* 26 */ 121 + .word handle_trap_reserved /* 27 */ 122 + .word handle_trap_reserved /* 28 */ 123 + .word handle_trap_reserved /* 29 */ 124 124 #ifdef CONFIG_KGDB 125 125 .word handle_kgdb_breakpoint /* 30 KGDB breakpoint */ 126 126 #else ··· 454 454 call kgdb_breakpoint_c 455 455 br ret_from_exception 456 456 #endif 457 + 458 + handle_trap_1: 459 + call handle_trap_1_c 460 + br ret_from_exception 461 + 462 + handle_trap_2: 463 + call handle_trap_2_c 464 + br ret_from_exception 465 + 466 + handle_trap_3: 467 + handle_trap_reserved: 468 + call handle_trap_3_c 469 + br ret_from_exception 457 470 458 471 /* 459 472 * Beware - when entering resume, prev (the current task) is
+27 -7
arch/nios2/kernel/traps.c
··· 23 23 24 24 static DEFINE_SPINLOCK(die_lock); 25 25 26 + static void _send_sig(int signo, int code, unsigned long addr) 27 + { 28 + siginfo_t info; 29 + 30 + info.si_signo = signo; 31 + info.si_errno = 0; 32 + info.si_code = code; 33 + info.si_addr = (void __user *) addr; 34 + force_sig_info(signo, &info, current); 35 + } 36 + 26 37 void die(const char *str, struct pt_regs *regs, long err) 27 38 { 28 39 console_verbose(); ··· 50 39 51 40 void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr) 52 41 { 53 - siginfo_t info; 54 - 55 42 if (!user_mode(regs)) 56 43 die("Exception in kernel mode", regs, signo); 57 44 58 - info.si_signo = signo; 59 - info.si_errno = 0; 60 - info.si_code = code; 61 - info.si_addr = (void __user *) addr; 62 - force_sig_info(signo, &info, current); 45 + _send_sig(signo, code, addr); 63 46 } 64 47 65 48 /* ··· 187 182 show_regs(regs); 188 183 189 184 pr_emerg("opcode: 0x%08lx\n", *(unsigned long *)(regs->ea)); 185 + } 186 + 187 + asmlinkage void handle_trap_1_c(struct pt_regs *fp) 188 + { 189 + _send_sig(SIGUSR1, 0, fp->ea); 190 + } 191 + 192 + asmlinkage void handle_trap_2_c(struct pt_regs *fp) 193 + { 194 + _send_sig(SIGUSR2, 0, fp->ea); 195 + } 196 + 197 + asmlinkage void handle_trap_3_c(struct pt_regs *fp) 198 + { 199 + _send_sig(SIGILL, ILL_ILLTRP, fp->ea); 190 200 }
+35 -18
arch/nios2/mm/cacheflush.c
··· 58 58 end += (cpuinfo.dcache_line_size - 1); 59 59 end &= ~(cpuinfo.dcache_line_size - 1); 60 60 61 - if (end > start + cpuinfo.dcache_size) 62 - end = start + cpuinfo.dcache_size; 63 - 64 61 for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) { 65 62 __asm__ __volatile__ (" initda 0(%0)\n" 66 63 : /* Outputs */ ··· 128 131 129 132 void flush_icache_range(unsigned long start, unsigned long end) 130 133 { 134 + __flush_dcache(start, end); 131 135 __flush_icache(start, end); 132 136 } 133 137 134 138 void flush_dcache_range(unsigned long start, unsigned long end) 135 139 { 136 140 __flush_dcache(start, end); 141 + __flush_icache(start, end); 137 142 } 138 143 EXPORT_SYMBOL(flush_dcache_range); 139 144 ··· 158 159 unsigned long start = (unsigned long) page_address(page); 159 160 unsigned long end = start + PAGE_SIZE; 160 161 162 + __flush_dcache(start, end); 161 163 __flush_icache(start, end); 162 164 } 163 165 ··· 171 171 __flush_dcache(start, end); 172 172 if (vma->vm_flags & VM_EXEC) 173 173 __flush_icache(start, end); 174 + } 175 + 176 + void __flush_dcache_page(struct address_space *mapping, struct page *page) 177 + { 178 + /* 179 + * Writeback any data associated with the kernel mapping of this 180 + * page. This ensures that data in the physical page is mutually 181 + * coherent with the kernels mapping. 
182 + */ 183 + unsigned long start = (unsigned long)page_address(page); 184 + 185 + __flush_dcache_all(start, start + PAGE_SIZE); 174 186 } 175 187 176 188 void flush_dcache_page(struct page *page) ··· 202 190 if (mapping && !mapping_mapped(mapping)) { 203 191 clear_bit(PG_dcache_clean, &page->flags); 204 192 } else { 205 - unsigned long start = (unsigned long)page_address(page); 206 - 207 - __flush_dcache_all(start, start + PAGE_SIZE); 208 - if (mapping) 193 + __flush_dcache_page(mapping, page); 194 + if (mapping) { 195 + unsigned long start = (unsigned long)page_address(page); 209 196 flush_aliases(mapping, page); 197 + flush_icache_range(start, start + PAGE_SIZE); 198 + } 210 199 set_bit(PG_dcache_clean, &page->flags); 211 200 } 212 201 } ··· 218 205 { 219 206 unsigned long pfn = pte_pfn(*pte); 220 207 struct page *page; 208 + struct address_space *mapping; 221 209 222 210 if (!pfn_valid(pfn)) 223 211 return; ··· 231 217 if (page == ZERO_PAGE(0)) 232 218 return; 233 219 234 - if (!PageReserved(page) && 235 - !test_and_set_bit(PG_dcache_clean, &page->flags)) { 236 - unsigned long start = page_to_virt(page); 237 - struct address_space *mapping; 220 + mapping = page_mapping(page); 221 + if (!test_and_set_bit(PG_dcache_clean, &page->flags)) 222 + __flush_dcache_page(mapping, page); 238 223 239 - __flush_dcache(start, start + PAGE_SIZE); 240 - 241 - mapping = page_mapping(page); 242 - if (mapping) 243 - flush_aliases(mapping, page); 224 + if(mapping) 225 + { 226 + flush_aliases(mapping, page); 227 + if (vma->vm_flags & VM_EXEC) 228 + flush_icache_page(vma, page); 244 229 } 245 230 } 246 231 ··· 247 234 struct page *to) 248 235 { 249 236 __flush_dcache(vaddr, vaddr + PAGE_SIZE); 237 + __flush_icache(vaddr, vaddr + PAGE_SIZE); 250 238 copy_page(vto, vfrom); 251 239 __flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE); 240 + __flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE); 252 241 } 253 242 254 243 void clear_user_page(void *addr, 
unsigned long vaddr, struct page *page) 255 244 { 256 245 __flush_dcache(vaddr, vaddr + PAGE_SIZE); 246 + __flush_icache(vaddr, vaddr + PAGE_SIZE); 257 247 clear_page(addr); 258 248 __flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE); 249 + __flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE); 259 250 } 260 251 261 252 void copy_from_user_page(struct vm_area_struct *vma, struct page *page, ··· 268 251 { 269 252 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); 270 253 memcpy(dst, src, len); 271 - __flush_dcache((unsigned long)src, (unsigned long)src + len); 254 + __flush_dcache_all((unsigned long)src, (unsigned long)src + len); 272 255 if (vma->vm_flags & VM_EXEC) 273 256 __flush_icache((unsigned long)src, (unsigned long)src + len); 274 257 } ··· 279 262 { 280 263 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); 281 264 memcpy(dst, src, len); 282 - __flush_dcache((unsigned long)dst, (unsigned long)dst + len); 265 + __flush_dcache_all((unsigned long)dst, (unsigned long)dst + len); 283 266 if (vma->vm_flags & VM_EXEC) 284 267 __flush_icache((unsigned long)dst, (unsigned long)dst + len); 285 268 }