Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'csky-for-linus-5.4-rc1' of git://github.com/c-sky/csky-linux

Pull csky updates from Guo Ren:
"This round of csky subsystem just some fixups:

- Fix mb() synchronization problem

- Fix dma_alloc_coherent with PAGE_SO attribute

- Fix cache_op failed when cross memory ZONEs

- Optimize arch_sync_dma_for_cpu/device with dma_inv_range

- Fix ioremap function losing

- Fix arch_get_unmapped_area() implementation

- Fix defer cache flush for 610

- Support kernel non-aligned access

- Fix 610 vipt cache flush mechanism

- Fix add zero_fp fixup perf backtrace panic

- Move static keyword to the front of declaration

- Fix csky_pmu.max_period assignment

- Use generic free_initrd_mem()

- entry: Remove unneeded need_resched() loop"

* tag 'csky-for-linus-5.4-rc1' of git://github.com/c-sky/csky-linux:
csky: Move static keyword to the front of declaration
csky: entry: Remove unneeded need_resched() loop
csky: Fixup csky_pmu.max_period assignment
csky: Fixup add zero_fp fixup perf backtrace panic
csky: Use generic free_initrd_mem()
csky: Fixup 610 vipt cache flush mechanism
csky: Support kernel non-aligned access
csky: Fixup defer cache flush for 610
csky: Fixup arch_get_unmapped_area() implementation
csky: Fixup ioremap function losing
csky: Optimize arch_sync_dma_for_cpu/device with dma_inv_range
csky/dma: Fixup cache_op failed when cross memory ZONEs
csky: Fixup dma_alloc_coherent with PAGE_SO attribute
csky: Fixup mb() synchronization problem

+292 -213 (17 files changed)
+45 -17
arch/csky/abiv1/alignment.c
···
 #include <linux/uaccess.h>
 #include <linux/ptrace.h>
 
-static int align_enable = 1;
-static int align_count;
+static int align_kern_enable = 1;
+static int align_usr_enable = 1;
+static int align_kern_count = 0;
+static int align_usr_count = 0;
 
 static inline uint32_t get_ptreg(struct pt_regs *regs, uint32_t rx)
 {
···
 {
 	uint32_t val;
 	int err;
-
-	if (!access_ok((void *)addr, 1))
-		return 1;
 
 	asm volatile (
 	"movi %0, 0\n"
···
 static int stb_asm(uint32_t addr, uint32_t val)
 {
 	int err;
-
-	if (!access_ok((void *)addr, 1))
-		return 1;
 
 	asm volatile (
 	"movi %0, 0\n"
···
 	if (stb_asm(addr, byte3))
 		return 1;
 
-	align_count++;
-
 	return 0;
 }
···
 	uint32_t addr = 0;
 
 	if (!user_mode(regs))
+		goto kernel_area;
+
+	if (!align_usr_enable) {
+		pr_err("%s user disabled.\n", __func__);
 		goto bad_area;
+	}
+
+	align_usr_count++;
 
 	ret = get_user(tmp, (uint16_t *)instruction_pointer(regs));
 	if (ret) {
···
 		goto bad_area;
 	}
 
+	goto good_area;
+
+kernel_area:
+	if (!align_kern_enable) {
+		pr_err("%s kernel disabled.\n", __func__);
+		goto bad_area;
+	}
+
+	align_kern_count++;
+
+	tmp = *(uint16_t *)instruction_pointer(regs);
+
+good_area:
 	opcode = (uint32_t)tmp;
 
 	rx = opcode & 0xf;
···
 	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
 }
 
-static struct ctl_table alignment_tbl[4] = {
+static struct ctl_table alignment_tbl[5] = {
 	{
-		.procname = "enable",
-		.data = &align_enable,
-		.maxlen = sizeof(align_enable),
+		.procname = "kernel_enable",
+		.data = &align_kern_enable,
+		.maxlen = sizeof(align_kern_enable),
 		.mode = 0666,
 		.proc_handler = &proc_dointvec
 	},
 	{
-		.procname = "count",
-		.data = &align_count,
-		.maxlen = sizeof(align_count),
+		.procname = "user_enable",
+		.data = &align_usr_enable,
+		.maxlen = sizeof(align_usr_enable),
+		.mode = 0666,
+		.proc_handler = &proc_dointvec
+	},
+	{
+		.procname = "kernel_count",
+		.data = &align_kern_count,
+		.maxlen = sizeof(align_kern_count),
+		.mode = 0666,
+		.proc_handler = &proc_dointvec
+	},
+	{
+		.procname = "user_count",
+		.data = &align_usr_count,
+		.maxlen = sizeof(align_usr_count),
 		.mode = 0666,
 		.proc_handler = &proc_dointvec
 	},
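The old enable/count pair becomes four sysctl knobs, split by kernel and user mode. A minimal userspace sketch for reading the new counters follows; the /proc/sys paths are an assumption (the diff shows only the ctl_table entries, not where the table is registered), so treat them as illustrative.

/* Hypothetical check of the new alignment knobs from userspace.
 * The /proc/sys/csky/csky_alignment/ path is assumed, not taken
 * from this diff.
 */
#include <stdio.h>

static long read_knob(const char *path)
{
	FILE *f = fopen(path, "r");
	long val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	printf("kernel fixups: %ld\n",
	       read_knob("/proc/sys/csky/csky_alignment/kernel_count"));
	printf("user fixups:   %ld\n",
	       read_knob("/proc/sys/csky/csky_alignment/user_count"));
	return 0;
}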
+47 -23
arch/csky/abiv1/cacheflush.c
···
 #include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 
+#define PG_dcache_clean		PG_arch_1
+
 void flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
-	unsigned long addr;
+	struct address_space *mapping;
 
-	if (mapping && !mapping_mapped(mapping)) {
-		set_bit(PG_arch_1, &(page)->flags);
+	if (page == ZERO_PAGE(0))
 		return;
+
+	mapping = page_mapping_file(page);
+
+	if (mapping && !page_mapcount(page))
+		clear_bit(PG_dcache_clean, &page->flags);
+	else {
+		dcache_wbinv_all();
+		if (mapping)
+			icache_inv_all();
+		set_bit(PG_dcache_clean, &page->flags);
 	}
-
-	/*
-	 * We could delay the flush for the !page_mapping case too. But that
-	 * case is for exec env/arg pages and those are %99 certainly going to
-	 * get faulted into the tlb (and thus flushed) anyways.
-	 */
-	addr = (unsigned long) page_address(page);
-	dcache_wb_range(addr, addr + PAGE_SIZE);
 }
+EXPORT_SYMBOL(flush_dcache_page);
 
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t *pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+		      pte_t *ptep)
 {
-	unsigned long addr;
+	unsigned long pfn = pte_pfn(*ptep);
 	struct page *page;
-	unsigned long pfn;
 
-	pfn = pte_pfn(*pte);
-	if (unlikely(!pfn_valid(pfn)))
+	if (!pfn_valid(pfn))
 		return;
 
 	page = pfn_to_page(pfn);
-	addr = (unsigned long) page_address(page);
+	if (page == ZERO_PAGE(0))
+		return;
 
-	if (vma->vm_flags & VM_EXEC ||
-	    pages_do_alias(addr, address & PAGE_MASK))
-		cache_wbinv_all();
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+		dcache_wbinv_all();
 
-	clear_bit(PG_arch_1, &(page)->flags);
+	if (page_mapping_file(page)) {
+		if (vma->vm_flags & VM_EXEC)
+			icache_inv_all();
+	}
+}
+
+void flush_kernel_dcache_page(struct page *page)
+{
+	struct address_space *mapping;
+
+	mapping = page_mapping_file(page);
+
+	if (!mapping || mapping_mapped(mapping))
+		dcache_wbinv_all();
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end)
+{
+	dcache_wbinv_all();
+
+	if (vma->vm_flags & VM_EXEC)
+		icache_inv_all();
 }
+31 -14
arch/csky/abiv1/inc/abi/cacheflush.h
···
 #ifndef __ABI_CSKY_CACHEFLUSH_H
 #define __ABI_CSKY_CACHEFLUSH_H
 
-#include <linux/compiler.h>
+#include <linux/mm.h>
 #include <asm/string.h>
 #include <asm/cache.h>
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-#define flush_cache_mm(mm)			cache_wbinv_all()
+#define flush_cache_mm(mm)			dcache_wbinv_all()
 #define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
 #define flush_cache_dup_mm(mm)			cache_wbinv_all()
+
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+extern void flush_kernel_dcache_page(struct page *);
+
+#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
+
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+	dcache_wbinv_all();
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+	dcache_wbinv_all();
+}
+
+#define ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct vm_area_struct *vma,
+			 struct page *page, unsigned long vmaddr)
+{
+	if (PageAnon(page))
+		cache_wbinv_all();
+}
 
 /*
  * if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken.
  * Use cache_wbinv_all() here and need to be improved in future.
  */
-#define flush_cache_range(vma, start, end)	cache_wbinv_all()
-#define flush_cache_vmap(start, end)		cache_wbinv_range(start, end)
-#define flush_cache_vunmap(start, end)		cache_wbinv_range(start, end)
+extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+#define flush_cache_vmap(start, end)		cache_wbinv_all()
+#define flush_cache_vunmap(start, end)		cache_wbinv_all()
 
-#define flush_icache_page(vma, page)		cache_wbinv_all()
+#define flush_icache_page(vma, page)		do {} while (0);
 #define flush_icache_range(start, end)		cache_wbinv_range(start, end)
 
-#define flush_icache_user_range(vma, pg, adr, len) \
-	cache_wbinv_range(adr, adr + len)
+#define flush_icache_user_range(vma,page,addr,len) \
+	flush_dcache_page(page)
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 do { \
-	cache_wbinv_all(); \
 	memcpy(dst, src, len); \
-	cache_wbinv_all(); \
 } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 do { \
-	cache_wbinv_all(); \
 	memcpy(dst, src, len); \
 	cache_wbinv_all(); \
 } while (0)
-
-#define flush_dcache_mmap_lock(mapping)		do {} while (0)
-#define flush_dcache_mmap_unlock(mapping)	do {} while (0)
 
 #endif /* __ABI_CSKY_CACHEFLUSH_H */
+3 -2
arch/csky/abiv1/inc/abi/page.h
···
 /* SPDX-License-Identifier: GPL-2.0 */
 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
 
-extern unsigned long shm_align_mask;
+#include <asm/shmparam.h>
+
 extern void flush_dcache_page(struct page *page);
 
 static inline unsigned long pages_do_alias(unsigned long addr1,
 					   unsigned long addr2)
 {
-	return (addr1 ^ addr2) & shm_align_mask;
+	return (addr1 ^ addr2) & (SHMLBA-1);
 }
 
 static inline void clear_user_page(void *addr, unsigned long vaddr,
+41 -36
arch/csky/abiv1/mmap.c
···
 #include <linux/random.h>
 #include <linux/io.h>
 
-unsigned long shm_align_mask = (0x4000 >> 1) - 1;   /* Sane caches */
+#define COLOUR_ALIGN(addr,pgoff)		\
+	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
+	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
 
-#define COLOUR_ALIGN(addr, pgoff) \
-	((((addr) + shm_align_mask) & ~shm_align_mask) + \
-	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
-
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+/*
+ * We need to ensure that shared mappings are correctly aligned to
+ * avoid aliasing issues with VIPT caches. We need to ensure that
+ * a specific page of an object is always mapped at a multiple of
+ * SHMLBA bytes.
+ *
+ * We unconditionally provide this function for all cases.
+ */
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
-	struct vm_area_struct *vmm;
-	int do_color_align;
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	int do_align = 0;
+	struct vm_unmapped_area_info info;
 
+	/*
+	 * We only need to do colour alignment if either the I or D
+	 * caches alias.
+	 */
+	do_align = filp || (flags & MAP_SHARED);
+
+	/*
+	 * We enforce the MAP_FIXED case.
+	 */
 	if (flags & MAP_FIXED) {
-		/*
-		 * We do not accept a shared mapping if it would violate
-		 * cache aliasing constraints.
-		 */
-		if ((flags & MAP_SHARED) &&
-		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+		if (flags & MAP_SHARED &&
+		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
 			return -EINVAL;
 		return addr;
 	}
 
 	if (len > TASK_SIZE)
 		return -ENOMEM;
-	do_color_align = 0;
-	if (filp || (flags & MAP_SHARED))
-		do_color_align = 1;
+
 	if (addr) {
-		if (do_color_align)
+		if (do_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
-		vmm = find_vma(current->mm, addr);
-		if (TASK_SIZE - len >= addr &&
-		    (!vmm || addr + len <= vmm->vm_start))
-			return addr;
-	}
-	addr = TASK_UNMAPPED_BASE;
-	if (do_color_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(addr);
 
-	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
-		/* At this point:  (!vmm || addr < vmm->vm_end). */
-		if (TASK_SIZE - len < addr)
-			return -ENOMEM;
-		if (!vmm || addr + len <= vmm->vm_start)
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
-		addr = vmm->vm_end;
-		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
 	}
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
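The new COLOUR_ALIGN() keeps the low SHMLBA bits of the chosen address equal to the low bits of the file offset, so every mapping of a given page lands in the same VIPT cache colour. A small worked sketch of the arithmetic follows; SHMLBA is fixed at 0x4000 here purely for illustration, the real value comes from <asm/shmparam.h>.

/* Illustration of the COLOUR_ALIGN() arithmetic from the new mmap.c.
 * SHMLBA = 0x4000 is an assumption made only for this example.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define SHMLBA		0x4000UL	/* assumed value for illustration */

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +		\
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

int main(void)
{
	unsigned long hint  = 0x20001234UL;	/* unaligned mmap hint */
	unsigned long pgoff = 3;		/* page offset into the file */
	unsigned long addr  = COLOUR_ALIGN(hint, pgoff);

	/* addr is rounded up to a SHMLBA boundary and then offset so that
	 * (addr ^ (pgoff << PAGE_SHIFT)) & (SHMLBA - 1) == 0, i.e. this
	 * mapping shares a cache colour with every other mapping of the
	 * same file page (cf. pages_do_alias() in abi/page.h).
	 */
	printf("0x%lx -> 0x%lx\n", hint, addr);
	return 0;
}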
+7 -8
arch/csky/include/asm/barrier.h
···
 #define nop()	asm volatile ("nop\n":::"memory")
 
 /*
- * sync:        completion barrier
- * sync.s:      completion barrier and shareable to other cores
- * sync.i:      completion barrier with flush cpu pipeline
- * sync.is:     completion barrier with flush cpu pipeline and shareable to
- *              other cores
+ * sync:        completion barrier, all sync.xx instructions
+ *              guarantee the last response recieved by bus transaction
+ *              made by ld/st instructions before sync.s
+ * sync.s:      inherit from sync, but also shareable to other cores
+ * sync.i:      inherit from sync, but also flush cpu pipeline
+ * sync.is:     the same with sync.i + sync.s
  *
  * bar.brwarw:  ordering barrier for all load/store instructions before it
  * bar.brwarws: ordering barrier for all load/store instructions before it
···
  */
 
 #ifdef CONFIG_CPU_HAS_CACHEV2
-#define mb()		asm volatile ("bar.brwarw\n":::"memory")
-#define rmb()		asm volatile ("bar.brar\n":::"memory")
-#define wmb()		asm volatile ("bar.bwaw\n":::"memory")
+#define mb()		asm volatile ("sync.s\n":::"memory")
 
 #ifdef CONFIG_SMP
 #define __smp_mb()	asm volatile ("bar.brwarws\n":::"memory")
+1
arch/csky/include/asm/cache.h
···
 void cache_wbinv_all(void);
 
 void dma_wbinv_range(unsigned long start, unsigned long end);
+void dma_inv_range(unsigned long start, unsigned long end);
 void dma_wb_range(unsigned long start, unsigned long end);
 
 #endif
+12 -11
arch/csky/include/asm/io.h
···
 #ifndef __ASM_CSKY_IO_H
 #define __ASM_CSKY_IO_H
 
-#include <abi/pgtable-bits.h>
+#include <asm/pgtable.h>
 #include <linux/types.h>
 #include <linux/version.h>
-
-extern void __iomem *ioremap(phys_addr_t offset, size_t size);
-
-extern void iounmap(void *addr);
-
-extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
-		size_t size, unsigned long flags);
 
 /*
  * I/O memory access primitives. Reads are ordered relative to any
···
 #define writel(v,c)		({ wmb(); writel_relaxed((v),(c)); mb(); })
 #endif
 
-#define ioremap_nocache(phy, sz)	ioremap(phy, sz)
-#define ioremap_wc ioremap_nocache
-#define ioremap_wt ioremap_nocache
+/*
+ * I/O memory mapping functions.
+ */
+extern void __iomem *ioremap_cache(phys_addr_t addr, size_t size);
+extern void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot);
+extern void iounmap(void *addr);
+
+#define ioremap(addr, size)		__ioremap((addr), (size), pgprot_noncached(PAGE_KERNEL))
+#define ioremap_wc(addr, size)		__ioremap((addr), (size), pgprot_writecombine(PAGE_KERNEL))
+#define ioremap_nocache(addr, size)	ioremap((addr), (size))
+#define ioremap_cache			ioremap_cache
 
 #include <asm-generic/io.h>
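With ioremap() now built on __ioremap() and a real pgprot, drivers can pick the mapping type explicitly: ioremap() gives an uncached, strongly ordered mapping, ioremap_wc() an uncached write-combining one, and ioremap_cache() a cached one. A hedged driver-side sketch follows; the device, its physical addresses, and the frame-buffer use case are invented, only the ioremap/ioremap_wc/iounmap/writel calls come from this patch.

/* Sketch of a driver using the mapping flavours exposed by the new io.h.
 * DEV_REG_PHYS, DEV_FB_PHYS and the sizes are hypothetical.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sizes.h>

#define DEV_REG_PHYS	0x1f000000UL	/* made-up register block */
#define DEV_FB_PHYS	0x20000000UL	/* made-up frame buffer   */

static void __iomem *regs;
static void __iomem *fb;

static int example_map(void)
{
	/* control registers: uncached and strongly ordered (_PAGE_SO) */
	regs = ioremap(DEV_REG_PHYS, 0x1000);
	if (!regs)
		return -ENOMEM;

	/* frame buffer: uncached but write-combining, no _PAGE_SO */
	fb = ioremap_wc(DEV_FB_PHYS, SZ_1M);
	if (!fb) {
		iounmap(regs);
		return -ENOMEM;
	}

	writel(0x1, regs + 0x04);	/* ordered MMIO write */
	return 0;
}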
+10
arch/csky/include/asm/pgtable.h
···
 {
 	unsigned long prot = pgprot_val(_prot);
 
+	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO;
+
+	return __pgprot(prot);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+	unsigned long prot = pgprot_val(_prot);
+
 	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
 
 	return __pgprot(prot);
+30 -24
arch/csky/kernel/entry.S
···
 #define PTE_INDX_SHIFT  10
 #define _PGDIR_SHIFT    22
 
+.macro	zero_fp
+#ifdef CONFIG_STACKTRACE
+	movi	r8, 0
+#endif
+.endm
+
 .macro	tlbop_begin name, val0, val1, val2
 ENTRY(csky_\name)
 	mtcr	a3, ss2
···
 	SAVE_ALL 0
 .endm
 .macro	tlbop_end is_write
+	zero_fp
 	RD_MEH	a2
 	psrset	ee, ie
 	mov	a0, sp
···
 
 ENTRY(csky_systemcall)
 	SAVE_ALL TRAP0_SIZE
+	zero_fp
 
 	psrset	ee, ie
···
 	mov	r9, sp
 	bmaski	r10, THREAD_SHIFT
 	andn	r9, r10
-	ldw	r8, (r9, TINFO_FLAGS)
-	ANDI_R3	r8, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
-	cmpnei	r8, 0
+	ldw	r12, (r9, TINFO_FLAGS)
+	ANDI_R3	r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
+	cmpnei	r12, 0
 	bt	csky_syscall_trace
 #if defined(__CSKYABIV2__)
 	subi	sp, 8
···
 
 ENTRY(ret_from_kernel_thread)
 	jbsr	schedule_tail
-	mov	a0, r8
+	mov	a0, r10
 	jsr	r9
 	jbsr	ret_from_exception
···
 	mov	r9, sp
 	bmaski	r10, THREAD_SHIFT
 	andn	r9, r10
-	ldw	r8, (r9, TINFO_FLAGS)
-	ANDI_R3	r8, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
-	cmpnei	r8, 0
+	ldw	r12, (r9, TINFO_FLAGS)
+	ANDI_R3	r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
+	cmpnei	r12, 0
 	bf	ret_from_exception
 	mov	a0, sp			/* sp = pt_regs pointer */
 	jbsr	syscall_trace_exit
···
 	bmaski	r10, THREAD_SHIFT
 	andn	r9, r10
 
-	ldw	r8, (r9, TINFO_FLAGS)
-	andi	r8, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
-	cmpnei	r8, 0
+	ldw	r12, (r9, TINFO_FLAGS)
+	andi	r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
+	cmpnei	r12, 0
 	bt	exit_work
 1:
 	RESTORE_ALL
···
 	lrw	syscallid, ret_from_exception
 	mov	lr, syscallid
 
-	btsti	r8, TIF_NEED_RESCHED
+	btsti	r12, TIF_NEED_RESCHED
 	bt	work_resched
 
 	mov	a0, sp
-	mov	a1, r8
+	mov	a1, r12
 	jmpi	do_notify_resume
 
 work_resched:
···
 
 ENTRY(csky_trap)
 	SAVE_ALL 0
+	zero_fp
 	psrset	ee
 	mov	a0, sp                 /* Push Stack pointer arg */
 	jbsr	trap_c                 /* Call C-level trap handler */
···
 
 ENTRY(csky_irq)
 	SAVE_ALL 0
+	zero_fp
 	psrset	ee
 
 #ifdef CONFIG_PREEMPT
···
 	 * Get task_struct->stack.preempt_count for current,
 	 * and increase 1.
 	 */
-	ldw	r8, (r9, TINFO_PREEMPT)
-	addi	r8, 1
-	stw	r8, (r9, TINFO_PREEMPT)
+	ldw	r12, (r9, TINFO_PREEMPT)
+	addi	r12, 1
+	stw	r12, (r9, TINFO_PREEMPT)
 #endif
 
 	mov	a0, sp
 	jbsr	csky_do_IRQ
 
 #ifdef CONFIG_PREEMPT
-	subi	r8, 1
-	stw	r8, (r9, TINFO_PREEMPT)
-	cmpnei	r8, 0
+	subi	r12, 1
+	stw	r12, (r9, TINFO_PREEMPT)
+	cmpnei	r12, 0
 	bt	2f
-	ldw	r8, (r9, TINFO_FLAGS)
-	btsti	r8, TIF_NEED_RESCHED
+	ldw	r12, (r9, TINFO_FLAGS)
+	btsti	r12, TIF_NEED_RESCHED
 	bf	2f
-1:
 	jbsr	preempt_schedule_irq	/* irq en/disable is done inside */
-	ldw	r7, (r9, TINFO_FLAGS)	/* get new tasks TI_FLAGS */
-	btsti	r7, TIF_NEED_RESCHED
-	bt	1b			/* go again */
 #endif
 2:
 	jmpi	ret_from_exception
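The r8-to-r12 scratch-register switch frees r8 to serve as a frame pointer, and the new zero_fp macro clears it on every kernel entry so a frame-pointer walk has a definite stopping point instead of chasing a stale user-mode value, which is what caused the perf backtrace panic. The following is a generic sketch of why that matters, assuming a frame-pointer unwinder of roughly this shape (not the actual csky implementation):

/* Simplified frame-pointer walk, illustrating why entry code zeroes the
 * frame pointer (r8) when CONFIG_STACKTRACE is enabled. Generic sketch,
 * not the csky unwinder itself.
 */
struct stackframe {
	unsigned long fp;	/* caller's frame pointer */
	unsigned long ra;	/* return address */
};

static void walk_stackframe(unsigned long fp,
			    void (*emit)(unsigned long ra))
{
	while (fp) {		/* zero_fp guarantees this terminates */
		struct stackframe *frame = (struct stackframe *)fp;

		/* a real unwinder would also bounds-check fp against the
		 * task's kernel stack before dereferencing it
		 */
		emit(frame->ra);
		fp = frame->fp;
	}
}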
+2 -2
arch/csky/kernel/perf_event.c
···
 				     &csky_pmu.count_width)) {
 		csky_pmu.count_width = DEFAULT_COUNT_WIDTH;
 	}
-	csky_pmu.max_period = BIT(csky_pmu.count_width) - 1;
+	csky_pmu.max_period = BIT_ULL(csky_pmu.count_width) - 1;
 
 	csky_pmu.plat_device = pdev;
···
 	return ret;
 }
 
-const static struct of_device_id csky_pmu_of_device_ids[] = {
+static const struct of_device_id csky_pmu_of_device_ids[] = {
 	{.compatible = "csky,csky-pmu"},
 	{},
 };
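max_period is a 64-bit quantity, but BIT() expands to 1UL << n, so on 32-bit csky a count width of 32 or more shifts past the width of unsigned long and produces garbage; BIT_ULL() performs the shift in unsigned long long. A small illustration follows, with count_width = 48 assumed just to make the overflow visible (the real DEFAULT_COUNT_WIDTH is not shown in this diff).

/* Why csky_pmu.max_period needs BIT_ULL() on a 32-bit kernel.
 * count_width = 48 is an assumption for this example.
 */
#include <stdio.h>

#define BIT(nr)		(1UL << (nr))	/* unsigned long: 32 bits on csky */
#define BIT_ULL(nr)	(1ULL << (nr))	/* always 64 bits */

int main(void)
{
	unsigned int count_width = 48;
	unsigned long long max_period = BIT_ULL(count_width) - 1;

	/* On a 32-bit target, 1UL << 48 is undefined behaviour and in
	 * practice yields a bogus value, so max_period would be wrong;
	 * BIT_ULL() gives the intended 2^48 - 1.
	 */
	printf("max_period = 0x%llx\n", max_period);
	return 0;
}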
+1 -1
arch/csky/kernel/process.c
···
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childstack->r15 = (unsigned long) ret_from_kernel_thread;
-		childstack->r8 = kthread_arg;
+		childstack->r10 = kthread_arg;
 		childstack->r9 = usp;
 		childregs->sr = mfcr("psr");
 	} else {
+6 -1
arch/csky/mm/cachev1.c
···
 	cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
 }
 
+void dma_inv_range(unsigned long start, unsigned long end)
+{
+	cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
+}
+
 void dma_wb_range(unsigned long start, unsigned long end)
 {
-	cache_op_range(start, end, DATA_CACHE|CACHE_INV, 1);
+	cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
 }
+10 -1
arch/csky/mm/cachev2.c
···
 	sync_is();
 }
 
+void dma_inv_range(unsigned long start, unsigned long end)
+{
+	unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+	for (; i < end; i += L1_CACHE_BYTES)
+		asm volatile("dcache.iva %0\n"::"r"(i):"memory");
+	sync_is();
+}
+
 void dma_wb_range(unsigned long start, unsigned long end)
 {
 	unsigned long i = start & ~(L1_CACHE_BYTES - 1);
 
 	for (; i < end; i += L1_CACHE_BYTES)
-		asm volatile("dcache.civa %0\n"::"r"(i):"memory");
+		asm volatile("dcache.cva %0\n"::"r"(i):"memory");
 	sync_is();
 }
+28 -48
arch/csky/mm/dma-mapping.c
···
 #include <linux/version.h>
 #include <asm/cache.h>
 
-void arch_dma_prep_coherent(struct page *page, size_t size)
-{
-	if (PageHighMem(page)) {
-		unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-		do {
-			void *ptr = kmap_atomic(page);
-			size_t _size = (size < PAGE_SIZE) ? size : PAGE_SIZE;
-
-			memset(ptr, 0, _size);
-			dma_wbinv_range((unsigned long)ptr,
-					(unsigned long)ptr + _size);
-
-			kunmap_atomic(ptr);
-
-			page++;
-			size -= PAGE_SIZE;
-			count--;
-		} while (count);
-	} else {
-		void *ptr = page_address(page);
-
-		memset(ptr, 0, size);
-		dma_wbinv_range((unsigned long)ptr, (unsigned long)ptr + size);
-	}
-}
-
 static inline void cache_op(phys_addr_t paddr, size_t size,
 			    void (*fn)(unsigned long start, unsigned long end))
 {
-	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
-	unsigned int offset = paddr & ~PAGE_MASK;
-	size_t left = size;
-	unsigned long start;
+	struct page *page    = phys_to_page(paddr);
+	void *start          = __va(page_to_phys(page));
+	unsigned long offset = offset_in_page(paddr);
+	size_t left          = size;
 
 	do {
 		size_t len = left;
 
+		if (offset + len > PAGE_SIZE)
+			len = PAGE_SIZE - offset;
+
 		if (PageHighMem(page)) {
-			void *addr;
+			start = kmap_atomic(page);
 
-			if (offset + len > PAGE_SIZE) {
-				if (offset >= PAGE_SIZE) {
-					page += offset >> PAGE_SHIFT;
-					offset &= ~PAGE_MASK;
-				}
-				len = PAGE_SIZE - offset;
-			}
+			fn((unsigned long)start + offset,
+					(unsigned long)start + offset + len);
 
-			addr = kmap_atomic(page);
-			start = (unsigned long)(addr + offset);
-			fn(start, start + len);
-			kunmap_atomic(addr);
+			kunmap_atomic(start);
 		} else {
-			start = (unsigned long)phys_to_virt(paddr);
-			fn(start, start + size);
+			fn((unsigned long)start + offset,
+				(unsigned long)start + offset + len);
 		}
 		offset = 0;
+
 		page++;
+		start += PAGE_SIZE;
 		left -= len;
 	} while (left);
+}
+
+static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end)
+{
+	memset((void *)start, 0, end - start);
+	dma_wbinv_range(start, end);
+}
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+	cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
 }
 
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
···
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:
-		cache_op(paddr, size, dma_wb_range);
-		break;
+		return;
 	case DMA_FROM_DEVICE:
 	case DMA_BIDIRECTIONAL:
-		cache_op(paddr, size, dma_wbinv_range);
+		cache_op(paddr, size, dma_inv_range);
 		break;
 	default:
 		BUG();
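The streaming-DMA hooks now follow the usual non-coherent pattern: cache maintenance happens before the device touches the buffer, and on the way back a buffer the device wrote is only invalidated, while a TO_DEVICE buffer needs nothing at all. A hedged driver-side sketch of where these hooks fire follows; the device pointer and buffer size are invented, only dma_map_single/dma_unmap_single and the arch hooks above are real.

/* Sketch of the streaming DMA calls that end up in
 * arch_sync_dma_for_device()/arch_sync_dma_for_cpu() above.
 * The device and the buffer size are hypothetical.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int example_rx(struct device *dev, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);
	dma_addr_t handle;

	if (!buf)
		return -ENOMEM;

	/* map for the device to write into: the arch hook cleans and/or
	 * invalidates the lines covering buf so nothing dirty can later
	 * be evicted over the device's data
	 */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		kfree(buf);
		return -ENOMEM;
	}

	/* ... start the transfer and wait for completion ... */

	/* unmap before the CPU reads the data: arch_sync_dma_for_cpu()
	 * invalidates (dma_inv_range) so the CPU sees what the device wrote
	 */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	kfree(buf);
	return 0;
}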
-16
arch/csky/mm/init.c
···
 	mem_init_print_info(NULL);
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	if (start < end)
-		pr_info("Freeing initrd memory: %ldk freed\n",
-			(end - start) >> 10);
-
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page(start);
-		totalram_pages_inc();
-	}
-}
-#endif
-
 extern char __init_begin[], __init_end[];
 
 void free_initmem(void)
+18 -9
arch/csky/mm/ioremap.c
···
 
 #include <asm/pgtable.h>
 
-void __iomem *ioremap(phys_addr_t addr, size_t size)
+static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
+				      pgprot_t prot, void *caller)
 {
 	phys_addr_t last_addr;
 	unsigned long offset, vaddr;
 	struct vm_struct *area;
-	pgprot_t prot;
 
 	last_addr = addr + size - 1;
 	if (!size || last_addr < addr)
···
 	addr &= PAGE_MASK;
 	size = PAGE_ALIGN(size + offset);
 
-	area = get_vm_area_caller(size, VM_ALLOC, __builtin_return_address(0));
+	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 
 	vaddr = (unsigned long)area->addr;
-
-	prot = __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE |
-			_PAGE_GLOBAL | _CACHE_UNCACHED | _PAGE_SO);
 
 	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
 		free_vm_area(area);
···
 
 	return (void __iomem *)(vaddr + offset);
 }
-EXPORT_SYMBOL(ioremap);
+
+void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
+{
+	return __ioremap_caller(phys_addr, size, prot,
+				__builtin_return_address(0));
+}
+EXPORT_SYMBOL(__ioremap);
+
+void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
+{
+	return __ioremap_caller(phys_addr, size, PAGE_KERNEL,
+				__builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
 
 void iounmap(void __iomem *addr)
 {
···
 			unsigned long size, pgprot_t vma_prot)
 {
 	if (!pfn_valid(pfn)) {
-		vma_prot.pgprot |= _PAGE_SO;
 		return pgprot_noncached(vma_prot);
 	} else if (file->f_flags & O_SYNC) {
-		return pgprot_noncached(vma_prot);
+		return pgprot_writecombine(vma_prot);
 	}
 
 	return vma_prot;