Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'arc-v3.10-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull second set of arc arch updates from Vineet Gupta:
"Aliasing VIPT dcache support for ARC

"I'm satisfied with testing, especially with fuse which has
historically given grief to VIPT arches (ARM/PARISC...)"

* tag 'arc-v3.10-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
ARC: [TB10x] Remove GENERIC_GPIO
ARC: [mm] Aliasing VIPT dcache support 4/4
ARC: [mm] Aliasing VIPT dcache support 3/4
ARC: [mm] Aliasing VIPT dcache support 2/4
ARC: [mm] Aliasing VIPT dcache support 1/4
ARC: [mm] refactor the core (i|d)cache line ops loops
ARC: [mm] serious bug in vaddr based icache flush

+377 -74
+4
arch/arc/Kconfig
··· 182 182 Note that Global I/D ENABLE + Per Page DISABLE works but corollary 183 183 Global DISABLE + Per Page ENABLE won't work 184 184 185 + config ARC_CACHE_VIPT_ALIASING 186 + bool "Support VIPT Aliasing D$" 187 + default n 188 + 185 189 endif #ARC_CACHE 186 190 187 191 config ARC_HAS_ICCM
-1
arch/arc/include/asm/Kbuild
··· 32 32 generic-y += scatterlist.h 33 33 generic-y += sembuf.h 34 34 generic-y += shmbuf.h 35 - generic-y += shmparam.h 36 35 generic-y += siginfo.h 37 36 generic-y += socket.h 38 37 generic-y += sockios.h
-3
arch/arc/include/asm/cache.h
··· 55 55 : "r"(data), "r"(ptr)); \ 56 56 }) 57 57 58 - /* used to give SHMLBA a value to avoid Cache Aliasing */ 59 - extern unsigned int ARC_shmlba; 60 - 61 58 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES 62 59 63 60 /*
+49 -9
arch/arc/include/asm/cacheflush.h
··· 19 19 #define _ASM_CACHEFLUSH_H 20 20 21 21 #include <linux/mm.h> 22 + #include <asm/shmparam.h> 22 23 23 24 /* 24 25 * Semantically we need this because icache doesn't snoop dcache/dma. ··· 34 33 void flush_icache_range(unsigned long start, unsigned long end); 35 34 void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len); 36 35 void __inv_icache_page(unsigned long paddr, unsigned long vaddr); 37 - void __flush_dcache_page(unsigned long paddr); 36 + void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr); 37 + #define __flush_dcache_page(p, v) \ 38 + ___flush_dcache_page((unsigned long)p, (unsigned long)v) 38 39 39 40 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 40 41 ··· 53 50 #define flush_cache_vmap(start, end) flush_cache_all() 54 51 #define flush_cache_vunmap(start, end) flush_cache_all() 55 52 56 - /* 57 - * VM callbacks when entire/range of user-space V-P mappings are 58 - * torn-down/get-invalidated 59 - * 60 - * Currently we don't support D$ aliasing configs for our VIPT caches 61 - * NOPS for VIPT Cache with non-aliasing D$ configurations only 62 - */ 63 - #define flush_cache_dup_mm(mm) /* called on fork */ 53 + #define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */ 54 + 55 + #ifndef CONFIG_ARC_CACHE_VIPT_ALIASING 56 + 64 57 #define flush_cache_mm(mm) /* called on munmap/exit */ 65 58 #define flush_cache_range(mm, u_vstart, u_vend) 66 59 #define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */ 60 + 61 + #else /* VIPT aliasing dcache */ 62 + 63 + /* To clear out stale userspace mappings */ 64 + void flush_cache_mm(struct mm_struct *mm); 65 + void flush_cache_range(struct vm_area_struct *vma, 66 + unsigned long start,unsigned long end); 67 + void flush_cache_page(struct vm_area_struct *vma, 68 + unsigned long user_addr, unsigned long page); 69 + 70 + /* 71 + * To make sure that userspace mapping is flushed to memory before 72 + * get_user_pages() uses a kernel mapping to access the page 73 + */ 74 
+ #define ARCH_HAS_FLUSH_ANON_PAGE 75 + void flush_anon_page(struct vm_area_struct *vma, 76 + struct page *page, unsigned long u_vaddr); 77 + 78 + #endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */ 79 + 80 + /* 81 + * Simple wrapper over config option 82 + * Bootup code ensures that hardware matches kernel configuration 83 + */ 84 + static inline int cache_is_vipt_aliasing(void) 85 + { 86 + #ifdef CONFIG_ARC_CACHE_VIPT_ALIASING 87 + return 1; 88 + #else 89 + return 0; 90 + #endif 91 + } 92 + 93 + #define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 3) 94 + 95 + /* 96 + * checks if two addresses (after page aligning) index into same cache set 97 + */ 98 + #define addr_not_cache_congruent(addr1, addr2) \ 99 + cache_is_vipt_aliasing() ? \ 100 + (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0 \ 67 101 68 102 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 69 103 do { \
+15 -1
arch/arc/include/asm/page.h
··· 16 16 #define get_user_page(vaddr) __get_free_page(GFP_KERNEL) 17 17 #define free_user_page(page, addr) free_page(addr) 18 18 19 - /* TBD: for now don't worry about VIPT D$ aliasing */ 20 19 #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE) 21 20 #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) 22 21 22 + #ifndef CONFIG_ARC_CACHE_VIPT_ALIASING 23 + 23 24 #define clear_user_page(addr, vaddr, pg) clear_page(addr) 24 25 #define copy_user_page(vto, vfrom, vaddr, pg) copy_page(vto, vfrom) 26 + 27 + #else /* VIPT aliasing dcache */ 28 + 29 + struct vm_area_struct; 30 + struct page; 31 + 32 + #define __HAVE_ARCH_COPY_USER_HIGHPAGE 33 + 34 + void copy_user_highpage(struct page *to, struct page *from, 35 + unsigned long u_vaddr, struct vm_area_struct *vma); 36 + void clear_user_page(void *to, unsigned long u_vaddr, struct page *page); 37 + 38 + #endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */ 25 39 26 40 #undef STRICT_MM_TYPECHECKS 27 41
+3
arch/arc/include/asm/pgtable.h
··· 395 395 396 396 #include <asm-generic/pgtable.h> 397 397 398 + /* to cope with aliasing VIPT cache */ 399 + #define HAVE_ARCH_UNMAPPED_AREA 400 + 398 401 /* 399 402 * No page table caches to initialise 400 403 */
+18
arch/arc/include/asm/shmparam.h
··· 1 + /* 2 + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + 9 + #ifndef __ARC_ASM_SHMPARAM_H 10 + #define __ARC_ASM_SHMPARAM_H 11 + 12 + /* Handle upto 2 cache bins */ 13 + #define SHMLBA (2 * PAGE_SIZE) 14 + 15 + /* Enforce SHMLBA in shmat */ 16 + #define __ARCH_FORCE_SHMLBA 17 + 18 + #endif
+9 -2
arch/arc/include/asm/tlb.h
··· 30 30 /* 31 31 * This pair is called at time of munmap/exit to flush cache and TLB entries 32 32 * for mappings being torn down. 33 - * 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now) 34 - * as we don't support aliasing configs in our VIPT D$. 33 + * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$ 35 34 * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range 36 35 * 37 36 * Note, read http://lkml.org/lkml/2004/1/15/6 38 37 */ 38 + #ifndef CONFIG_ARC_CACHE_VIPT_ALIASING 39 39 #define tlb_start_vma(tlb, vma) 40 + #else 41 + #define tlb_start_vma(tlb, vma) \ 42 + do { \ 43 + if (!tlb->fullmm) \ 44 + flush_cache_range(vma, vma->vm_start, vma->vm_end); \ 45 + } while(0) 46 + #endif 40 47 41 48 #define tlb_end_vma(tlb, vma) \ 42 49 do { \
+1 -1
arch/arc/mm/Makefile
··· 7 7 # 8 8 9 9 obj-y := extable.o ioremap.o dma.o fault.o init.o 10 - obj-y += tlb.o tlbex.o cache_arc700.o 10 + obj-y += tlb.o tlbex.o cache_arc700.o mmap.o
+178 -43
arch/arc/mm/cache_arc700.c
··· 68 68 #include <linux/mmu_context.h> 69 69 #include <linux/syscalls.h> 70 70 #include <linux/uaccess.h> 71 + #include <linux/pagemap.h> 71 72 #include <asm/cacheflush.h> 72 73 #include <asm/cachectl.h> 73 74 #include <asm/setup.h> ··· 139 138 struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; 140 139 struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache; 141 140 int way_pg_ratio = way_pg_ratio; 141 + int dcache_does_alias; 142 142 char str[256]; 143 143 144 144 printk(arc_cache_mumbojumbo(0, str, sizeof(str))); ··· 186 184 panic("Cache H/W doesn't match kernel Config"); 187 185 } 188 186 187 + dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE; 188 + 189 189 /* check for D-Cache aliasing */ 190 - if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE) 191 - panic("D$ aliasing not handled right now\n"); 190 + if (dcache_does_alias && !cache_is_vipt_aliasing()) 191 + panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); 192 + else if (!dcache_does_alias && cache_is_vipt_aliasing()) 193 + panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n"); 192 194 #endif 193 195 194 196 /* Set the default Invalidate Mode to "simpy discard dirty lines" ··· 275 269 * Per Line Operation on D-Cache 276 270 * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete 277 271 * It's sole purpose is to help gcc generate ZOL 272 + * (aliasing VIPT dcache flushing needs both vaddr and paddr) 278 273 */ 279 - static inline void __dc_line_loop(unsigned long start, unsigned long sz, 280 - int aux_reg) 274 + static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr, 275 + unsigned long sz, const int aux_reg) 281 276 { 282 - int num_lines, slack; 277 + int num_lines; 283 278 284 279 /* Ensure we properly floor/ceil the non-line aligned/sized requests 285 - * and have @start - aligned to cache line and integral @num_lines. 280 + * and have @paddr - aligned to cache line and integral @num_lines. 
286 281 * This however can be avoided for page sized since: 287 - * -@start will be cache-line aligned already (being page aligned) 282 + * -@paddr will be cache-line aligned already (being page aligned) 288 283 * -@sz will be integral multiple of line size (being page sized). 289 284 */ 290 285 if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) { 291 - slack = start & ~DCACHE_LINE_MASK; 292 - sz += slack; 293 - start -= slack; 286 + sz += paddr & ~DCACHE_LINE_MASK; 287 + paddr &= DCACHE_LINE_MASK; 288 + vaddr &= DCACHE_LINE_MASK; 294 289 } 295 290 296 291 num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN); 292 + 293 + #if (CONFIG_ARC_MMU_VER <= 2) 294 + paddr |= (vaddr >> PAGE_SHIFT) & 0x1F; 295 + #endif 297 296 298 297 while (num_lines-- > 0) { 299 298 #if (CONFIG_ARC_MMU_VER > 2) 300 299 /* 301 300 * Just as for I$, in MMU v3, D$ ops also require 302 301 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops 303 - * But we pass phy addr for both. This works since Linux 304 - * doesn't support aliasing configs for D$, yet. 305 - * Thus paddr is enough to provide both tag and index. 
306 302 */ 307 - write_aux_reg(ARC_REG_DC_PTAG, start); 303 + write_aux_reg(ARC_REG_DC_PTAG, paddr); 304 + 305 + write_aux_reg(aux_reg, vaddr); 306 + vaddr += ARC_DCACHE_LINE_LEN; 307 + #else 308 + /* paddr contains stuffed vaddrs bits */ 309 + write_aux_reg(aux_reg, paddr); 308 310 #endif 309 - write_aux_reg(aux_reg, start); 310 - start += ARC_DCACHE_LINE_LEN; 311 + paddr += ARC_DCACHE_LINE_LEN; 311 312 } 312 313 } 314 + 315 + /* For kernel mappings cache operation: index is same as paddr */ 316 + #define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op) 313 317 314 318 /* 315 319 * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback) 316 320 */ 317 - static inline void __dc_line_op(unsigned long start, unsigned long sz, 318 - const int cacheop) 321 + static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr, 322 + unsigned long sz, const int cacheop) 319 323 { 320 324 unsigned long flags, tmp = tmp; 321 325 int aux; ··· 348 332 else 349 333 aux = ARC_REG_DC_FLDL; 350 334 351 - __dc_line_loop(start, sz, aux); 335 + __dc_line_loop(paddr, vaddr, sz, aux); 352 336 353 337 if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */ 354 338 wait_for_flush(); ··· 363 347 #else 364 348 365 349 #define __dc_entire_op(cacheop) 366 - #define __dc_line_op(start, sz, cacheop) 350 + #define __dc_line_op(paddr, vaddr, sz, cacheop) 351 + #define __dc_line_op_k(paddr, sz, cacheop) 367 352 368 353 #endif /* CONFIG_ARC_HAS_DCACHE */ 369 354 ··· 416 399 /*********************************************************** 417 400 * Machine specific helper for per line I-Cache invalidate. 
418 401 */ 419 - static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr, 402 + static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr, 420 403 unsigned long sz) 421 404 { 422 405 unsigned long flags; 423 - int num_lines, slack; 424 - unsigned int addr; 406 + int num_lines; 425 407 426 408 /* 427 409 * Ensure we properly floor/ceil the non-line aligned/sized requests: 428 410 * However page sized flushes can be compile time optimised. 429 - * -@phy_start will be cache-line aligned already (being page aligned) 411 + * -@paddr will be cache-line aligned already (being page aligned) 430 412 * -@sz will be integral multiple of line size (being page sized). 431 413 */ 432 414 if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) { 433 - slack = phy_start & ~ICACHE_LINE_MASK; 434 - sz += slack; 435 - phy_start -= slack; 415 + sz += paddr & ~ICACHE_LINE_MASK; 416 + paddr &= ICACHE_LINE_MASK; 417 + vaddr &= ICACHE_LINE_MASK; 436 418 } 437 419 438 420 num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN); 439 421 440 - #if (CONFIG_ARC_MMU_VER > 2) 441 - vaddr &= ~ICACHE_LINE_MASK; 442 - addr = phy_start; 443 - #else 422 + #if (CONFIG_ARC_MMU_VER <= 2) 444 423 /* bits 17:13 of vaddr go as bits 4:0 of paddr */ 445 - addr = phy_start | ((vaddr >> 13) & 0x1F); 424 + paddr |= (vaddr >> PAGE_SHIFT) & 0x1F; 446 425 #endif 447 426 448 427 local_irq_save(flags); 449 428 while (num_lines-- > 0) { 450 429 #if (CONFIG_ARC_MMU_VER > 2) 451 430 /* tag comes from phy addr */ 452 - write_aux_reg(ARC_REG_IC_PTAG, addr); 431 + write_aux_reg(ARC_REG_IC_PTAG, paddr); 453 432 454 433 /* index bits come from vaddr */ 455 434 write_aux_reg(ARC_REG_IC_IVIL, vaddr); 456 435 vaddr += ARC_ICACHE_LINE_LEN; 457 436 #else 458 437 /* paddr contains stuffed vaddrs bits */ 459 - write_aux_reg(ARC_REG_IC_IVIL, addr); 438 + write_aux_reg(ARC_REG_IC_IVIL, paddr); 460 439 #endif 461 - addr += ARC_ICACHE_LINE_LEN; 440 + paddr += ARC_ICACHE_LINE_LEN; 462 441 } 463 442 
local_irq_restore(flags); 464 443 } ··· 470 457 * Exported APIs 471 458 */ 472 459 460 + /* 461 + * Handle cache congruency of kernel and userspace mappings of page when kernel 462 + * writes-to/reads-from 463 + * 464 + * The idea is to defer flushing of kernel mapping after a WRITE, possible if: 465 + * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent 466 + * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache) 467 + * -In SMP, if hardware caches are coherent 468 + * 469 + * There's a corollary case, where kernel READs from a userspace mapped page. 470 + * If the U-mapping is not congruent to to K-mapping, former needs flushing. 471 + */ 473 472 void flush_dcache_page(struct page *page) 474 473 { 475 - /* Make a note that dcache is not yet flushed for this page */ 476 - set_bit(PG_arch_1, &page->flags); 474 + struct address_space *mapping; 475 + 476 + if (!cache_is_vipt_aliasing()) { 477 + set_bit(PG_arch_1, &page->flags); 478 + return; 479 + } 480 + 481 + /* don't handle anon pages here */ 482 + mapping = page_mapping(page); 483 + if (!mapping) 484 + return; 485 + 486 + /* 487 + * pagecache page, file not yet mapped to userspace 488 + * Make a note that K-mapping is dirty 489 + */ 490 + if (!mapping_mapped(mapping)) { 491 + set_bit(PG_arch_1, &page->flags); 492 + } else if (page_mapped(page)) { 493 + 494 + /* kernel reading from page with U-mapping */ 495 + void *paddr = page_address(page); 496 + unsigned long vaddr = page->index << PAGE_CACHE_SHIFT; 497 + 498 + if (addr_not_cache_congruent(paddr, vaddr)) 499 + __flush_dcache_page(paddr, vaddr); 500 + } 477 501 } 478 502 EXPORT_SYMBOL(flush_dcache_page); 479 503 480 504 481 505 void dma_cache_wback_inv(unsigned long start, unsigned long sz) 482 506 { 483 - __dc_line_op(start, sz, OP_FLUSH_N_INV); 507 + __dc_line_op_k(start, sz, OP_FLUSH_N_INV); 484 508 } 485 509 EXPORT_SYMBOL(dma_cache_wback_inv); 486 510 487 511 void dma_cache_inv(unsigned long start, unsigned long sz) 488 
512 { 489 - __dc_line_op(start, sz, OP_INV); 513 + __dc_line_op_k(start, sz, OP_INV); 490 514 } 491 515 EXPORT_SYMBOL(dma_cache_inv); 492 516 493 517 void dma_cache_wback(unsigned long start, unsigned long sz) 494 518 { 495 - __dc_line_op(start, sz, OP_FLUSH); 519 + __dc_line_op_k(start, sz, OP_FLUSH); 496 520 } 497 521 EXPORT_SYMBOL(dma_cache_wback); 498 522 ··· 610 560 611 561 local_irq_save(flags); 612 562 __ic_line_inv_vaddr(paddr, vaddr, len); 613 - __dc_line_op(paddr, len, OP_FLUSH); 563 + __dc_line_op(paddr, vaddr, len, OP_FLUSH); 614 564 local_irq_restore(flags); 615 565 } 616 566 ··· 620 570 __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE); 621 571 } 622 572 623 - void __flush_dcache_page(unsigned long paddr) 573 + /* 574 + * wrapper to clearout kernel or userspace mappings of a page 575 + * For kernel mappings @vaddr == @paddr 576 + */ 577 + void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr) 624 578 { 625 - __dc_line_op(paddr, PAGE_SIZE, OP_FLUSH_N_INV); 579 + __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV); 626 580 } 627 581 628 582 void flush_icache_all(void) ··· 654 600 local_irq_restore(flags); 655 601 656 602 } 603 + 604 + #ifdef CONFIG_ARC_CACHE_VIPT_ALIASING 605 + 606 + void flush_cache_mm(struct mm_struct *mm) 607 + { 608 + flush_cache_all(); 609 + } 610 + 611 + void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr, 612 + unsigned long pfn) 613 + { 614 + unsigned int paddr = pfn << PAGE_SHIFT; 615 + 616 + __sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE); 617 + } 618 + 619 + void flush_cache_range(struct vm_area_struct *vma, unsigned long start, 620 + unsigned long end) 621 + { 622 + flush_cache_all(); 623 + } 624 + 625 + void copy_user_highpage(struct page *to, struct page *from, 626 + unsigned long u_vaddr, struct vm_area_struct *vma) 627 + { 628 + void *kfrom = page_address(from); 629 + void *kto = page_address(to); 630 + int clean_src_k_mappings = 0; 631 + 632 + /* 633 + * If SRC page was already 
mapped in userspace AND it's U-mapping is 634 + * not congruent with K-mapping, sync former to physical page so that 635 + * K-mapping in memcpy below, sees the right data 636 + * 637 + * Note that while @u_vaddr refers to DST page's userspace vaddr, it is 638 + * equally valid for SRC page as well 639 + */ 640 + if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) { 641 + __flush_dcache_page(kfrom, u_vaddr); 642 + clean_src_k_mappings = 1; 643 + } 644 + 645 + copy_page(kto, kfrom); 646 + 647 + /* 648 + * Mark DST page K-mapping as dirty for a later finalization by 649 + * update_mmu_cache(). Although the finalization could have been done 650 + * here as well (given that both vaddr/paddr are available). 651 + * But update_mmu_cache() already has code to do that for other 652 + * non copied user pages (e.g. read faults which wire in pagecache page 653 + * directly). 654 + */ 655 + set_bit(PG_arch_1, &to->flags); 656 + 657 + /* 658 + * if SRC was already usermapped and non-congruent to kernel mapping 659 + * sync the kernel mapping back to physical page 660 + */ 661 + if (clean_src_k_mappings) { 662 + __flush_dcache_page(kfrom, kfrom); 663 + } else { 664 + set_bit(PG_arch_1, &from->flags); 665 + } 666 + } 667 + 668 + void clear_user_page(void *to, unsigned long u_vaddr, struct page *page) 669 + { 670 + clear_page(to); 671 + set_bit(PG_arch_1, &page->flags); 672 + } 673 + 674 + void flush_anon_page(struct vm_area_struct *vma, struct page *page, 675 + unsigned long u_vaddr) 676 + { 677 + /* TBD: do we really need to clear the kernel mapping */ 678 + __flush_dcache_page(page_address(page), u_vaddr); 679 + __flush_dcache_page(page_address(page), page_address(page)); 680 + 681 + } 682 + 683 + #endif 657 684 658 685 /********************************************************************** 659 686 * Explicit Cache flush request from user space via syscall
+78
arch/arc/mm/mmap.c
··· 1 + /* 2 + * ARC700 mmap 3 + * 4 + * (started from arm version - for VIPT alias handling) 5 + * 6 + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + */ 12 + 13 + #include <linux/fs.h> 14 + #include <linux/mm.h> 15 + #include <linux/mman.h> 16 + #include <linux/sched.h> 17 + #include <asm/cacheflush.h> 18 + 19 + #define COLOUR_ALIGN(addr, pgoff) \ 20 + ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \ 21 + (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1))) 22 + 23 + /* 24 + * Ensure that shared mappings are correctly aligned to 25 + * avoid aliasing issues with VIPT caches. 26 + * We need to ensure that 27 + * a specific page of an object is always mapped at a multiple of 28 + * SHMLBA bytes. 29 + */ 30 + unsigned long 31 + arch_get_unmapped_area(struct file *filp, unsigned long addr, 32 + unsigned long len, unsigned long pgoff, unsigned long flags) 33 + { 34 + struct mm_struct *mm = current->mm; 35 + struct vm_area_struct *vma; 36 + int do_align = 0; 37 + int aliasing = cache_is_vipt_aliasing(); 38 + struct vm_unmapped_area_info info; 39 + 40 + /* 41 + * We only need to do colour alignment if D cache aliases. 42 + */ 43 + if (aliasing) 44 + do_align = filp || (flags & MAP_SHARED); 45 + 46 + /* 47 + * We enforce the MAP_FIXED case. 
48 + */ 49 + if (flags & MAP_FIXED) { 50 + if (aliasing && flags & MAP_SHARED && 51 + (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) 52 + return -EINVAL; 53 + return addr; 54 + } 55 + 56 + if (len > TASK_SIZE) 57 + return -ENOMEM; 58 + 59 + if (addr) { 60 + if (do_align) 61 + addr = COLOUR_ALIGN(addr, pgoff); 62 + else 63 + addr = PAGE_ALIGN(addr); 64 + 65 + vma = find_vma(mm, addr); 66 + if (TASK_SIZE - len >= addr && 67 + (!vma || addr + len <= vma->vm_start)) 68 + return addr; 69 + } 70 + 71 + info.flags = 0; 72 + info.length = len; 73 + info.low_limit = mm->mmap_base; 74 + info.high_limit = TASK_SIZE; 75 + info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; 76 + info.align_offset = pgoff << PAGE_SHIFT; 77 + return vm_unmapped_area(&info); 78 + }
+22 -7
arch/arc/mm/tlb.c
··· 421 421 /* 422 422 * Called at the end of pagefault, for a userspace mapped page 423 423 * -pre-install the corresponding TLB entry into MMU 424 - * -Finalize the delayed D-cache flush (wback+inv kernel mapping) 424 + * -Finalize the delayed D-cache flush of kernel mapping of page due to 425 + * flush_dcache_page(), copy_user_page() 426 + * 427 + * Note that flush (when done) involves both WBACK - so physical page is 428 + * in sync as well as INV - so any non-congruent aliases don't remain 425 429 */ 426 430 void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, 427 431 pte_t *ptep) 428 432 { 429 433 unsigned long vaddr = vaddr_unaligned & PAGE_MASK; 434 + unsigned long paddr = pte_val(*ptep) & PAGE_MASK; 430 435 431 436 create_tlb(vma, vaddr, ptep); 432 437 433 - /* icache doesn't snoop dcache, thus needs to be made coherent here */ 434 - if (vma->vm_flags & VM_EXEC) { 438 + /* 439 + * Exec page : Independent of aliasing/page-color considerations, 440 + * since icache doesn't snoop dcache on ARC, any dirty 441 + * K-mapping of a code page needs to be wback+inv so that 442 + * icache fetch by userspace sees code correctly. 443 + * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it 444 + * so userspace sees the right data. 
445 + * (Avoids the flush for Non-exec + congruent mapping case) 446 + */ 447 + if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) { 435 448 struct page *page = pfn_to_page(pte_pfn(*ptep)); 436 449 437 - /* if page was dcache dirty, flush now */ 438 450 int dirty = test_and_clear_bit(PG_arch_1, &page->flags); 439 451 if (dirty) { 440 - unsigned long paddr = pte_val(*ptep) & PAGE_MASK; 441 - __flush_dcache_page(paddr); 442 - __inv_icache_page(paddr, vaddr); 452 + /* wback + inv dcache lines */ 453 + __flush_dcache_page(paddr, paddr); 454 + 455 + /* invalidate any existing icache lines */ 456 + if (vma->vm_flags & VM_EXEC) 457 + __inv_icache_page(paddr, vaddr); 443 458 } 444 459 } 445 460 }
-7
arch/arc/plat-tb10x/Kconfig
··· 27 27 Abilis Systems. TB10x is based on the ARC700 CPU architecture. 28 28 Say Y if you are building a kernel for one of the SOCs in this 29 29 series (e.g. TB100 or TB101). If in doubt say N. 30 - 31 - if ARC_PLAT_TB10X 32 - 33 - config GENERIC_GPIO 34 - def_bool y 35 - 36 - endif