at v5.4
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * OpenRISC implementation:
 * Copyright (C) Jan Henrik Weinstock <jan.weinstock@rwth-aachen.de>
 * et al.
 */

#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/mm.h>

/*
 * Helper functions for flushing or invalidating entire pages from the data
 * and instruction caches. SMP needs a little extra work, since we need
 * to flush the pages on all cpus.
 */
extern void local_dcache_page_flush(struct page *page);
extern void local_icache_page_inv(struct page *page);

/*
 * Data cache flushing always happens on the local cpu. Instruction cache
 * invalidations need to be broadcast to all other cpus in the system in
 * SMP configurations.
 */
#ifndef CONFIG_SMP
#define dcache_page_flush(page)	local_dcache_page_flush(page)
#define icache_page_inv(page)	local_icache_page_inv(page)
#else /* CONFIG_SMP */
#define dcache_page_flush(page)	local_dcache_page_flush(page)
#define icache_page_inv(page)	smp_icache_page_inv(page)
extern void smp_icache_page_inv(struct page *page);
#endif /* CONFIG_SMP */

/*
 * Synchronizes caches. Whenever a cpu writes executable code to memory, this
 * should be called to make sure the processor sees the newly written code.
 */
static inline void sync_icache_dcache(struct page *page)
{
	if (!IS_ENABLED(CONFIG_DCACHE_WRITETHROUGH))
		dcache_page_flush(page);
	icache_page_inv(page);
}

/*
 * Pages with this bit set need not be flushed/invalidated, since
 * they have not changed since the last flush. New pages start with
 * PG_arch_1 not set and are therefore dirty by default.
 */
#define PG_dc_clean	PG_arch_1

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	clear_bit(PG_dc_clean, &page->flags);
}

/*
 * The other interfaces are not required since we do not have virtually
 * indexed or tagged caches, so we can use the defaults here.
 */
#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)
#define flush_cache_range(vma, start, end)		do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)		do { } while (0)
#define flush_dcache_mmap_lock(mapping)			do { } while (0)
#define flush_dcache_mmap_unlock(mapping)		do { } while (0)
#define flush_icache_range(start, end)			do { } while (0)
#define flush_icache_page(vma, pg)			do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
#define flush_cache_vmap(start, end)			do { } while (0)
#define flush_cache_vunmap(start, end)			do { } while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		if (vma->vm_flags & VM_EXEC)			\
			sync_icache_dcache(page);		\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy(dst, src, len)

#endif /* __ASM_CACHEFLUSH_H */
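
For context, a minimal sketch of how the PG_dc_clean bit and sync_icache_dcache() are typically driven from an update_mmu_cache()-style fault hook: flush_dcache_page() clears the bit when the kernel writes into a page, and the fault path re-syncs the caches the next time that page is mapped executable. The function name example_update_cache() is hypothetical; the corresponding OpenRISC logic lives in arch/openrisc/mm/cache.c, so treat this as an illustration of the intended flow, not the actual implementation.

/*
 * Illustrative sketch only -- not part of the header above.
 * example_update_cache() is a hypothetical stand-in for an
 * update_mmu_cache()-style hook.
 */
#include <linux/mm.h>
#include <asm/cacheflush.h>

static void example_update_cache(struct vm_area_struct *vma,
				 unsigned long address, pte_t *pte)
{
	unsigned long pfn = pte_pfn(*pte);
	struct page *page;
	int dirty;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Mark the page clean; remember whether it was dirty before. */
	dirty = !test_and_set_bit(PG_dc_clean, &page->flags);

	/* Only executable mappings need the dcache/icache sync. */
	if ((vma->vm_flags & VM_EXEC) && dirty)
		sync_icache_dcache(page);
}

The test_and_set_bit() keeps the check and the mark-clean step atomic, so a page that was dirtied by flush_dcache_page() is synced at most once before it is written again.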