Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sh/mm: enable ARCH_HAS_VM_GET_PAGE_PROT

This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports
standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT,
which looks up a private and static protection_map[] array. Subsequently
all __SXXX and __PXXX macros can be dropped which are no longer needed.

Link: https://lkml.kernel.org/r/20220711070600.2378316-26-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Rich Felker <dalias@libc.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brian Cain <bcain@quicinc.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Anshuman Khandual; committed by akpm (Andrew Morton).
34516fd8 91a8da02

+21 -17
+1
arch/sh/Kconfig
@@ -12,6 +12,7 @@
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+	select ARCH_HAS_VM_GET_PAGE_PROT
 	select ARCH_HIBERNATION_POSSIBLE if MMU
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_WANT_IPC_PARSE_VERSION
-17
arch/sh/include/asm/pgtable.h
@@ -89,23 +89,6 @@
  * completely separate permission bits for user and kernel space.
  */
 		/*xwr*/
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY
-#define __P100	PAGE_EXECREAD
-#define __P101	PAGE_EXECREAD
-#define __P110	PAGE_COPY
-#define __P111	PAGE_COPY
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_WRITEONLY
-#define __S011	PAGE_SHARED
-#define __S100	PAGE_EXECREAD
-#define __S101	PAGE_EXECREAD
-#define __S110	PAGE_RWX
-#define __S111	PAGE_RWX
 
 typedef pte_t *pte_addr_t;
 
+20
arch/sh/mm/mmap.c
@@ -19,6 +19,26 @@
 EXPORT_SYMBOL(shm_align_mask);
 
 #ifdef CONFIG_MMU
+static const pgprot_t protection_map[16] = {
+	[VM_NONE]					= PAGE_NONE,
+	[VM_READ]					= PAGE_READONLY,
+	[VM_WRITE]					= PAGE_COPY,
+	[VM_WRITE | VM_READ]				= PAGE_COPY,
+	[VM_EXEC]					= PAGE_EXECREAD,
+	[VM_EXEC | VM_READ]				= PAGE_EXECREAD,
+	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
+	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY,
+	[VM_SHARED]					= PAGE_NONE,
+	[VM_SHARED | VM_READ]				= PAGE_READONLY,
+	[VM_SHARED | VM_WRITE]				= PAGE_WRITEONLY,
+	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
+	[VM_SHARED | VM_EXEC]				= PAGE_EXECREAD,
+	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_EXECREAD,
+	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
+	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
+};
+DECLARE_VM_GET_PAGE_PROT
+
 /*
  * To avoid cache aliases, we map the shared page with same color.
  */