Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sh: Various nommu fixes.

This fixes up some of the various outstanding nommu bugs on
SH.

Signed-off-by: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>

Authored by Yoshinori Sato and committed by Paul Mundt.
e96636cc e7f93a35

+50 -23
+8 -2
arch/sh/boot/compressed/Makefile
··· 21 21 CONFIG_PAGE_OFFSET ?= 0x80000000 22 22 CONFIG_MEMORY_START ?= 0x0c000000 23 23 CONFIG_BOOT_LINK_OFFSET ?= 0x00800000 24 - IMAGE_OFFSET := $(shell printf "0x%8x" $$[$(CONFIG_PAGE_OFFSET)+$(CONFIG_MEMORY_START)+$(CONFIG_BOOT_LINK_OFFSET)]) 24 + 25 + IMAGE_OFFSET := $(shell printf "0x%8x" $$[$(CONFIG_PAGE_OFFSET) + \ 26 + $(CONFIG_MEMORY_START) + \ 27 + $(CONFIG_BOOT_LINK_OFFSET)]) 28 + 29 + LIBGCC := $(shell $(CC) $(CFLAGS) -print-libgcc-file-name) 25 30 26 31 LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -e startup -T $(obj)/../../kernel/vmlinux.lds 27 32 28 - $(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE 33 + 34 + $(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE 29 35 $(call if_changed,ld) 30 36 @: 31 37
+5
arch/sh/mm/init.c
··· 248 248 * Setup wrappers for copy/clear_page(), these will get overridden 249 249 * later in the boot process if a better method is available. 250 250 */ 251 + #ifdef CONFIG_MMU 251 252 copy_page = copy_page_slow; 252 253 clear_page = clear_page_slow; 254 + #else 255 + copy_page = copy_page_nommu; 256 + clear_page = clear_page_nommu; 257 + #endif 253 258 254 259 /* this will put all low memory onto the freelists */ 255 260 totalram_pages += free_all_bootmem_node(NODE_DATA(0));
+9 -8
arch/sh/mm/pg-nommu.c
··· 14 14 #include <linux/string.h> 15 15 #include <asm/page.h> 16 16 17 - static void copy_page_nommu(void *to, void *from) 17 + void copy_page_nommu(void *to, void *from) 18 18 { 19 19 memcpy(to, from, PAGE_SIZE); 20 20 } 21 21 22 - static void clear_page_nommu(void *to) 22 + void clear_page_nommu(void *to) 23 23 { 24 24 memset(to, 0, PAGE_SIZE); 25 25 } 26 26 27 - static int __init pg_nommu_init(void) 27 + __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n) 28 28 { 29 - copy_page = copy_page_nommu; 30 - clear_page = clear_page_nommu; 31 - 29 + memcpy(to, from, n); 32 30 return 0; 33 31 } 34 32 35 - subsys_initcall(pg_nommu_init); 36 - 33 + __kernel_size_t __clear_user(void *to, __kernel_size_t n) 34 + { 35 + memset(to, 0, n); 36 + return 0; 37 + }
+8
include/asm-sh/addrspace.h
··· 14 14 #include <asm/cpu/addrspace.h> 15 15 16 16 /* Memory segments (32bit Privileged mode addresses) */ 17 + #ifdef CONFIG_MMU 17 18 #define P0SEG 0x00000000 18 19 #define P1SEG 0x80000000 19 20 #define P2SEG 0xa0000000 20 21 #define P3SEG 0xc0000000 21 22 #define P4SEG 0xe0000000 23 + #else 24 + #define P0SEG 0x00000000 25 + #define P1SEG 0x00000000 26 + #define P2SEG 0x20000000 27 + #define P3SEG 0x40000000 28 + #define P4SEG 0x80000000 29 + #endif 22 30 23 31 /* Returns the privileged segment base of a given address */ 24 32 #define PXSEG(a) (((unsigned long)(a)) & 0xe0000000)
+1 -1
include/asm-sh/flat.h
··· 13 13 #define __ASM_SH_FLAT_H 14 14 15 15 #define flat_stack_align(sp) /* nothing needed */ 16 - #define flat_argvp_envp_on_stack() 1 16 + #define flat_argvp_envp_on_stack() 0 17 17 #define flat_old_ram_flag(flags) (flags) 18 18 #define flat_reloc_valid(reloc, size) ((reloc) <= (size)) 19 19 #define flat_get_addr_from_rp(rp, relval, flags) get_unaligned(rp)
+1 -12
include/asm-sh/mmu.h
··· 3 3 4 4 #if !defined(CONFIG_MMU) 5 5 6 - struct mm_rblock_struct { 7 - int size; 8 - int refcount; 9 - void *kblock; 10 - }; 11 - 12 - struct mm_tblock_struct { 13 - struct mm_rblock_struct *rblock; 14 - struct mm_tblock_struct *next; 15 - }; 16 - 17 6 typedef struct { 18 - struct mm_tblock_struct tblock; 7 + struct vm_list_struct *vmlist; 19 8 unsigned long end_brk; 20 9 } mm_context_t; 21 10
+5
include/asm-sh/page.h
··· 38 38 extern void (*clear_page)(void *to); 39 39 extern void (*copy_page)(void *to, void *from); 40 40 41 + #ifdef CONFIG_MMU 41 42 extern void clear_page_slow(void *to); 42 43 extern void copy_page_slow(void *to, void *from); 44 + #else 45 + extern void clear_page_nommu(void *to); 46 + extern void copy_page_nommu(void *to, void *from); 47 + #endif 43 48 44 49 #if defined(CONFIG_MMU) && (defined(CONFIG_CPU_SH4) || \ 45 50 defined(CONFIG_SH7705_CACHE_32KB))
+13
include/asm-sh/uaccess.h
··· 168 168 __gu_err; \ 169 169 }) 170 170 171 + #ifdef CONFIG_MMU 171 172 #define __get_user_check(x,ptr,size) \ 172 173 ({ \ 173 174 long __gu_err, __gu_val; \ ··· 258 257 : "r" (addr) \ 259 258 : "t"); \ 260 259 }) 260 + #else /* CONFIG_MMU */ 261 + #define __get_user_check(x,ptr,size) \ 262 + ({ \ 263 + long __gu_err, __gu_val; \ 264 + if (__access_ok((unsigned long)(ptr), (size))) { \ 265 + __get_user_size(__gu_val, (ptr), (size), __gu_err); \ 266 + (x) = (__typeof__(*(ptr)))__gu_val; \ 267 + } else \ 268 + __gu_err = -EFAULT; \ 269 + __gu_err; \ 270 + }) 271 + #endif 261 272 262 273 #define __get_user_asm(x, addr, err, insn) \ 263 274 ({ \