Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sh: __addr_ok() and other misc nommu fixups.

A few more outstanding nommu fixups.

Signed-off-by: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>

Authored by Yoshinori Sato and committed by Paul Mundt
a2d1a5fa 0b892935

+33 -22
+3 -3
arch/sh/boot/compressed/Makefile
··· 22 22 CONFIG_MEMORY_START ?= 0x0c000000 23 23 CONFIG_BOOT_LINK_OFFSET ?= 0x00800000 24 24 25 - IMAGE_OFFSET := $(shell printf "0x%8x" $$[$(CONFIG_PAGE_OFFSET) + \ 26 - $(CONFIG_MEMORY_START) + \ 27 - $(CONFIG_BOOT_LINK_OFFSET)]) 25 + IMAGE_OFFSET := $(shell printf "0x%08x" $$[$(CONFIG_PAGE_OFFSET) + \ 26 + $(CONFIG_MEMORY_START) + \ 27 + $(CONFIG_BOOT_LINK_OFFSET)]) 28 28 29 29 LIBGCC := $(shell $(CC) $(CFLAGS) -print-libgcc-file-name) 30 30
+9 -4
arch/sh/kernel/process.c
··· 302 302 { 303 303 ctrl_outl(pc, UBC_BARA); 304 304 305 + #ifdef CONFIG_MMU 305 306 /* We don't have any ASID settings for the SH-2! */ 306 307 if (cpu_data->type != CPU_SH7604) 307 308 ctrl_outb(asid, UBC_BASRA); 309 + #endif 308 310 309 311 ctrl_outl(0, UBC_BAMRA); 310 312 ··· 349 347 } 350 348 #endif 351 349 350 + #ifdef CONFIG_MMU 352 351 /* 353 352 * Restore the kernel mode register 354 353 * k7 (r7_bank1) ··· 357 354 asm volatile("ldc %0, r7_bank" 358 355 : /* no output */ 359 356 : "r" (task_thread_info(next))); 357 + #endif 360 358 361 - #ifdef CONFIG_MMU 362 359 /* If no tasks are using the UBC, we're done */ 363 360 if (ubc_usercnt == 0) 364 361 /* If no tasks are using the UBC, we're done */; 365 362 else if (next->thread.ubc_pc && next->mm) { 366 - ubc_set_tracing(next->mm->context & MMU_CONTEXT_ASID_MASK, 367 - next->thread.ubc_pc); 363 + int asid = 0; 364 + #ifdef CONFIG_MMU 365 + asid |= next->mm->context & MMU_CONTEXT_ASID_MASK; 366 + #endif 367 + ubc_set_tracing(asid, next->thread.ubc_pc); 368 368 } else { 369 369 ctrl_outw(0, UBC_BBRA); 370 370 ctrl_outw(0, UBC_BBRB); 371 371 } 372 - #endif 373 372 374 373 return prev; 375 374 }
+4 -6
arch/sh/kernel/sh_ksyms.c
··· 79 79 DECLARE_EXPORT(__movstr_i4_even); 80 80 DECLARE_EXPORT(__movstr_i4_odd); 81 81 DECLARE_EXPORT(__movstrSI12_i4); 82 + #endif 82 83 84 + #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) 83 85 /* needed by some modules */ 84 86 EXPORT_SYMBOL(flush_cache_all); 85 87 EXPORT_SYMBOL(flush_cache_range); 86 88 EXPORT_SYMBOL(flush_dcache_page); 87 89 EXPORT_SYMBOL(__flush_purge_region); 88 - EXPORT_SYMBOL(clear_user_page); 89 90 #endif 90 91 91 - #if defined(CONFIG_SH7705_CACHE_32KB) 92 - EXPORT_SYMBOL(flush_cache_all); 93 - EXPORT_SYMBOL(flush_cache_range); 94 - EXPORT_SYMBOL(flush_dcache_page); 95 - EXPORT_SYMBOL(__flush_purge_region); 92 + #ifdef CONFIG_MMU 93 + EXPORT_SYMBOL(clear_user_page); 96 94 #endif 97 95 98 96 EXPORT_SYMBOL(flush_tlb_page);
+1 -1
arch/sh/kernel/sys_sh.c
··· 44 44 return error; 45 45 } 46 46 47 - #if defined(HAVE_ARCH_UNMAPPED_AREA) 47 + #if defined(HAVE_ARCH_UNMAPPED_AREA) && defined(CONFIG_MMU) 48 48 /* 49 49 * To avoid cache alias, we map the shard page with same color. 50 50 */
+1 -1
arch/sh/mm/Kconfig
··· 194 194 195 195 config 32BIT 196 196 bool "Support 32-bit physical addressing through PMB" 197 - depends on CPU_SH4A 197 + depends on CPU_SH4A && MMU 198 198 default y 199 199 help 200 200 If you say Y here, physical addressing will be extended to
+2 -2
arch/sh/mm/Makefile
··· 6 6 7 7 obj-$(CONFIG_CPU_SH2) += cache-sh2.o 8 8 obj-$(CONFIG_CPU_SH3) += cache-sh3.o 9 - obj-$(CONFIG_CPU_SH4) += cache-sh4.o pg-sh4.o 9 + obj-$(CONFIG_CPU_SH4) += cache-sh4.o 10 10 11 11 obj-$(CONFIG_DMA_PAGE_OPS) += pg-dma.o 12 12 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o ··· 19 19 20 20 ifdef CONFIG_MMU 21 21 obj-$(CONFIG_CPU_SH3) += tlb-sh3.o 22 - obj-$(CONFIG_CPU_SH4) += tlb-sh4.o 22 + obj-$(CONFIG_CPU_SH4) += tlb-sh4.o pg-sh4.o 23 23 obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh7705.o 24 24 endif 25 25
+2 -2
include/asm-sh/addrspace.h
··· 14 14 #include <asm/cpu/addrspace.h> 15 15 16 16 /* Memory segments (32bit Privileged mode addresses) */ 17 - #ifdef CONFIG_MMU 17 + #ifndef CONFIG_CPU_SH2A 18 18 #define P0SEG 0x00000000 19 19 #define P1SEG 0x80000000 20 20 #define P2SEG 0xa0000000 ··· 24 24 #define P0SEG 0x00000000 25 25 #define P1SEG 0x00000000 26 26 #define P2SEG 0x20000000 27 - #define P3SEG 0x40000000 27 + #define P3SEG 0x00000000 28 28 #define P4SEG 0x80000000 29 29 #endif 30 30
+5
include/asm-sh/io.h
··· 216 216 217 217 #define IO_SPACE_LIMIT 0xffffffff 218 218 219 + #ifdef CONFIG_MMU 219 220 /* 220 221 * Change virtual addresses to physical addresses and vv. 221 222 * These are trivial on the 1:1 Linux/SuperH mapping ··· 230 229 { 231 230 return (void *)P1SEGADDR(address); 232 231 } 232 + #else 233 + #define phys_to_virt(address) ((void *)(address)) 234 + #define virt_to_phys(address) ((unsigned long)(address)) 235 + #endif 233 236 234 237 #define virt_to_bus virt_to_phys 235 238 #define bus_to_virt phys_to_virt
+6 -3
include/asm-sh/uaccess.h
··· 34 34 35 35 #define segment_eq(a,b) ((a).seg == (b).seg) 36 36 37 - #define __addr_ok(addr) \ 38 - ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg)) 39 - 40 37 #define get_ds() (KERNEL_DS) 41 38 42 39 #if !defined(CONFIG_MMU) 40 + /* NOMMU is always true */ 41 + #define __addr_ok(addr) (1) 42 + 43 43 static inline mm_segment_t get_fs(void) 44 44 { 45 45 return USER_DS; ··· 66 66 return ((addr >= memory_start) && ((addr + size) < memory_end)); 67 67 } 68 68 #else /* CONFIG_MMU */ 69 + #define __addr_ok(addr) \ 70 + ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg)) 71 + 69 72 #define get_fs() (current_thread_info()->addr_limit) 70 73 #define set_fs(x) (current_thread_info()->addr_limit = (x)) 71 74