Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'microblaze-v5.11' of git://git.monstr.eu/linux-2.6-microblaze

Pull microblaze updates from Michal Simek:
"The biggest change is to remove support for noMMU configuration.

FPGAs have become bigger, so people have been using MicroBlaze with an MMU
for many years, and there is likely no user of this code anymore. No one is updating
libraries for this configuration either.

- Remove noMMU support

- Add support for TIF_NOTIFY_SIGNAL

- Small header fix"

* tag 'microblaze-v5.11' of git://git.monstr.eu/linux-2.6-microblaze:
microblaze: Remove noMMU code
microblaze: add support for TIF_NOTIFY_SIGNAL
microblaze: Replace <linux/clk-provider.h> by <linux/of_clk.h>

+16 -1255
+6 -47
arch/microblaze/Kconfig
··· 3 3 def_bool y 4 4 select ARCH_32BIT_OFF_T 5 5 select ARCH_NO_SWAP 6 - select ARCH_HAS_BINFMT_FLAT if !MMU 7 6 select ARCH_HAS_DMA_PREP_COHERENT 8 7 select ARCH_HAS_GCOV_PROFILE_ALL 9 8 select ARCH_HAS_SYNC_DMA_FOR_CPU 10 9 select ARCH_HAS_SYNC_DMA_FOR_DEVICE 11 - select ARCH_HAS_DMA_SET_UNCACHED if !MMU 12 10 select ARCH_MIGHT_HAVE_PC_PARPORT 13 11 select ARCH_WANT_IPC_PARSE_VERSION 14 12 select BUILDTIME_TABLE_SORT 15 13 select TIMER_OF 16 14 select CLONE_BACKWARDS3 17 15 select COMMON_CLK 18 - select DMA_DIRECT_REMAP if MMU 16 + select DMA_DIRECT_REMAP 19 17 select GENERIC_ATOMIC64 20 18 select GENERIC_CPU_DEVICES 21 19 select GENERIC_IDLE_POLL_SETUP ··· 42 44 select TRACING_SUPPORT 43 45 select VIRT_TO_BUS 44 46 select CPU_NO_EFFICIENT_FFS 45 - select MMU_GATHER_NO_RANGE if MMU 47 + select MMU_GATHER_NO_RANGE 46 48 select SPARSE_IRQ 47 49 select SET_FS 48 50 ··· 93 95 source "kernel/Kconfig.hz" 94 96 95 97 config MMU 96 - bool "MMU support" 97 - default n 98 + def_bool y 98 99 99 100 comment "Boot options" 100 101 ··· 139 142 comment "Default settings for advanced configuration options are used" 140 143 depends on !ADVANCED_OPTIONS 141 144 142 - config XILINX_UNCACHED_SHADOW 143 - bool "Are you using uncached shadow for RAM ?" 144 - depends on ADVANCED_OPTIONS && !MMU 145 - default n 146 - help 147 - This is needed to be able to allocate uncachable memory regions. 148 - The feature requires the design to define the RAM memory controller 149 - window to be twice as large as the actual physical memory. 
150 - 151 145 config HIGHMEM 152 146 bool "High memory support" 153 - depends on MMU 154 147 select KMAP_LOCAL 155 148 help 156 149 The address space of Microblaze processors is only 4 Gigabytes large ··· 154 167 155 168 config LOWMEM_SIZE_BOOL 156 169 bool "Set maximum low memory" 157 - depends on ADVANCED_OPTIONS && MMU 170 + depends on ADVANCED_OPTIONS 158 171 help 159 172 This option allows you to set the maximum amount of memory which 160 173 will be used as "low memory", that is, memory which the kernel can ··· 192 205 193 206 config KERNEL_START 194 207 hex "Virtual address of kernel base" if KERNEL_START_BOOL 195 - default "0xc0000000" if MMU 196 - default KERNEL_BASE_ADDR if !MMU 208 + default "0xc0000000" 197 209 198 210 config TASK_SIZE_BOOL 199 211 bool "Set custom user task size" 200 - depends on ADVANCED_OPTIONS && MMU 212 + depends on ADVANCED_OPTIONS 201 213 help 202 214 This option allows you to set the amount of virtual address space 203 215 allocated to user tasks. This can be useful in optimizing the ··· 207 221 config TASK_SIZE 208 222 hex "Size of user task space" if TASK_SIZE_BOOL 209 223 default "0x80000000" 210 - 211 - choice 212 - prompt "Page size" 213 - default MICROBLAZE_4K_PAGES 214 - depends on ADVANCED_OPTIONS && !MMU 215 - help 216 - Select the kernel logical page size. Increasing the page size 217 - will reduce software overhead at each page boundary, allow 218 - hardware prefetch mechanisms to be more effective, and allow 219 - larger dma transfers increasing IO efficiency and reducing 220 - overhead. However the utilization of memory will increase. 221 - For example, each cached file will using a multiple of the 222 - page size to hold its contents and the difference between the 223 - end of file and the end of page is wasted. 224 - 225 - If unsure, choose 4K_PAGES. 
226 - 227 - config MICROBLAZE_4K_PAGES 228 - bool "4k page size" 229 - 230 - config MICROBLAZE_16K_PAGES 231 - bool "16k page size" 232 - 233 - config MICROBLAZE_64K_PAGES 234 - bool "64k page size" 235 - 236 - endchoice 237 224 238 225 endmenu 239 226
+1 -10
arch/microblaze/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 KBUILD_DEFCONFIG := mmu_defconfig 3 3 4 - ifeq ($(CONFIG_MMU),y) 5 4 UTS_SYSNAME = -DUTS_SYSNAME=\"Linux\" 6 - else 7 - UTS_SYSNAME = -DUTS_SYSNAME=\"uClinux\" 8 - endif 9 5 10 6 # What CPU vesion are we building for, and crack it open 11 7 # as major.minor.rev ··· 63 67 64 68 core-y += $(boot)/dts/ 65 69 66 - # defines filename extension depending memory management type 67 - ifeq ($(CONFIG_MMU),) 68 - MMU := -nommu 69 - endif 70 - 71 - export MMU DTB 70 + export DTB 72 71 73 72 all: linux.bin 74 73
-1
arch/microblaze/configs/mmu_defconfig
··· 16 16 CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL=2 17 17 CONFIG_XILINX_MICROBLAZE0_USE_FPU=2 18 18 CONFIG_HZ_100=y 19 - CONFIG_MMU=y 20 19 CONFIG_CMDLINE_BOOL=y 21 20 CONFIG_CMDLINE_FORCE=y 22 21 CONFIG_HIGHMEM=y
-90
arch/microblaze/configs/nommu_defconfig
··· 1 - CONFIG_SYSVIPC=y 2 - CONFIG_POSIX_MQUEUE=y 3 - CONFIG_AUDIT=y 4 - CONFIG_BSD_PROCESS_ACCT=y 5 - CONFIG_BSD_PROCESS_ACCT_V3=y 6 - CONFIG_IKCONFIG=y 7 - CONFIG_IKCONFIG_PROC=y 8 - CONFIG_SYSFS_DEPRECATED=y 9 - CONFIG_SYSFS_DEPRECATED_V2=y 10 - # CONFIG_BASE_FULL is not set 11 - CONFIG_KALLSYMS_ALL=y 12 - CONFIG_EMBEDDED=y 13 - CONFIG_SLAB=y 14 - CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1 15 - CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1 16 - CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1 17 - CONFIG_XILINX_MICROBLAZE0_USE_DIV=1 18 - CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL=2 19 - CONFIG_XILINX_MICROBLAZE0_USE_FPU=2 20 - CONFIG_HZ_100=y 21 - CONFIG_CMDLINE_BOOL=y 22 - CONFIG_CMDLINE_FORCE=y 23 - CONFIG_PCI_XILINX=y 24 - CONFIG_MODULES=y 25 - CONFIG_MODULE_UNLOAD=y 26 - # CONFIG_BLK_DEV_BSG is not set 27 - CONFIG_PARTITION_ADVANCED=y 28 - # CONFIG_EFI_PARTITION is not set 29 - CONFIG_NET=y 30 - CONFIG_PACKET=y 31 - CONFIG_UNIX=y 32 - CONFIG_INET=y 33 - # CONFIG_IPV6 is not set 34 - CONFIG_PCI=y 35 - CONFIG_MTD=y 36 - CONFIG_MTD_CMDLINE_PARTS=y 37 - CONFIG_MTD_BLOCK=y 38 - CONFIG_MTD_CFI=y 39 - CONFIG_MTD_CFI_INTELEXT=y 40 - CONFIG_MTD_CFI_AMDSTD=y 41 - CONFIG_MTD_RAM=y 42 - CONFIG_MTD_UCLINUX=y 43 - CONFIG_BLK_DEV_RAM=y 44 - CONFIG_BLK_DEV_RAM_SIZE=8192 45 - CONFIG_NETDEVICES=y 46 - CONFIG_XILINX_EMACLITE=y 47 - CONFIG_XILINX_LL_TEMAC=y 48 - # CONFIG_INPUT is not set 49 - # CONFIG_SERIO is not set 50 - # CONFIG_VT is not set 51 - CONFIG_SERIAL_8250=y 52 - CONFIG_SERIAL_8250_CONSOLE=y 53 - CONFIG_SERIAL_OF_PLATFORM=y 54 - CONFIG_SERIAL_UARTLITE=y 55 - CONFIG_SERIAL_UARTLITE_CONSOLE=y 56 - # CONFIG_HW_RANDOM is not set 57 - CONFIG_XILINX_HWICAP=y 58 - CONFIG_I2C=y 59 - CONFIG_I2C_XILINX=y 60 - CONFIG_SPI=y 61 - CONFIG_SPI_XILINX=y 62 - CONFIG_GPIOLIB=y 63 - CONFIG_GPIO_SYSFS=y 64 - CONFIG_GPIO_XILINX=y 65 - CONFIG_POWER_RESET=y 66 - CONFIG_POWER_RESET_GPIO_RESTART=y 67 - # CONFIG_HWMON is not set 68 - CONFIG_WATCHDOG=y 69 - CONFIG_XILINX_WATCHDOG=y 70 - CONFIG_FB=y 71 - 
CONFIG_FB_XILINX=y 72 - # CONFIG_USB_SUPPORT is not set 73 - CONFIG_EXT3_FS=y 74 - # CONFIG_DNOTIFY is not set 75 - CONFIG_CRAMFS=y 76 - CONFIG_ROMFS_FS=y 77 - CONFIG_NFS_FS=y 78 - CONFIG_NFS_V3_ACL=y 79 - CONFIG_NLS=y 80 - CONFIG_KEYS=y 81 - CONFIG_ENCRYPTED_KEYS=y 82 - CONFIG_CRYPTO_ECB=y 83 - CONFIG_CRYPTO_MD4=y 84 - CONFIG_CRYPTO_MD5=y 85 - CONFIG_CRYPTO_ARC4=y 86 - CONFIG_CRYPTO_DES=y 87 - CONFIG_DEBUG_INFO=y 88 - CONFIG_DEBUG_SLAB=y 89 - CONFIG_DETECT_HUNG_TASK=y 90 - CONFIG_DEBUG_SPINLOCK=y
-6
arch/microblaze/include/asm/dma.h
··· 6 6 #ifndef _ASM_MICROBLAZE_DMA_H 7 7 #define _ASM_MICROBLAZE_DMA_H 8 8 9 - #ifndef CONFIG_MMU 10 - /* we don't have dma address limit. define it as zero to be 11 - * unlimited. */ 12 - #define MAX_DMA_ADDRESS (0) 13 - #else 14 9 /* Virtual address corresponding to last available physical memory address. */ 15 10 #define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1) 16 - #endif 17 11 18 12 #ifdef CONFIG_PCI 19 13 extern int isa_dma_bridge_buggy;
-5
arch/microblaze/include/asm/exceptions.h
··· 11 11 #define _ASM_MICROBLAZE_EXCEPTIONS_H 12 12 13 13 #ifdef __KERNEL__ 14 - 15 - #ifndef CONFIG_MMU 16 - #define EX_HANDLER_STACK_SIZ (4*19) 17 - #endif 18 - 19 14 #ifndef __ASSEMBLY__ 20 15 21 16 /* Macros to enable and disable HW exceptions in the MSR */
-3
arch/microblaze/include/asm/io.h
··· 30 30 #define PCI_IOBASE ((void __iomem *)_IO_BASE) 31 31 #define IO_SPACE_LIMIT (0xFFFFFFFF) 32 32 33 - #ifdef CONFIG_MMU 34 33 #define page_to_bus(page) (page_to_phys(page)) 35 34 36 35 extern void iounmap(volatile void __iomem *addr); 37 36 38 37 extern void __iomem *ioremap(phys_addr_t address, unsigned long size); 39 - 40 - #endif /* CONFIG_MMU */ 41 38 42 39 /* Big Endian */ 43 40 #define out_be32(a, v) __raw_writel((v), (void __iomem __force *)(a))
-4
arch/microblaze/include/asm/mmu.h
··· 8 8 #ifndef _ASM_MICROBLAZE_MMU_H 9 9 #define _ASM_MICROBLAZE_MMU_H 10 10 11 - # ifndef CONFIG_MMU 12 - # include <asm-generic/mmu.h> 13 - # else /* CONFIG_MMU */ 14 11 # ifdef __KERNEL__ 15 12 # ifndef __ASSEMBLY__ 16 13 ··· 116 119 # define TLB_G 0x00000001 /* Memory is guarded from prefetch */ 117 120 118 121 # endif /* __KERNEL__ */ 119 - # endif /* CONFIG_MMU */ 120 122 #endif /* _ASM_MICROBLAZE_MMU_H */
-4
arch/microblaze/include/asm/mmu_context.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifdef CONFIG_MMU 3 2 # include <asm/mmu_context_mm.h> 4 - #else 5 - # include <asm-generic/nommu_context.h> 6 - #endif
-59
arch/microblaze/include/asm/page.h
··· 20 20 #ifdef __KERNEL__ 21 21 22 22 /* PAGE_SHIFT determines the page size */ 23 - #if defined(CONFIG_MICROBLAZE_64K_PAGES) 24 - #define PAGE_SHIFT 16 25 - #elif defined(CONFIG_MICROBLAZE_16K_PAGES) 26 - #define PAGE_SHIFT 14 27 - #else 28 23 #define PAGE_SHIFT 12 29 - #endif 30 24 #define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT) 31 25 #define PAGE_MASK (~(PAGE_SIZE-1)) 32 26 ··· 38 44 #define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1))) 39 45 #define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1))) 40 46 41 - #ifndef CONFIG_MMU 42 - /* 43 - * PAGE_OFFSET -- the first address of the first page of memory. When not 44 - * using MMU this corresponds to the first free page in physical memory (aligned 45 - * on a page boundary). 46 - */ 47 - extern unsigned int __page_offset; 48 - #define PAGE_OFFSET __page_offset 49 - 50 - #else /* CONFIG_MMU */ 51 - 52 47 /* 53 48 * PAGE_OFFSET -- the first address of the first page of memory. With MMU 54 49 * it is set to the kernel start address (aligned on a page boundary). 
··· 53 70 typedef unsigned long pte_basic_t; 54 71 #define PTE_FMT "%.8lx" 55 72 56 - #endif /* CONFIG_MMU */ 57 - 58 73 # define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) 59 74 # define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) 60 75 ··· 67 86 typedef struct { unsigned long pte; } pte_t; 68 87 typedef struct { unsigned long pgprot; } pgprot_t; 69 88 /* FIXME this can depend on linux kernel version */ 70 - # ifdef CONFIG_MMU 71 89 typedef struct { unsigned long pgd; } pgd_t; 72 - # else /* CONFIG_MMU */ 73 - typedef struct { unsigned long ste[64]; } pmd_t; 74 - typedef struct { pmd_t pue[1]; } pud_t; 75 - typedef struct { pud_t p4e[1]; } p4d_t; 76 - typedef struct { p4d_t pge[1]; } pgd_t; 77 - # endif /* CONFIG_MMU */ 78 90 79 91 # define pte_val(x) ((x).pte) 80 92 # define pgprot_val(x) ((x).pgprot) 81 93 82 - # ifdef CONFIG_MMU 83 94 # define pgd_val(x) ((x).pgd) 84 - # else /* CONFIG_MMU */ 85 - # define pmd_val(x) ((x).ste[0]) 86 - # define pud_val(x) ((x).pue[0]) 87 - # define pgd_val(x) ((x).pge[0]) 88 - # endif /* CONFIG_MMU */ 89 95 90 96 # define __pte(x) ((pte_t) { (x) }) 91 97 # define __pgd(x) ((pgd_t) { (x) }) ··· 110 142 # define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr)))) 111 143 # define pfn_to_virt(pfn) __va(pfn_to_phys((pfn))) 112 144 113 - # ifdef CONFIG_MMU 114 - 115 145 # define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) 116 146 # define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) 117 147 # define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 118 148 119 - # else /* CONFIG_MMU */ 120 - # define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) 121 - # define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) 122 - # define page_to_phys(page) (pfn_to_phys(page_to_pfn(page))) 123 - # define page_to_bus(page) (page_to_phys(page)) 124 - # define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr))) 125 - # endif /* CONFIG_MMU */ 126 - 127 - # ifndef CONFIG_MMU 128 - # define 
pfn_valid(pfn) (((pfn) >= min_low_pfn) && \ 129 - ((pfn) <= (min_low_pfn + max_mapnr))) 130 - # define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) 131 - # else /* CONFIG_MMU */ 132 149 # define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT) 133 150 # define pfn_valid(pfn) ((pfn) < (max_mapnr + ARCH_PFN_OFFSET)) 134 - # endif /* CONFIG_MMU */ 135 151 136 152 # endif /* __ASSEMBLY__ */ 137 153 ··· 126 174 127 175 /* Convert between virtual and physical address for MMU. */ 128 176 /* Handle MicroBlaze processor with virtual memory. */ 129 - #ifndef CONFIG_MMU 130 - #define __virt_to_phys(addr) addr 131 - #define __phys_to_virt(addr) addr 132 - #define tophys(rd, rs) addik rd, rs, 0 133 - #define tovirt(rd, rs) addik rd, rs, 0 134 - #else 135 177 #define __virt_to_phys(addr) \ 136 178 ((addr) + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START) 137 179 #define __phys_to_virt(addr) \ ··· 134 188 addik rd, rs, (CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START) 135 189 #define tovirt(rd, rs) \ 136 190 addik rd, rs, (CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR) 137 - #endif /* CONFIG_MMU */ 138 191 139 192 #define TOPHYS(addr) __virt_to_phys(addr) 140 - 141 - #ifdef CONFIG_MMU 142 - 143 - #endif /* CONFIG_MMU */ 144 193 145 194 #endif /* __KERNEL__ */ 146 195
-4
arch/microblaze/include/asm/pgalloc.h
··· 8 8 #ifndef _ASM_MICROBLAZE_PGALLOC_H 9 9 #define _ASM_MICROBLAZE_PGALLOC_H 10 10 11 - #ifdef CONFIG_MMU 12 - 13 11 #include <linux/kernel.h> /* For min/max macros */ 14 12 #include <linux/highmem.h> 15 13 #include <linux/pgtable.h> ··· 39 41 40 42 #define pmd_populate_kernel(mm, pmd, pte) \ 41 43 (pmd_val(*(pmd)) = (unsigned long) (pte)) 42 - 43 - #endif /* CONFIG_MMU */ 44 44 45 45 #endif /* _ASM_MICROBLAZE_PGALLOC_H */
-43
arch/microblaze/include/asm/pgtable.h
··· 14 14 extern int mem_init_done; 15 15 #endif 16 16 17 - #ifndef CONFIG_MMU 18 - 19 - #define pgd_present(pgd) (1) /* pages are always present on non MMU */ 20 - #define pgd_none(pgd) (0) 21 - #define pgd_bad(pgd) (0) 22 - #define pgd_clear(pgdp) 23 - #define kern_addr_valid(addr) (1) 24 - 25 - #define PAGE_NONE __pgprot(0) /* these mean nothing to non MMU */ 26 - #define PAGE_SHARED __pgprot(0) /* these mean nothing to non MMU */ 27 - #define PAGE_COPY __pgprot(0) /* these mean nothing to non MMU */ 28 - #define PAGE_READONLY __pgprot(0) /* these mean nothing to non MMU */ 29 - #define PAGE_KERNEL __pgprot(0) /* these mean nothing to non MMU */ 30 - 31 - #define pgprot_noncached(x) (x) 32 - #define pgprot_writecombine pgprot_noncached 33 - #define pgprot_device pgprot_noncached 34 - 35 - #define __swp_type(x) (0) 36 - #define __swp_offset(x) (0) 37 - #define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) }) 38 - #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 39 - #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 40 - 41 - #define ZERO_PAGE(vaddr) ({ BUG(); NULL; }) 42 - 43 - #define swapper_pg_dir ((pgd_t *) NULL) 44 - 45 - #define arch_enter_lazy_cpu_mode() do {} while (0) 46 - 47 - #define pgprot_noncached_wc(prot) prot 48 - 49 - /* 50 - * All 32bit addresses are effectively valid for vmalloc... 51 - * Sort of meaningless for non-VM targets. 52 - */ 53 - #define VMALLOC_START 0 54 - #define VMALLOC_END 0xffffffff 55 - 56 - #else /* CONFIG_MMU */ 57 - 58 17 #include <asm-generic/pgtable-nopmd.h> 59 18 60 19 #ifdef __KERNEL__ ··· 449 490 450 491 #endif /* __ASSEMBLY__ */ 451 492 #endif /* __KERNEL__ */ 452 - 453 - #endif /* CONFIG_MMU */ 454 493 455 494 #ifndef __ASSEMBLY__ 456 495 extern unsigned long ioremap_bot, ioremap_base;
-37
arch/microblaze/include/asm/processor.h
··· 31 31 32 32 # endif /* __ASSEMBLY__ */ 33 33 34 - # ifndef CONFIG_MMU 35 - /* 36 - * User space process size: memory size 37 - * 38 - * TASK_SIZE on MMU cpu is usually 1GB. However, on no-MMU arch, both 39 - * user processes and the kernel is on the same memory region. They 40 - * both share the memory space and that is limited by the amount of 41 - * physical memory. thus, we set TASK_SIZE == amount of total memory. 42 - */ 43 - # define TASK_SIZE (0x81000000 - 0x80000000) 44 - 45 - /* 46 - * This decides where the kernel will search for a free chunk of vm 47 - * space during mmap's. We won't be using it 48 - */ 49 - # define TASK_UNMAPPED_BASE 0 50 - 51 - /* definition in include/linux/sched.h */ 52 - struct task_struct; 53 - 54 - /* thread_struct is gone. use thread_info instead. */ 55 - struct thread_struct { }; 56 - # define INIT_THREAD { } 57 - 58 - /* Free all resources held by a thread. */ 59 - static inline void release_thread(struct task_struct *dead_task) 60 - { 61 - } 62 - 63 - extern unsigned long get_wchan(struct task_struct *p); 64 - 65 - # define KSTK_EIP(tsk) (0) 66 - # define KSTK_ESP(tsk) (0) 67 - 68 - # else /* CONFIG_MMU */ 69 - 70 34 /* 71 35 * This is used to define STACK_TOP, and with MMU it must be below 72 36 * kernel base to select the correct PGD when handling MMU exceptions. ··· 94 130 #endif 95 131 96 132 # endif /* __ASSEMBLY__ */ 97 - # endif /* CONFIG_MMU */ 98 133 #endif /* _ASM_MICROBLAZE_PROCESSOR_H */
-2
arch/microblaze/include/asm/registers.h
··· 27 27 #define FSR_UF (1<<1) /* Underflow */ 28 28 #define FSR_DO (1<<0) /* Denormalized operand error */ 29 29 30 - # ifdef CONFIG_MMU 31 30 /* Machine State Register (MSR) Fields */ 32 31 # define MSR_UM (1<<11) /* User Mode */ 33 32 # define MSR_UMS (1<<12) /* User Mode Save */ ··· 42 43 # define ESR_DIZ (1<<11) /* Zone Protection */ 43 44 # define ESR_S (1<<10) /* Store instruction */ 44 45 45 - # endif /* CONFIG_MMU */ 46 46 #endif /* _ASM_MICROBLAZE_REGISTERS_H */
-2
arch/microblaze/include/asm/setup.h
··· 14 14 15 15 extern char *klimit; 16 16 17 - # ifdef CONFIG_MMU 18 17 extern void mmu_reset(void); 19 - # endif /* CONFIG_MMU */ 20 18 21 19 void time_init(void); 22 20 void init_IRQ(void);
+2
arch/microblaze/include/asm/thread_info.h
··· 107 107 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 108 108 /* restore singlestep on return to user mode */ 109 109 #define TIF_SINGLESTEP 4 110 + #define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ 110 111 #define TIF_MEMDIE 6 /* is terminating due to OOM killer */ 111 112 #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ 112 113 #define TIF_SECCOMP 10 /* secure computing */ ··· 120 119 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 121 120 #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 122 121 #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) 122 + #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) 123 123 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 124 124 #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 125 125 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
-14
arch/microblaze/include/asm/tlbflush.h
··· 8 8 #ifndef _ASM_MICROBLAZE_TLBFLUSH_H 9 9 #define _ASM_MICROBLAZE_TLBFLUSH_H 10 10 11 - #ifdef CONFIG_MMU 12 - 13 11 #include <linux/sched.h> 14 12 #include <linux/threads.h> 15 13 #include <asm/processor.h> /* For TASK_SIZE */ ··· 47 49 */ 48 50 static inline void flush_tlb_pgtables(struct mm_struct *mm, 49 51 unsigned long start, unsigned long end) { } 50 - 51 - #else /* CONFIG_MMU */ 52 - 53 - #define flush_tlb() BUG() 54 - #define flush_tlb_all() BUG() 55 - #define flush_tlb_mm(mm) BUG() 56 - #define flush_tlb_page(vma, addr) BUG() 57 - #define flush_tlb_range(mm, start, end) BUG() 58 - #define flush_tlb_pgtables(mm, start, end) BUG() 59 - #define flush_tlb_kernel_range(start, end) BUG() 60 - 61 - #endif /* CONFIG_MMU */ 62 52 63 53 #endif /* _ASM_MICROBLAZE_TLBFLUSH_H */
-27
arch/microblaze/include/asm/uaccess.h
··· 30 30 */ 31 31 # define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) 32 32 33 - # ifndef CONFIG_MMU 34 - # define KERNEL_DS MAKE_MM_SEG(0) 35 - # define USER_DS KERNEL_DS 36 - # else 37 33 # define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF) 38 34 # define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) 39 - # endif 40 35 41 36 # define get_fs() (current_thread_info()->addr_limit) 42 37 # define set_fs(val) (current_thread_info()->addr_limit = (val)) 43 38 44 39 # define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) 45 - 46 - #ifndef CONFIG_MMU 47 - 48 - /* Check against bounds of physical memory */ 49 - static inline int ___range_ok(unsigned long addr, unsigned long size) 50 - { 51 - return ((addr < memory_start) || 52 - ((addr + size - 1) > (memory_start + memory_size - 1))); 53 - } 54 - 55 - #define __range_ok(addr, size) \ 56 - ___range_ok((unsigned long)(addr), (unsigned long)(size)) 57 - 58 - #define access_ok(addr, size) (__range_ok((addr), (size)) == 0) 59 - 60 - #else 61 40 62 41 static inline int access_ok(const void __user *addr, unsigned long size) 63 42 { ··· 56 77 (u32)get_fs().seg); 57 78 return 1; 58 79 } 59 - #endif 60 80 61 - #ifdef CONFIG_MMU 62 81 # define __FIXUP_SECTION ".section .fixup,\"ax\"\n" 63 82 # define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n" 64 - #else 65 - # define __FIXUP_SECTION ".section .discard,\"ax\"\n" 66 - # define __EX_TABLE_SECTION ".section .discard,\"ax\"\n" 67 - #endif 68 83 69 84 extern unsigned long __copy_tofrom_user(void __user *to, 70 85 const void __user *from, unsigned long size);
+2 -2
arch/microblaze/kernel/Makefile
··· 22 22 obj-y += cpu/ 23 23 24 24 obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o 25 - obj-$(CONFIG_MMU) += misc.o 25 + obj-y += misc.o 26 26 obj-$(CONFIG_STACKTRACE) += stacktrace.o 27 27 obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount.o 28 28 obj-$(CONFIG_KGDB) += kgdb.o 29 29 30 - obj-y += entry$(MMU).o 30 + obj-y += entry.o
-2
arch/microblaze/kernel/asm-offsets.c
··· 70 70 71 71 /* struct task_struct */ 72 72 DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack)); 73 - #ifdef CONFIG_MMU 74 73 DEFINE(TASK_STATE, offsetof(struct task_struct, state)); 75 74 DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); 76 75 DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); ··· 83 84 84 85 DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); 85 86 BLANK(); 86 - #endif 87 87 88 88 /* struct thread_info */ 89 89 DEFINE(TI_TASK, offsetof(struct thread_info, task));
-622
arch/microblaze/kernel/entry-nommu.S
··· 1 - /* 2 - * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> 3 - * Copyright (C) 2007-2009 PetaLogix 4 - * Copyright (C) 2006 Atmark Techno, Inc. 5 - * 6 - * This file is subject to the terms and conditions of the GNU General Public 7 - * License. See the file "COPYING" in the main directory of this archive 8 - * for more details. 9 - */ 10 - 11 - #include <linux/linkage.h> 12 - #include <asm/thread_info.h> 13 - #include <linux/errno.h> 14 - #include <asm/entry.h> 15 - #include <asm/asm-offsets.h> 16 - #include <asm/registers.h> 17 - #include <asm/unistd.h> 18 - #include <asm/percpu.h> 19 - #include <asm/signal.h> 20 - 21 - #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 22 - .macro disable_irq 23 - msrclr r0, MSR_IE 24 - .endm 25 - 26 - .macro enable_irq 27 - msrset r0, MSR_IE 28 - .endm 29 - 30 - .macro clear_bip 31 - msrclr r0, MSR_BIP 32 - .endm 33 - #else 34 - .macro disable_irq 35 - mfs r11, rmsr 36 - andi r11, r11, ~MSR_IE 37 - mts rmsr, r11 38 - .endm 39 - 40 - .macro enable_irq 41 - mfs r11, rmsr 42 - ori r11, r11, MSR_IE 43 - mts rmsr, r11 44 - .endm 45 - 46 - .macro clear_bip 47 - mfs r11, rmsr 48 - andi r11, r11, ~MSR_BIP 49 - mts rmsr, r11 50 - .endm 51 - #endif 52 - 53 - ENTRY(_interrupt) 54 - swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */ 55 - swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */ 56 - lwi r11, r0, PER_CPU(KM) /* load mode indicator */ 57 - beqid r11, 1f 58 - nop 59 - brid 2f /* jump over */ 60 - addik r1, r1, (-PT_SIZE) /* room for pt_regs (delay slot) */ 61 - 1: /* switch to kernel stack */ 62 - lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */ 63 - lwi r1, r1, TS_THREAD_INFO /* get the thread info */ 64 - /* calculate kernel stack pointer */ 65 - addik r1, r1, THREAD_SIZE - PT_SIZE 66 - 2: 67 - swi r11, r1, PT_MODE /* store the mode */ 68 - lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */ 69 - swi r2, r1, PT_R2 70 - swi r3, r1, PT_R3 71 - swi r4, r1, PT_R4 72 - swi r5, r1, PT_R5 73 - swi r6, r1, 
PT_R6 74 - swi r7, r1, PT_R7 75 - swi r8, r1, PT_R8 76 - swi r9, r1, PT_R9 77 - swi r10, r1, PT_R10 78 - swi r11, r1, PT_R11 79 - swi r12, r1, PT_R12 80 - swi r13, r1, PT_R13 81 - swi r14, r1, PT_R14 82 - swi r14, r1, PT_PC 83 - swi r15, r1, PT_R15 84 - swi r16, r1, PT_R16 85 - swi r17, r1, PT_R17 86 - swi r18, r1, PT_R18 87 - swi r19, r1, PT_R19 88 - swi r20, r1, PT_R20 89 - swi r21, r1, PT_R21 90 - swi r22, r1, PT_R22 91 - swi r23, r1, PT_R23 92 - swi r24, r1, PT_R24 93 - swi r25, r1, PT_R25 94 - swi r26, r1, PT_R26 95 - swi r27, r1, PT_R27 96 - swi r28, r1, PT_R28 97 - swi r29, r1, PT_R29 98 - swi r30, r1, PT_R30 99 - swi r31, r1, PT_R31 100 - /* special purpose registers */ 101 - mfs r11, rmsr 102 - swi r11, r1, PT_MSR 103 - mfs r11, rear 104 - swi r11, r1, PT_EAR 105 - mfs r11, resr 106 - swi r11, r1, PT_ESR 107 - mfs r11, rfsr 108 - swi r11, r1, PT_FSR 109 - /* reload original stack pointer and save it */ 110 - lwi r11, r0, PER_CPU(ENTRY_SP) 111 - swi r11, r1, PT_R1 112 - /* update mode indicator we are in kernel mode */ 113 - addik r11, r0, 1 114 - swi r11, r0, PER_CPU(KM) 115 - /* restore r31 */ 116 - lwi r31, r0, PER_CPU(CURRENT_SAVE) 117 - /* prepare the link register, the argument and jump */ 118 - addik r15, r0, ret_from_intr - 8 119 - addk r6, r0, r15 120 - braid do_IRQ 121 - add r5, r0, r1 122 - 123 - ret_from_intr: 124 - lwi r11, r1, PT_MODE 125 - bneid r11, no_intr_resched 126 - 127 - 3: 128 - lwi r6, r31, TS_THREAD_INFO /* get thread info */ 129 - lwi r19, r6, TI_FLAGS /* get flags in thread info */ 130 - /* do an extra work if any bits are set */ 131 - 132 - andi r11, r19, _TIF_NEED_RESCHED 133 - beqi r11, 1f 134 - bralid r15, schedule 135 - nop 136 - bri 3b 137 - 1: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME 138 - beqid r11, no_intr_resched 139 - addk r5, r1, r0 140 - bralid r15, do_notify_resume 141 - addk r6, r0, r0 142 - bri 3b 143 - 144 - no_intr_resched: 145 - /* Disable interrupts, we are now committed to the state restore */ 146 - 
disable_irq 147 - 148 - /* save mode indicator */ 149 - lwi r11, r1, PT_MODE 150 - swi r11, r0, PER_CPU(KM) 151 - 152 - /* save r31 */ 153 - swi r31, r0, PER_CPU(CURRENT_SAVE) 154 - restore_context: 155 - /* special purpose registers */ 156 - lwi r11, r1, PT_FSR 157 - mts rfsr, r11 158 - lwi r11, r1, PT_ESR 159 - mts resr, r11 160 - lwi r11, r1, PT_EAR 161 - mts rear, r11 162 - lwi r11, r1, PT_MSR 163 - mts rmsr, r11 164 - 165 - lwi r31, r1, PT_R31 166 - lwi r30, r1, PT_R30 167 - lwi r29, r1, PT_R29 168 - lwi r28, r1, PT_R28 169 - lwi r27, r1, PT_R27 170 - lwi r26, r1, PT_R26 171 - lwi r25, r1, PT_R25 172 - lwi r24, r1, PT_R24 173 - lwi r23, r1, PT_R23 174 - lwi r22, r1, PT_R22 175 - lwi r21, r1, PT_R21 176 - lwi r20, r1, PT_R20 177 - lwi r19, r1, PT_R19 178 - lwi r18, r1, PT_R18 179 - lwi r17, r1, PT_R17 180 - lwi r16, r1, PT_R16 181 - lwi r15, r1, PT_R15 182 - lwi r14, r1, PT_PC 183 - lwi r13, r1, PT_R13 184 - lwi r12, r1, PT_R12 185 - lwi r11, r1, PT_R11 186 - lwi r10, r1, PT_R10 187 - lwi r9, r1, PT_R9 188 - lwi r8, r1, PT_R8 189 - lwi r7, r1, PT_R7 190 - lwi r6, r1, PT_R6 191 - lwi r5, r1, PT_R5 192 - lwi r4, r1, PT_R4 193 - lwi r3, r1, PT_R3 194 - lwi r2, r1, PT_R2 195 - lwi r1, r1, PT_R1 196 - rtid r14, 0 197 - nop 198 - 199 - ENTRY(_reset) 200 - brai 0; 201 - 202 - ENTRY(_user_exception) 203 - swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */ 204 - swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */ 205 - lwi r11, r0, PER_CPU(KM) /* load mode indicator */ 206 - beqid r11, 1f /* Already in kernel mode? 
*/ 207 - nop 208 - brid 2f /* jump over */ 209 - addik r1, r1, (-PT_SIZE) /* Room for pt_regs (delay slot) */ 210 - 1: /* Switch to kernel stack */ 211 - lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */ 212 - lwi r1, r1, TS_THREAD_INFO /* get the thread info */ 213 - /* calculate kernel stack pointer */ 214 - addik r1, r1, THREAD_SIZE - PT_SIZE 215 - 2: 216 - swi r11, r1, PT_MODE /* store the mode */ 217 - lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */ 218 - /* save them on stack */ 219 - swi r2, r1, PT_R2 220 - swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */ 221 - swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */ 222 - swi r5, r1, PT_R5 223 - swi r6, r1, PT_R6 224 - swi r7, r1, PT_R7 225 - swi r8, r1, PT_R8 226 - swi r9, r1, PT_R9 227 - swi r10, r1, PT_R10 228 - swi r11, r1, PT_R11 229 - /* r12: _always_ in clobber list; see unistd.h */ 230 - swi r12, r1, PT_R12 231 - swi r13, r1, PT_R13 232 - /* r14: _always_ in clobber list; see unistd.h */ 233 - swi r14, r1, PT_R14 234 - /* but we want to return to the next inst. 
*/ 235 - addik r14, r14, 0x4 236 - swi r14, r1, PT_PC /* increment by 4 and store in pc */ 237 - swi r15, r1, PT_R15 238 - swi r16, r1, PT_R16 239 - swi r17, r1, PT_R17 240 - swi r18, r1, PT_R18 241 - swi r19, r1, PT_R19 242 - swi r20, r1, PT_R20 243 - swi r21, r1, PT_R21 244 - swi r22, r1, PT_R22 245 - swi r23, r1, PT_R23 246 - swi r24, r1, PT_R24 247 - swi r25, r1, PT_R25 248 - swi r26, r1, PT_R26 249 - swi r27, r1, PT_R27 250 - swi r28, r1, PT_R28 251 - swi r29, r1, PT_R29 252 - swi r30, r1, PT_R30 253 - swi r31, r1, PT_R31 254 - 255 - disable_irq 256 - nop /* make sure IE bit is in effect */ 257 - clear_bip /* once IE is in effect it is safe to clear BIP */ 258 - nop 259 - 260 - /* special purpose registers */ 261 - mfs r11, rmsr 262 - swi r11, r1, PT_MSR 263 - mfs r11, rear 264 - swi r11, r1, PT_EAR 265 - mfs r11, resr 266 - swi r11, r1, PT_ESR 267 - mfs r11, rfsr 268 - swi r11, r1, PT_FSR 269 - /* reload original stack pointer and save it */ 270 - lwi r11, r0, PER_CPU(ENTRY_SP) 271 - swi r11, r1, PT_R1 272 - /* update mode indicator we are in kernel mode */ 273 - addik r11, r0, 1 274 - swi r11, r0, PER_CPU(KM) 275 - /* restore r31 */ 276 - lwi r31, r0, PER_CPU(CURRENT_SAVE) 277 - /* re-enable interrupts now we are in kernel mode */ 278 - enable_irq 279 - 280 - /* See if the system call number is valid. */ 281 - addi r11, r12, -__NR_syscalls 282 - bgei r11, 1f /* return to user if not valid */ 283 - /* Figure out which function to use for this system call. */ 284 - /* Note Microblaze barrel shift is optional, so don't rely on it */ 285 - add r12, r12, r12 /* convert num -> ptr */ 286 - addik r30, r0, 1 /* restarts allowed */ 287 - add r12, r12, r12 288 - lwi r12, r12, sys_call_table /* Get function pointer */ 289 - addik r15, r0, ret_to_user-8 /* set return address */ 290 - bra r12 /* Make the system call. 
*/ 291 - bri 0 /* won't reach here */ 292 - 1: 293 - brid ret_to_user /* jump to syscall epilogue */ 294 - addi r3, r0, -ENOSYS /* set errno in delay slot */ 295 - 296 - /* 297 - * Debug traps are like a system call, but entered via brki r14, 0x60 298 - * All we need to do is send the SIGTRAP signal to current, ptrace and 299 - * do_notify_resume will handle the rest 300 - */ 301 - ENTRY(_debug_exception) 302 - swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */ 303 - lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */ 304 - lwi r1, r1, TS_THREAD_INFO /* get the thread info */ 305 - addik r1, r1, THREAD_SIZE - PT_SIZE /* get the kernel stack */ 306 - swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */ 307 - lwi r11, r0, PER_CPU(KM) /* load mode indicator */ 308 - //save_context: 309 - swi r11, r1, PT_MODE /* store the mode */ 310 - lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */ 311 - /* save them on stack */ 312 - swi r2, r1, PT_R2 313 - swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */ 314 - swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */ 315 - swi r5, r1, PT_R5 316 - swi r6, r1, PT_R6 317 - swi r7, r1, PT_R7 318 - swi r8, r1, PT_R8 319 - swi r9, r1, PT_R9 320 - swi r10, r1, PT_R10 321 - swi r11, r1, PT_R11 322 - /* r12: _always_ in clobber list; see unistd.h */ 323 - swi r12, r1, PT_R12 324 - swi r13, r1, PT_R13 325 - /* r14: _always_ in clobber list; see unistd.h */ 326 - swi r14, r1, PT_R14 327 - swi r14, r1, PT_PC /* Will return to interrupted instruction */ 328 - swi r15, r1, PT_R15 329 - swi r16, r1, PT_R16 330 - swi r17, r1, PT_R17 331 - swi r18, r1, PT_R18 332 - swi r19, r1, PT_R19 333 - swi r20, r1, PT_R20 334 - swi r21, r1, PT_R21 335 - swi r22, r1, PT_R22 336 - swi r23, r1, PT_R23 337 - swi r24, r1, PT_R24 338 - swi r25, r1, PT_R25 339 - swi r26, r1, PT_R26 340 - swi r27, r1, PT_R27 341 - swi r28, r1, PT_R28 342 - swi r29, r1, PT_R29 343 - swi r30, r1, PT_R30 344 - swi r31, r1, PT_R31 345 - 346 - 
disable_irq 347 - nop /* make sure IE bit is in effect */ 348 - clear_bip /* once IE is in effect it is safe to clear BIP */ 349 - nop 350 - 351 - /* special purpose registers */ 352 - mfs r11, rmsr 353 - swi r11, r1, PT_MSR 354 - mfs r11, rear 355 - swi r11, r1, PT_EAR 356 - mfs r11, resr 357 - swi r11, r1, PT_ESR 358 - mfs r11, rfsr 359 - swi r11, r1, PT_FSR 360 - /* reload original stack pointer and save it */ 361 - lwi r11, r0, PER_CPU(ENTRY_SP) 362 - swi r11, r1, PT_R1 363 - /* update mode indicator we are in kernel mode */ 364 - addik r11, r0, 1 365 - swi r11, r0, PER_CPU(KM) 366 - /* restore r31 */ 367 - lwi r31, r0, PER_CPU(CURRENT_SAVE) 368 - /* re-enable interrupts now we are in kernel mode */ 369 - enable_irq 370 - 371 - addi r5, r0, SIGTRAP /* sending the trap signal */ 372 - add r6, r0, r31 /* to current */ 373 - bralid r15, send_sig 374 - add r7, r0, r0 /* 3rd param zero */ 375 - 376 - addik r30, r0, 1 /* restarts allowed ??? */ 377 - /* Restore r3/r4 to work around how ret_to_user works */ 378 - lwi r3, r1, PT_R3 379 - lwi r4, r1, PT_R4 380 - bri ret_to_user 381 - 382 - ENTRY(_break) 383 - bri 0 384 - 385 - /* struct task_struct *_switch_to(struct thread_info *prev, 386 - struct thread_info *next); */ 387 - ENTRY(_switch_to) 388 - /* prepare return value */ 389 - addk r3, r0, r31 390 - 391 - /* save registers in cpu_context */ 392 - /* use r11 and r12, volatile registers, as temp register */ 393 - addik r11, r5, TI_CPU_CONTEXT 394 - swi r1, r11, CC_R1 395 - swi r2, r11, CC_R2 396 - /* skip volatile registers. 
397 - * they are saved on stack when we jumped to _switch_to() */ 398 - /* dedicated registers */ 399 - swi r13, r11, CC_R13 400 - swi r14, r11, CC_R14 401 - swi r15, r11, CC_R15 402 - swi r16, r11, CC_R16 403 - swi r17, r11, CC_R17 404 - swi r18, r11, CC_R18 405 - /* save non-volatile registers */ 406 - swi r19, r11, CC_R19 407 - swi r20, r11, CC_R20 408 - swi r21, r11, CC_R21 409 - swi r22, r11, CC_R22 410 - swi r23, r11, CC_R23 411 - swi r24, r11, CC_R24 412 - swi r25, r11, CC_R25 413 - swi r26, r11, CC_R26 414 - swi r27, r11, CC_R27 415 - swi r28, r11, CC_R28 416 - swi r29, r11, CC_R29 417 - swi r30, r11, CC_R30 418 - /* special purpose registers */ 419 - mfs r12, rmsr 420 - swi r12, r11, CC_MSR 421 - mfs r12, rear 422 - swi r12, r11, CC_EAR 423 - mfs r12, resr 424 - swi r12, r11, CC_ESR 425 - mfs r12, rfsr 426 - swi r12, r11, CC_FSR 427 - 428 - /* update r31, the current */ 429 - lwi r31, r6, TI_TASK 430 - swi r31, r0, PER_CPU(CURRENT_SAVE) 431 - 432 - /* get new process' cpu context and restore */ 433 - addik r11, r6, TI_CPU_CONTEXT 434 - 435 - /* special purpose registers */ 436 - lwi r12, r11, CC_FSR 437 - mts rfsr, r12 438 - lwi r12, r11, CC_ESR 439 - mts resr, r12 440 - lwi r12, r11, CC_EAR 441 - mts rear, r12 442 - lwi r12, r11, CC_MSR 443 - mts rmsr, r12 444 - /* non-volatile registers */ 445 - lwi r30, r11, CC_R30 446 - lwi r29, r11, CC_R29 447 - lwi r28, r11, CC_R28 448 - lwi r27, r11, CC_R27 449 - lwi r26, r11, CC_R26 450 - lwi r25, r11, CC_R25 451 - lwi r24, r11, CC_R24 452 - lwi r23, r11, CC_R23 453 - lwi r22, r11, CC_R22 454 - lwi r21, r11, CC_R21 455 - lwi r20, r11, CC_R20 456 - lwi r19, r11, CC_R19 457 - /* dedicated registers */ 458 - lwi r18, r11, CC_R18 459 - lwi r17, r11, CC_R17 460 - lwi r16, r11, CC_R16 461 - lwi r15, r11, CC_R15 462 - lwi r14, r11, CC_R14 463 - lwi r13, r11, CC_R13 464 - /* skip volatile registers */ 465 - lwi r2, r11, CC_R2 466 - lwi r1, r11, CC_R1 467 - 468 - rtsd r15, 8 469 - nop 470 - 471 - ENTRY(ret_from_fork) 472 - 
addk r5, r0, r3 473 - brlid r15, schedule_tail 474 - nop 475 - swi r31, r1, PT_R31 /* save r31 in user context. */ 476 - /* will soon be restored to r31 in ret_to_user */ 477 - addk r3, r0, r0 478 - brid ret_to_user 479 - nop 480 - 481 - ENTRY(ret_from_kernel_thread) 482 - brlid r15, schedule_tail 483 - addk r5, r0, r3 484 - brald r15, r20 485 - addk r5, r0, r19 486 - brid ret_to_user 487 - addk r3, r0, r0 488 - 489 - work_pending: 490 - lwi r11, r1, PT_MODE 491 - bneid r11, 2f 492 - 3: 493 - enable_irq 494 - andi r11, r19, _TIF_NEED_RESCHED 495 - beqi r11, 1f 496 - bralid r15, schedule 497 - nop 498 - bri 4f 499 - 1: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME 500 - beqi r11, no_work_pending 501 - addk r5, r30, r0 502 - bralid r15, do_notify_resume 503 - addik r6, r0, 1 504 - addk r30, r0, r0 /* no restarts from now on */ 505 - 4: 506 - disable_irq 507 - lwi r6, r31, TS_THREAD_INFO /* get thread info */ 508 - lwi r19, r6, TI_FLAGS /* get flags in thread info */ 509 - bri 3b 510 - 511 - ENTRY(ret_to_user) 512 - disable_irq 513 - 514 - swi r4, r1, PT_R4 /* return val */ 515 - swi r3, r1, PT_R3 /* return val */ 516 - 517 - lwi r6, r31, TS_THREAD_INFO /* get thread info */ 518 - lwi r19, r6, TI_FLAGS /* get flags in thread info */ 519 - bnei r19, work_pending /* do an extra work if any bits are set */ 520 - no_work_pending: 521 - disable_irq 522 - 523 - 2: 524 - /* save r31 */ 525 - swi r31, r0, PER_CPU(CURRENT_SAVE) 526 - /* save mode indicator */ 527 - lwi r18, r1, PT_MODE 528 - swi r18, r0, PER_CPU(KM) 529 - //restore_context: 530 - /* special purpose registers */ 531 - lwi r18, r1, PT_FSR 532 - mts rfsr, r18 533 - lwi r18, r1, PT_ESR 534 - mts resr, r18 535 - lwi r18, r1, PT_EAR 536 - mts rear, r18 537 - lwi r18, r1, PT_MSR 538 - mts rmsr, r18 539 - 540 - lwi r31, r1, PT_R31 541 - lwi r30, r1, PT_R30 542 - lwi r29, r1, PT_R29 543 - lwi r28, r1, PT_R28 544 - lwi r27, r1, PT_R27 545 - lwi r26, r1, PT_R26 546 - lwi r25, r1, PT_R25 547 - lwi r24, r1, PT_R24 
548 - lwi r23, r1, PT_R23 549 - lwi r22, r1, PT_R22 550 - lwi r21, r1, PT_R21 551 - lwi r20, r1, PT_R20 552 - lwi r19, r1, PT_R19 553 - lwi r18, r1, PT_R18 554 - lwi r17, r1, PT_R17 555 - lwi r16, r1, PT_R16 556 - lwi r15, r1, PT_R15 557 - lwi r14, r1, PT_PC 558 - lwi r13, r1, PT_R13 559 - lwi r12, r1, PT_R12 560 - lwi r11, r1, PT_R11 561 - lwi r10, r1, PT_R10 562 - lwi r9, r1, PT_R9 563 - lwi r8, r1, PT_R8 564 - lwi r7, r1, PT_R7 565 - lwi r6, r1, PT_R6 566 - lwi r5, r1, PT_R5 567 - lwi r4, r1, PT_R4 /* return val */ 568 - lwi r3, r1, PT_R3 /* return val */ 569 - lwi r2, r1, PT_R2 570 - lwi r1, r1, PT_R1 571 - 572 - rtid r14, 0 573 - nop 574 - 575 - sys_rt_sigreturn_wrapper: 576 - addk r30, r0, r0 /* no restarts for this one */ 577 - brid sys_rt_sigreturn 578 - addk r5, r1, r0 579 - 580 - /* Interrupt vector table */ 581 - .section .init.ivt, "ax" 582 - .org 0x0 583 - brai _reset 584 - brai _user_exception 585 - brai _interrupt 586 - brai _break 587 - brai _hw_exception_handler 588 - .org 0x60 589 - brai _debug_exception 590 - 591 - .section .rodata,"a" 592 - #include "syscall_table.S" 593 - 594 - syscall_table_size=(.-sys_call_table) 595 - 596 - type_SYSCALL: 597 - .ascii "SYSCALL\0" 598 - type_IRQ: 599 - .ascii "IRQ\0" 600 - type_IRQ_PREEMPT: 601 - .ascii "IRQ (PREEMPTED)\0" 602 - type_SYSCALL_PREEMPT: 603 - .ascii " SYSCALL (PREEMPTED)\0" 604 - 605 - /* 606 - * Trap decoding for stack unwinder 607 - * Tuples are (start addr, end addr, string) 608 - * If return address lies on [start addr, end addr], 609 - * unwinder displays 'string' 610 - */ 611 - 612 - .align 4 613 - .global microblaze_trap_handlers 614 - microblaze_trap_handlers: 615 - /* Exact matches come first */ 616 - .word ret_to_user ; .word ret_to_user ; .word type_SYSCALL 617 - .word ret_from_intr; .word ret_from_intr ; .word type_IRQ 618 - /* Fuzzy matches go here */ 619 - .word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT 620 - .word work_pending ; .word no_work_pending; .word 
type_SYSCALL_PREEMPT 621 - /* End of table */ 622 - .word 0 ; .word 0 ; .word 0
-5
arch/microblaze/kernel/exceptions.c
··· 69 69 asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, 70 70 int fsr, int addr) 71 71 { 72 - #ifdef CONFIG_MMU 73 72 addr = regs->pc; 74 - #endif 75 73 76 74 #if 0 77 75 pr_warn("Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n", ··· 130 132 fsr = FPE_FLTRES; 131 133 _exception(SIGFPE, regs, fsr, addr); 132 134 break; 133 - 134 - #ifdef CONFIG_MMU 135 135 case MICROBLAZE_PRIVILEGED_EXCEPTION: 136 136 pr_debug("Privileged exception\n"); 137 137 _exception(SIGILL, regs, ILL_PRVOPC, addr); 138 138 break; 139 - #endif 140 139 default: 141 140 /* FIXME what to do in unexpected exception */ 142 141 pr_warn("Unexpected exception %02x PC=%08x in %s mode\n",
-12
arch/microblaze/kernel/head.S
··· 34 34 #include <asm/page.h> 35 35 #include <linux/of_fdt.h> /* for OF_DT_HEADER */ 36 36 37 - #ifdef CONFIG_MMU 38 37 #include <asm/setup.h> /* COMMAND_LINE_SIZE */ 39 38 #include <asm/mmu.h> 40 39 #include <asm/processor.h> ··· 46 47 .global swapper_pg_dir 47 48 swapper_pg_dir: 48 49 .space PAGE_SIZE 49 - 50 - #endif /* CONFIG_MMU */ 51 50 52 51 .section .rodata 53 52 .align 4 ··· 104 107 bgtid r3, _copy_fdt /* loop for all entries */ 105 108 addik r3, r3, -4 /* descrement loop */ 106 109 no_fdt_arg: 107 - 108 - #ifdef CONFIG_MMU 109 110 110 111 #ifndef CONFIG_CMDLINE_BOOL 111 112 /* ··· 324 329 nop 325 330 326 331 start_here: 327 - #endif /* CONFIG_MMU */ 328 332 329 333 /* Initialize small data anchors */ 330 334 addik r13, r0, _KERNEL_SDA_BASE_ ··· 339 345 brald r15, r11 340 346 nop 341 347 342 - #ifndef CONFIG_MMU 343 - addik r15, r0, machine_halt 344 - braid start_kernel 345 - nop 346 - #else 347 348 /* 348 349 * Initialize the MMU. 349 350 */ ··· 372 383 nop 373 384 rted r17, 0 /* enable MMU and jump to start_kernel */ 374 385 nop 375 - #endif /* CONFIG_MMU */
+1 -129
arch/microblaze/kernel/hw_exception_handler.S
··· 80 80 /* Helpful Macros */ 81 81 #define NUM_TO_REG(num) r ## num 82 82 83 - #ifdef CONFIG_MMU 84 83 #define RESTORE_STATE \ 85 84 lwi r5, r1, 0; \ 86 85 mts rmsr, r5; \ ··· 91 92 lwi r11, r1, PT_R11; \ 92 93 lwi r31, r1, PT_R31; \ 93 94 lwi r1, r1, PT_R1; 94 - #endif /* CONFIG_MMU */ 95 95 96 96 #define LWREG_NOP \ 97 97 bri ex_handler_unhandled; \ ··· 99 101 #define SWREG_NOP \ 100 102 bri ex_handler_unhandled; \ 101 103 nop; 102 - 103 - /* FIXME this is weird - for noMMU kernel is not possible to use brid 104 - * instruction which can shorten executed time 105 - */ 106 104 107 105 /* r3 is the source */ 108 106 #define R3_TO_LWREG_V(regnum) \ ··· 120 126 or r3, r0, NUM_TO_REG (regnum); \ 121 127 bri ex_sw_tail; 122 128 123 - #ifdef CONFIG_MMU 124 129 #define R3_TO_LWREG_VM_V(regnum) \ 125 130 brid ex_lw_end_vm; \ 126 131 swi r3, r7, 4 * regnum; ··· 186 193 .endm 187 194 #endif 188 195 189 - #endif /* CONFIG_MMU */ 190 196 191 197 .extern other_exception_handler /* Defined in exception.c */ 192 198 ··· 243 251 */ 244 252 245 253 /* wrappers to restore state before coming to entry.S */ 246 - #ifdef CONFIG_MMU 247 254 .section .data 248 255 .align 4 249 256 pt_pool_space: ··· 307 316 .long TOPHYS(ex_handler_unhandled) 308 317 .long TOPHYS(ex_handler_unhandled) 309 318 .long TOPHYS(ex_handler_unhandled) 310 - #endif 311 319 312 320 .global _hw_exception_handler 313 321 .section .text 314 322 .align 4 315 323 .ent _hw_exception_handler 316 324 _hw_exception_handler: 317 - #ifndef CONFIG_MMU 318 - addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */ 319 - #else 320 325 swi r1, r0, TOPHYS(pt_pool_space + PT_R1); /* GET_SP */ 321 326 /* Save date to kernel memory. 
Here is the problem 322 327 * when you came from user space */ 323 328 ori r1, r0, TOPHYS(pt_pool_space); 324 - #endif 325 329 swi r3, r1, PT_R3 326 330 swi r4, r1, PT_R4 327 331 swi r5, r1, PT_R5 328 332 swi r6, r1, PT_R6 329 333 330 - #ifdef CONFIG_MMU 331 334 swi r11, r1, PT_R11 332 335 swi r31, r1, PT_R31 333 336 lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)) /* get saved current */ 334 - #endif 335 337 336 338 mfs r5, rmsr; 337 339 nop ··· 334 350 mfs r3, rear; 335 351 nop 336 352 337 - #ifndef CONFIG_MMU 338 - andi r5, r4, 0x1000; /* Check ESR[DS] */ 339 - beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */ 340 - mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ 341 - nop 342 - not_in_delay_slot: 343 - swi r17, r1, PT_R17 344 - #endif 345 - 346 353 andi r5, r4, 0x1F; /* Extract ESR[EXC] */ 347 354 348 - #ifdef CONFIG_MMU 349 355 /* Calculate exception vector offset = r5 << 2 */ 350 356 addk r6, r5, r5; /* << 1 */ 351 357 addk r6, r6, r6; /* << 2 */ ··· 357 383 full_exception_trapw: 358 384 RESTORE_STATE 359 385 bri full_exception_trap 360 - #else 361 - /* Exceptions enabled here. 
This will allow nested exceptions */ 362 - mfs r6, rmsr; 363 - nop 364 - swi r6, r1, 0; /* RMSR_OFFSET */ 365 - ori r6, r6, 0x100; /* Turn ON the EE bit */ 366 - andi r6, r6, ~2; /* Disable interrupts */ 367 - mts rmsr, r6; 368 - nop 369 - 370 - xori r6, r5, 1; /* 00001 = Unaligned Exception */ 371 - /* Jump to unalignment exception handler */ 372 - beqi r6, handle_unaligned_ex; 373 - 374 - handle_other_ex: /* Handle Other exceptions here */ 375 - /* Save other volatiles before we make procedure calls below */ 376 - swi r7, r1, PT_R7 377 - swi r8, r1, PT_R8 378 - swi r9, r1, PT_R9 379 - swi r10, r1, PT_R10 380 - swi r11, r1, PT_R11 381 - swi r12, r1, PT_R12 382 - swi r14, r1, PT_R14 383 - swi r15, r1, PT_R15 384 - swi r18, r1, PT_R18 385 - 386 - or r5, r1, r0 387 - andi r6, r4, 0x1F; /* Load ESR[EC] */ 388 - lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */ 389 - swi r7, r1, PT_MODE 390 - mfs r7, rfsr 391 - nop 392 - addk r8, r17, r0; /* Load exception address */ 393 - bralid r15, full_exception; /* Branch to the handler */ 394 - nop; 395 - mts rfsr, r0; /* Clear sticky fsr */ 396 - nop 397 - 398 - /* 399 - * Trigger execution of the signal handler by enabling 400 - * interrupts and calling an invalid syscall. 
401 - */ 402 - mfs r5, rmsr; 403 - nop 404 - ori r5, r5, 2; 405 - mts rmsr, r5; /* enable interrupt */ 406 - nop 407 - addi r12, r0, __NR_syscalls; 408 - brki r14, 0x08; 409 - mfs r5, rmsr; /* disable interrupt */ 410 - nop 411 - andi r5, r5, ~2; 412 - mts rmsr, r5; 413 - nop 414 - 415 - lwi r7, r1, PT_R7 416 - lwi r8, r1, PT_R8 417 - lwi r9, r1, PT_R9 418 - lwi r10, r1, PT_R10 419 - lwi r11, r1, PT_R11 420 - lwi r12, r1, PT_R12 421 - lwi r14, r1, PT_R14 422 - lwi r15, r1, PT_R15 423 - lwi r18, r1, PT_R18 424 - 425 - bri ex_handler_done; /* Complete exception handling */ 426 - #endif 427 386 428 387 /* 0x01 - Unaligned data access exception 429 388 * This occurs when a word access is not aligned on a word boundary, ··· 370 463 * R4 = ESR 371 464 * R3 = EAR 372 465 */ 373 - #ifdef CONFIG_MMU 374 466 andi r6, r4, 0x1000 /* Check ESR[DS] */ 375 467 beqi r6, _no_delayslot /* Branch if ESR[DS] not set */ 376 468 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ ··· 378 472 /* jump to high level unaligned handler */ 379 473 RESTORE_STATE; 380 474 bri unaligned_data_trap 381 - #endif 475 + 382 476 andi r6, r4, 0x3E0; /* Mask and extract the register operand */ 383 477 srl r6, r6; /* r6 >> 5 */ 384 478 srl r6, r6; ··· 464 558 ex_sw_end: /* Exception handling of store word, ends. */ 465 559 466 560 ex_handler_done: 467 - #ifndef CONFIG_MMU 468 - lwi r5, r1, 0 /* RMSR */ 469 - mts rmsr, r5 470 - nop 471 - lwi r3, r1, PT_R3 472 - lwi r4, r1, PT_R4 473 - lwi r5, r1, PT_R5 474 - lwi r6, r1, PT_R6 475 - lwi r17, r1, PT_R17 476 - 477 - rted r17, 0 478 - addik r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */ 479 - #else 480 561 RESTORE_STATE; 481 562 rted r17, 0 482 563 nop 483 - #endif 484 564 485 - #ifdef CONFIG_MMU 486 565 /* Exception vector entry code. This code runs with address translation 487 566 * turned off (i.e. using physical addresses). */ 488 567 ··· 773 882 * bits 20 and 21 are zero. 
774 883 */ 775 884 andi r3, r3, PAGE_MASK 776 - #ifdef CONFIG_MICROBLAZE_64K_PAGES 777 - ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_64K) 778 - #elif CONFIG_MICROBLAZE_16K_PAGES 779 - ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_16K) 780 - #else 781 885 ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_4K) 782 - #endif 783 886 mts rtlbhi, r3 /* Load TLB HI */ 784 887 nop 785 888 ··· 811 926 rtsd r15,8 812 927 nop 813 928 814 - #endif 815 929 .end _hw_exception_handler 816 930 817 - #ifdef CONFIG_MMU 818 931 /* Unaligned data access exception last on a 4k page for MMU. 819 932 * When this is called, we are in virtual mode with exceptions enabled 820 933 * and registers 1-13,15,17,18 saved. ··· 927 1044 .word store6,ex_unaligned_fixup; 928 1045 .previous; 929 1046 .end _unaligned_data_exception 930 - #endif /* CONFIG_MMU */ 931 1047 932 1048 .global ex_handler_unhandled 933 1049 ex_handler_unhandled: ··· 975 1093 lw_r28: R3_TO_LWREG (28); 976 1094 lw_r29: R3_TO_LWREG (29); 977 1095 lw_r30: R3_TO_LWREG (30); 978 - #ifdef CONFIG_MMU 979 1096 lw_r31: R3_TO_LWREG_V (31); 980 - #else 981 - lw_r31: R3_TO_LWREG (31); 982 - #endif 983 1097 984 1098 sw_table: 985 1099 sw_r0: SWREG_TO_R3 (0); ··· 1009 1131 sw_r28: SWREG_TO_R3 (28); 1010 1132 sw_r29: SWREG_TO_R3 (29); 1011 1133 sw_r30: SWREG_TO_R3 (30); 1012 - #ifdef CONFIG_MMU 1013 1134 sw_r31: SWREG_TO_R3_V (31); 1014 - #else 1015 - sw_r31: SWREG_TO_R3 (31); 1016 - #endif 1017 1135 1018 - #ifdef CONFIG_MMU 1019 1136 lw_table_vm: 1020 1137 lw_r0_vm: R3_TO_LWREG_VM (0); 1021 1138 lw_r1_vm: R3_TO_LWREG_VM_V (1); ··· 1078 1205 sw_r29_vm: SWREG_TO_R3_VM_V (29); 1079 1206 sw_r30_vm: SWREG_TO_R3_VM_V (30); 1080 1207 sw_r31_vm: SWREG_TO_R3_VM_V (31); 1081 - #endif /* CONFIG_MMU */ 1082 1208 1083 1209 /* Temporary data structures used in the handler */ 1084 1210 .section .data
-2
arch/microblaze/kernel/microblaze_ksyms.c
··· 33 33 EXPORT_SYMBOL(memmove); 34 34 #endif 35 35 36 - #ifdef CONFIG_MMU 37 36 EXPORT_SYMBOL(empty_zero_page); 38 - #endif 39 37 40 38 EXPORT_SYMBOL(mbc); 41 39
-10
arch/microblaze/kernel/process.c
··· 69 69 ti->cpu_context.r19 = (unsigned long)arg; 70 70 childregs->pt_mode = 1; 71 71 local_save_flags(childregs->msr); 72 - #ifdef CONFIG_MMU 73 72 ti->cpu_context.msr = childregs->msr & ~MSR_IE; 74 - #endif 75 73 ti->cpu_context.r15 = (unsigned long)ret_from_kernel_thread - 8; 76 74 return 0; 77 75 } ··· 79 81 80 82 memset(&ti->cpu_context, 0, sizeof(struct cpu_context)); 81 83 ti->cpu_context.r1 = (unsigned long)childregs; 82 - #ifndef CONFIG_MMU 83 - ti->cpu_context.msr = (unsigned long)childregs->msr; 84 - #else 85 84 childregs->msr |= MSR_UMS; 86 85 87 86 /* we should consider the fact that childregs is a copy of the parent ··· 100 105 ti->cpu_context.msr = (childregs->msr|MSR_VM); 101 106 ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */ 102 107 ti->cpu_context.msr &= ~MSR_IE; 103 - #endif 104 108 ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8; 105 109 106 110 /* ··· 124 130 regs->pc = pc; 125 131 regs->r1 = usp; 126 132 regs->pt_mode = 0; 127 - #ifdef CONFIG_MMU 128 133 regs->msr |= MSR_UMS; 129 134 regs->msr &= ~MSR_VM; 130 - #endif 131 135 } 132 136 133 - #ifdef CONFIG_MMU 134 137 #include <linux/elfcore.h> 135 138 /* 136 139 * Set up a thread for executing a new program ··· 136 145 { 137 146 return 0; /* MicroBlaze has no separate FPU registers */ 138 147 } 139 - #endif /* CONFIG_MMU */ 140 148 141 149 void arch_cpu_idle(void) 142 150 {
+1 -3
arch/microblaze/kernel/setup.c
··· 9 9 */ 10 10 11 11 #include <linux/init.h> 12 - #include <linux/clk-provider.h> 12 + #include <linux/of_clk.h> 13 13 #include <linux/clocksource.h> 14 14 #include <linux/string.h> 15 15 #include <linux/seq_file.h> ··· 190 190 } 191 191 arch_initcall(microblaze_debugfs_init); 192 192 193 - # ifdef CONFIG_MMU 194 193 static int __init debugfs_tlb(void) 195 194 { 196 195 debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root, &tlb_skip); 197 196 return 0; 198 197 } 199 198 device_initcall(debugfs_tlb); 200 - # endif 201 199 #endif
+2 -8
arch/microblaze/kernel/signal.c
··· 157 157 struct rt_sigframe __user *frame; 158 158 int err = 0, sig = ksig->sig; 159 159 unsigned long address = 0; 160 - #ifdef CONFIG_MMU 161 160 pmd_t *pmdp; 162 161 pte_t *ptep; 163 - #endif 164 162 165 163 frame = get_sigframe(ksig, regs, sizeof(*frame)); 166 164 ··· 190 192 regs->r15 = ((unsigned long)frame->tramp)-8; 191 193 192 194 address = ((unsigned long)frame->tramp); 193 - #ifdef CONFIG_MMU 194 195 pmdp = pmd_off(current->mm, address); 195 196 196 197 preempt_disable(); ··· 205 208 } 206 209 pte_unmap(ptep); 207 210 preempt_enable(); 208 - #else 209 - flush_icache_range(address, address + 8); 210 - flush_dcache_range(address, address + 8); 211 - #endif 212 211 if (err) 213 212 return -EFAULT; 214 213 ··· 306 313 307 314 asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall) 308 315 { 309 - if (test_thread_flag(TIF_SIGPENDING)) 316 + if (test_thread_flag(TIF_SIGPENDING) || 317 + test_thread_flag(TIF_NOTIFY_SIGNAL)) 310 318 do_signal(regs, in_syscall); 311 319 312 320 if (test_thread_flag(TIF_NOTIFY_RESUME))
-19
arch/microblaze/kernel/unwind.c
··· 161 161 * unwind_trap - Unwind through a system trap, that stored previous state 162 162 * on the stack. 163 163 */ 164 - #ifdef CONFIG_MMU 165 164 static inline void unwind_trap(struct task_struct *task, unsigned long pc, 166 165 unsigned long fp, struct stack_trace *trace, 167 166 const char *loglvl) 168 167 { 169 168 /* To be implemented */ 170 169 } 171 - #else 172 - static inline void unwind_trap(struct task_struct *task, unsigned long pc, 173 - unsigned long fp, struct stack_trace *trace, 174 - const char *loglvl) 175 - { 176 - const struct pt_regs *regs = (const struct pt_regs *) fp; 177 - microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace, loglvl); 178 - } 179 - #endif 180 170 181 171 /** 182 172 * microblaze_unwind_inner - Unwind the stack from the specified point ··· 205 215 * HW exception handler doesn't save all registers, 206 216 * so we open-code a special case of unwind_trap() 207 217 */ 208 - #ifndef CONFIG_MMU 209 - const struct pt_regs *regs = 210 - (const struct pt_regs *) fp; 211 - #endif 212 218 printk("%sHW EXCEPTION\n", loglvl); 213 - #ifndef CONFIG_MMU 214 - microblaze_unwind_inner(task, regs->r17 - 4, 215 - fp + EX_HANDLER_STACK_SIZ, 216 - regs->r15, trace, loglvl); 217 - #endif 218 219 return; 219 220 } 220 221
+1 -3
arch/microblaze/mm/Makefile
··· 3 3 # Makefile 4 4 # 5 5 6 - obj-y := consistent.o init.o 7 - 8 - obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o 6 + obj-y := consistent.o init.o pgtable.o mmu_context.o fault.o
-29
arch/microblaze/mm/consistent.c
··· 21 21 22 22 flush_dcache_range(paddr, paddr + size); 23 23 } 24 - 25 - #ifndef CONFIG_MMU 26 - /* 27 - * Consistent memory allocators. Used for DMA devices that want to share 28 - * uncached memory with the processor core. My crufty no-MMU approach is 29 - * simple. In the HW platform we can optionally mirror the DDR up above the 30 - * processor cacheable region. So, memory accessed in this mirror region will 31 - * not be cached. It's alloced from the same pool as normal memory, but the 32 - * handle we return is shifted up into the uncached region. This will no doubt 33 - * cause big problems if memory allocated here is not also freed properly. -- JW 34 - * 35 - * I have to use dcache values because I can't relate on ram size: 36 - */ 37 - #ifdef CONFIG_XILINX_UNCACHED_SHADOW 38 - #define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1) 39 - #else 40 - #define UNCACHED_SHADOW_MASK 0 41 - #endif /* CONFIG_XILINX_UNCACHED_SHADOW */ 42 - 43 - void *arch_dma_set_uncached(void *ptr, size_t size) 44 - { 45 - unsigned long addr = (unsigned long)ptr; 46 - 47 - addr |= UNCACHED_SHADOW_MASK; 48 - if (addr > cpuinfo.dcache_base && addr < cpuinfo.dcache_high) 49 - pr_warn("ERROR: Your cache coherent area is CACHED!!!\n"); 50 - return (void *)addr; 51 - } 52 - #endif /* CONFIG_MMU */
-49
arch/microblaze/mm/init.c
··· 29 29 /* Use for MMU and noMMU because of PCI generic code */ 30 30 int mem_init_done; 31 31 32 - #ifndef CONFIG_MMU 33 - unsigned int __page_offset; 34 - EXPORT_SYMBOL(__page_offset); 35 - #endif /* CONFIG_MMU */ 36 - 37 32 char *klimit = _end; 38 33 39 34 /* ··· 72 77 static void __init paging_init(void) 73 78 { 74 79 unsigned long zones_size[MAX_NR_ZONES]; 75 - #ifdef CONFIG_MMU 76 80 int idx; 77 81 78 82 /* Setup fixmaps */ 79 83 for (idx = 0; idx < __end_of_fixed_addresses; idx++) 80 84 clear_fixmap(idx); 81 - #endif 82 85 83 86 /* Clean every zones */ 84 87 memset(zones_size, 0, sizeof(zones_size)); ··· 96 103 97 104 void __init setup_memory(void) 98 105 { 99 - #ifndef CONFIG_MMU 100 - u32 kernel_align_start, kernel_align_size; 101 - phys_addr_t start, end; 102 - u64 i; 103 - 104 - /* Find main memory where is the kernel */ 105 - for_each_mem_range(i, &start, &end) { 106 - memory_start = start; 107 - lowmem_size = end - start; 108 - if ((memory_start <= (u32)_text) && 109 - ((u32)_text <= (memory_start + lowmem_size - 1))) { 110 - memory_size = lowmem_size; 111 - PAGE_OFFSET = memory_start; 112 - pr_info("%s: Main mem: 0x%x, size 0x%08x\n", 113 - __func__, (u32) memory_start, 114 - (u32) memory_size); 115 - break; 116 - } 117 - } 118 - 119 - if (!memory_start || !memory_size) { 120 - panic("%s: Missing memory setting 0x%08x, size=0x%08x\n", 121 - __func__, (u32) memory_start, (u32) memory_size); 122 - } 123 - 124 - /* reservation of region where is the kernel */ 125 - kernel_align_start = PAGE_DOWN((u32)_text); 126 - /* ALIGN can be remove because _end in vmlinux.lds.S is align */ 127 - kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start; 128 - pr_info("%s: kernel addr:0x%08x-0x%08x size=0x%08x\n", 129 - __func__, kernel_align_start, kernel_align_start 130 - + kernel_align_size, kernel_align_size); 131 - memblock_reserve(kernel_align_start, kernel_align_size); 132 - #endif 133 106 /* 134 107 * Kernel: 135 108 * start: base phys address of kernel 
- page align ··· 135 176 mem_init_done = 1; 136 177 } 137 178 138 - #ifndef CONFIG_MMU 139 - int page_is_ram(unsigned long pfn) 140 - { 141 - return __range_ok(pfn, 0); 142 - } 143 - #else 144 179 int page_is_ram(unsigned long pfn) 145 180 { 146 181 return pfn < max_low_pfn; ··· 277 324 MEMBLOCK_LOW_LIMIT, memory_start + kernel_tlb, 278 325 NUMA_NO_NODE); 279 326 } 280 - 281 - #endif /* CONFIG_MMU */ 282 327 283 328 void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask) 284 329 {
-2
arch/microblaze/pci/pci-common.c
··· 325 325 * memory, effectively behaving just like /dev/zero 326 326 */ 327 327 if ((offset + size) > hose->isa_mem_size) { 328 - #ifdef CONFIG_MMU 329 328 pr_debug("Process %s (pid:%d) mapped non-existing PCI", 330 329 current->comm, current->pid); 331 330 pr_debug("legacy memory for 0%04x:%02x\n", 332 331 pci_domain_nr(bus), bus->number); 333 - #endif 334 332 if (vma->vm_flags & VM_SHARED) 335 333 return shmem_zero_setup(vma); 336 334 return 0;