Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm

Pull more ARM updates from Russell King.

This got a fair number of conflicts with the <asm/system.h> split, but
also with some other sparse-irq and header file include cleanups. They
all looked pretty trivial, though.

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (59 commits)
ARM: fix Kconfig warning for HAVE_BPF_JIT
ARM: 7361/1: provide XIP_VIRT_ADDR for no-MMU builds
ARM: 7349/1: integrator: convert to sparse irqs
ARM: 7259/3: net: JIT compiler for packet filters
ARM: 7334/1: add jump label support
ARM: 7333/2: jump label: detect %c support for ARM
ARM: 7338/1: add support for early console output via semihosting
ARM: use set_current_blocked() and block_sigmask()
ARM: exec: remove redundant set_fs(USER_DS)
ARM: 7332/1: extract out code patch function from kprobes
ARM: 7331/1: extract out insn generation code from ftrace
ARM: 7330/1: ftrace: use canonical Thumb-2 wide instruction format
ARM: 7351/1: ftrace: remove useless memory checks
ARM: 7316/1: kexec: EOI active and mask all interrupts in kexec crash path
ARM: Versatile Express: add NO_IOPORT
ARM: get rid of asm/irq.h in asm/prom.h
ARM: 7319/1: Print debug info for SIGBUS in user faults
ARM: 7318/1: gic: refactor irq_start assignment
ARM: 7317/1: irq: avoid NULL check in for_each_irq_desc loop
ARM: 7315/1: perf: add support for the Cortex-A7 PMU
...

+2236 -534
+6 -17
arch/arm/Kconfig
··· 9 9 select SYS_SUPPORTS_APM_EMULATION 10 10 select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI) 11 11 select HAVE_OPROFILE if (HAVE_PERF_EVENTS) 12 + select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL 12 13 select HAVE_ARCH_KGDB 13 14 select HAVE_KPROBES if !XIP_KERNEL 14 15 select HAVE_KRETPROBES if (HAVE_KPROBES) ··· 22 21 select HAVE_KERNEL_GZIP 23 22 select HAVE_KERNEL_LZO 24 23 select HAVE_KERNEL_LZMA 24 + select HAVE_KERNEL_XZ 25 25 select HAVE_IRQ_WORK 26 26 select HAVE_PERF_EVENTS 27 27 select PERF_USE_VMALLOC ··· 30 28 select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)) 31 29 select HAVE_C_RECORDMCOUNT 32 30 select HAVE_GENERIC_HARDIRQS 33 - select HAVE_SPARSE_IRQ 34 31 select GENERIC_IRQ_SHOW 35 32 select CPU_PM if (SUSPEND || CPU_IDLE) 36 33 select GENERIC_PCI_IOMAP 34 + select HAVE_BPF_JIT if NET 37 35 help 38 36 The ARM series is a line of low-power-consumption RISC chip designs 39 37 licensed by ARM Ltd and targeted at embedded applications and ··· 52 50 bool 53 51 54 52 config SYS_SUPPORTS_APM_EMULATION 55 - bool 56 - 57 - config HAVE_SCHED_CLOCK 58 53 bool 59 54 60 55 config GENERIC_GPIO ··· 268 269 select PLAT_VERSATILE 269 270 select PLAT_VERSATILE_FPGA_IRQ 270 271 select NEED_MACH_MEMORY_H 272 + select SPARSE_IRQ 271 273 help 272 274 Support for ARM's Integrator platform. 273 275 ··· 315 315 select HAVE_CLK 316 316 select HAVE_PATA_PLATFORM 317 317 select ICST 318 + select NO_IOPORT 318 319 select PLAT_VERSATILE 319 320 select PLAT_VERSATILE_CLCD 320 321 help ··· 355 354 select GENERIC_CLOCKEVENTS 356 355 select HAVE_ARM_SCU 357 356 select HAVE_SMP 357 + select SPARSE_IRQ 358 358 select USE_OF 359 359 help 360 360 Support for the Calxeda Highbank SoC based boards. 
··· 444 442 select CLKDEV_LOOKUP 445 443 select CLKSRC_MMIO 446 444 select GENERIC_IRQ_CHIP 447 - select HAVE_SCHED_CLOCK 448 445 select MULTI_IRQ_HANDLER 449 446 help 450 447 Support for Freescale MXC/iMX-based family of processors ··· 538 537 select CPU_XSCALE 539 538 select GENERIC_GPIO 540 539 select GENERIC_CLOCKEVENTS 541 - select HAVE_SCHED_CLOCK 542 540 select MIGHT_HAVE_PCI 543 541 select DMABOUNCE if PCI 544 542 help ··· 608 608 select CLKDEV_LOOKUP 609 609 select GENERIC_CLOCKEVENTS 610 610 select GPIO_PXA 611 - select HAVE_SCHED_CLOCK 612 611 select TICK_ONESHOT 613 612 select PLAT_PXA 614 613 select SPARSE_IRQ ··· 648 649 select GENERIC_CLOCKEVENTS 649 650 select GENERIC_GPIO 650 651 select HAVE_CLK 651 - select HAVE_SCHED_CLOCK 652 652 select HAVE_SMP 653 653 select MIGHT_HAVE_CACHE_L2X0 654 654 select ARCH_HAS_CPUFREQ ··· 664 666 select DW_APB_TIMER 665 667 select GENERIC_CLOCKEVENTS 666 668 select GENERIC_GPIO 667 - select HAVE_SCHED_CLOCK 668 669 select HAVE_TCM 669 670 select NO_IOPORT 670 671 select SPARSE_IRQ ··· 691 694 select ARCH_REQUIRE_GPIOLIB 692 695 select GENERIC_CLOCKEVENTS 693 696 select GPIO_PXA 694 - select HAVE_SCHED_CLOCK 695 697 select TICK_ONESHOT 696 698 select PLAT_PXA 697 699 select SPARSE_IRQ ··· 757 761 select CPU_FREQ 758 762 select GENERIC_CLOCKEVENTS 759 763 select CLKDEV_LOOKUP 760 - select HAVE_SCHED_CLOCK 761 764 select TICK_ONESHOT 762 765 select ARCH_REQUIRE_GPIOLIB 763 766 select HAVE_IDE ··· 813 818 select CLKSRC_MMIO 814 819 select HAVE_S3C2410_WATCHDOG if WATCHDOG 815 820 select GENERIC_CLOCKEVENTS 816 - select HAVE_SCHED_CLOCK 817 821 select HAVE_S3C2410_I2C if I2C 818 822 select HAVE_S3C_RTC if RTC_CLASS 819 823 help ··· 843 849 select CLKSRC_MMIO 844 850 select ARCH_HAS_CPUFREQ 845 851 select GENERIC_CLOCKEVENTS 846 - select HAVE_SCHED_CLOCK 847 852 select HAVE_S3C2410_I2C if I2C 848 853 select HAVE_S3C_RTC if RTC_CLASS 849 854 select HAVE_S3C2410_WATCHDOG if WATCHDOG ··· 885 892 depends on MMU 886 893 select 
CLKSRC_MMIO 887 894 select CPU_ARM926T 888 - select HAVE_SCHED_CLOCK 889 895 select HAVE_TCM 890 896 select ARM_AMBA 891 897 select ARM_PATCH_PHYS_VIRT ··· 943 951 select ARCH_HAS_CPUFREQ 944 952 select CLKSRC_MMIO 945 953 select GENERIC_CLOCKEVENTS 946 - select HAVE_SCHED_CLOCK 947 954 select ARCH_HAS_HOLES_MEMORYMODEL 948 955 help 949 956 Support for TI's OMAP platform (OMAP1/2/3/4). ··· 1106 1115 config PLAT_IOP 1107 1116 bool 1108 1117 select GENERIC_CLOCKEVENTS 1109 - select HAVE_SCHED_CLOCK 1110 1118 1111 1119 config PLAT_ORION 1112 1120 bool 1113 1121 select CLKSRC_MMIO 1114 1122 select GENERIC_IRQ_CHIP 1115 - select HAVE_SCHED_CLOCK 1116 1123 1117 1124 config PLAT_PXA 1118 1125 bool
+16
arch/arm/Kconfig.debug
··· 292 292 Note that the system will appear to hang during boot if there 293 293 is nothing connected to read from the DCC. 294 294 295 + config DEBUG_SEMIHOSTING 296 + bool "Kernel low-level debug output via semihosting I/O" 297 + help 298 + Semihosting enables code running on an ARM target to use 299 + the I/O facilities on a host debugger/emulator through a 300 + simple SVC call. The host debugger or emulator must have 301 + semihosting enabled for the special svc call to be trapped 302 + otherwise the kernel will crash. 303 + 304 + This is known to work with OpenOCD, as well as 305 + ARM's Fast Models, or any other controlling environment 306 + that implements semihosting. 307 + 308 + For more details about semihosting, please see 309 + chapter 8 of DUI0203I_rvct_developer_guide.pdf from ARM Ltd. 310 + 295 311 endchoice 296 312 297 313 config EARLY_PRINTK
+1
arch/arm/Makefile
··· 253 253 254 254 # If we have a machine-specific directory, then include it in the build. 255 255 core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/ 256 + core-y += arch/arm/net/ 256 257 core-y += $(machdirs) $(platdirs) 257 258 258 259 drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/
+2
arch/arm/boot/compressed/.gitignore
··· 1 + ashldi3.S 1 2 font.c 2 3 lib1funcs.S 3 4 piggy.gzip 4 5 piggy.lzo 5 6 piggy.lzma 7 + piggy.xzkern 6 8 vmlinux 7 9 vmlinux.lds 8 10
+12 -3
arch/arm/boot/compressed/Makefile
··· 92 92 suffix_$(CONFIG_KERNEL_GZIP) = gzip 93 93 suffix_$(CONFIG_KERNEL_LZO) = lzo 94 94 suffix_$(CONFIG_KERNEL_LZMA) = lzma 95 + suffix_$(CONFIG_KERNEL_XZ) = xzkern 95 96 96 97 # Borrowed libfdt files for the ATAG compatibility mode 97 98 ··· 113 112 114 113 targets := vmlinux vmlinux.lds \ 115 114 piggy.$(suffix_y) piggy.$(suffix_y).o \ 116 - lib1funcs.o lib1funcs.S font.o font.c head.o misc.o $(OBJS) 115 + lib1funcs.o lib1funcs.S ashldi3.o ashldi3.S \ 116 + font.o font.c head.o misc.o $(OBJS) 117 117 118 118 # Make sure files are removed during clean 119 - extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S $(libfdt) $(libfdt_hdrs) 119 + extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \ 120 + lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) 120 121 121 122 ifeq ($(CONFIG_FUNCTION_TRACER),y) 122 123 ORIG_CFLAGS := $(KBUILD_CFLAGS) ··· 154 151 $(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S 155 152 $(call cmd,shipped) 156 153 154 + # For __aeabi_llsl 155 + ashldi3 = $(obj)/ashldi3.o 156 + 157 + $(obj)/ashldi3.S: $(srctree)/arch/$(SRCARCH)/lib/ashldi3.S 158 + $(call cmd,shipped) 159 + 157 160 # We need to prevent any GOTOFF relocs being used with references 158 161 # to symbols in the .bss section since we cannot relocate them 159 162 # independently from the rest at run time. This can be achieved by ··· 181 172 fi 182 173 183 174 $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \ 184 - $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) FORCE 175 + $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) FORCE 185 176 @$(check_for_multiple_zreladdr) 186 177 $(call if_changed,ld) 187 178 @$(check_for_bad_syms)
+6
arch/arm/boot/compressed/decompress.c
··· 44 44 #include "../../../../lib/decompress_unlzma.c" 45 45 #endif 46 46 47 + #ifdef CONFIG_KERNEL_XZ 48 + #define memmove memmove 49 + #define memcpy memcpy 50 + #include "../../../../lib/decompress_unxz.c" 51 + #endif 52 + 47 53 int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) 48 54 { 49 55 return decompress(input, len, NULL, NULL, output, NULL, error);
+6
arch/arm/boot/compressed/piggy.xzkern.S
··· 1 + .section .piggydata,#alloc 2 + .globl input_data 3 + input_data: 4 + .incbin "arch/arm/boot/compressed/piggy.xzkern" 5 + .globl input_data_end 6 + input_data_end:
+6 -7
arch/arm/common/gic.c
··· 686 686 * For primary GICs, skip over SGIs. 687 687 * For secondary GICs, skip over PPIs, too. 688 688 */ 689 - hwirq_base = 32; 690 - if (gic_nr == 0) { 691 - if ((irq_start & 31) > 0) { 692 - hwirq_base = 16; 693 - if (irq_start != -1) 694 - irq_start = (irq_start & ~31) + 16; 695 - } 689 + if (gic_nr == 0 && (irq_start & 31) > 0) { 690 + hwirq_base = 16; 691 + if (irq_start != -1) 692 + irq_start = (irq_start & ~31) + 16; 693 + } else { 694 + hwirq_base = 32; 696 695 } 697 696 698 697 /*
+8
arch/arm/configs/integrator_defconfig
··· 57 57 CONFIG_NET_ETHERNET=y 58 58 CONFIG_NET_PCI=y 59 59 CONFIG_E100=y 60 + CONFIG_SMC91X=y 60 61 # CONFIG_KEYBOARD_ATKBD is not set 61 62 # CONFIG_SERIO_SERPORT is not set 62 63 CONFIG_SERIAL_AMBA_PL010=y 63 64 CONFIG_SERIAL_AMBA_PL010_CONSOLE=y 64 65 CONFIG_FB=y 65 66 CONFIG_FB_MODE_HELPERS=y 67 + CONFIG_FB_ARMCLCD=y 66 68 CONFIG_FB_MATROX=y 67 69 CONFIG_FB_MATROX_MILLENIUM=y 68 70 CONFIG_FB_MATROX_MYSTIQUE=y 71 + # CONFIG_VGA_CONSOLE is not set 72 + CONFIG_MMC=y 73 + CONFIG_MMC_ARMMMCI=y 69 74 CONFIG_RTC_CLASS=y 70 75 CONFIG_RTC_DRV_PL030=y 71 76 CONFIG_EXT2_FS=y 77 + CONFIG_VFAT_FS=y 72 78 CONFIG_TMPFS=y 73 79 CONFIG_JFFS2_FS=y 74 80 CONFIG_CRAMFS=y ··· 84 78 CONFIG_NFSD=y 85 79 CONFIG_NFSD_V3=y 86 80 CONFIG_PARTITION_ADVANCED=y 81 + CONFIG_NLS_CODEPAGE_437=y 82 + CONFIG_NLS_ISO8859_1=y 87 83 CONFIG_MAGIC_SYSRQ=y 88 84 CONFIG_DEBUG_KERNEL=y
-4
arch/arm/include/asm/elf.h
··· 130 130 extern unsigned long arch_randomize_brk(struct mm_struct *mm); 131 131 #define arch_randomize_brk arch_randomize_brk 132 132 133 - extern int vectors_user_mapping(void); 134 - #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping() 135 - #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 136 - 137 133 #endif
+3 -3
arch/arm/include/asm/hardware/cache-l2x0.h
··· 103 103 #define L2X0_ADDR_FILTER_EN 1 104 104 105 105 #ifndef __ASSEMBLY__ 106 - extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask); 106 + extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask); 107 107 #if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF) 108 - extern int l2x0_of_init(__u32 aux_val, __u32 aux_mask); 108 + extern int l2x0_of_init(u32 aux_val, u32 aux_mask); 109 109 #else 110 - static inline int l2x0_of_init(__u32 aux_val, __u32 aux_mask) 110 + static inline int l2x0_of_init(u32 aux_val, u32 aux_mask) 111 111 { 112 112 return -ENODEV; 113 113 }
+3
arch/arm/include/asm/hardware/it8152.h
··· 9 9 10 10 #ifndef __ASM_HARDWARE_IT8152_H 11 11 #define __ASM_HARDWARE_IT8152_H 12 + 13 + #include <mach/irqs.h> 14 + 12 15 extern void __iomem *it8152_base_address; 13 16 14 17 #define IT8152_IO_BASE (it8152_base_address + 0x03e00000)
+6 -2
arch/arm/include/asm/irq.h
··· 1 1 #ifndef __ASM_ARM_IRQ_H 2 2 #define __ASM_ARM_IRQ_H 3 3 4 + #define NR_IRQS_LEGACY 16 5 + 6 + #ifndef CONFIG_SPARSE_IRQ 4 7 #include <mach/irqs.h> 8 + #else 9 + #define NR_IRQS NR_IRQS_LEGACY 10 + #endif 5 11 6 12 #ifndef irq_canonicalize 7 13 #define irq_canonicalize(i) (i) 8 14 #endif 9 - 10 - #define NR_IRQS_LEGACY 16 11 15 12 16 /* 13 17 * Use this value to indicate lack of interrupt
+41
arch/arm/include/asm/jump_label.h
··· 1 + #ifndef _ASM_ARM_JUMP_LABEL_H 2 + #define _ASM_ARM_JUMP_LABEL_H 3 + 4 + #ifdef __KERNEL__ 5 + 6 + #include <linux/types.h> 7 + #include <asm/system.h> 8 + 9 + #define JUMP_LABEL_NOP_SIZE 4 10 + 11 + #ifdef CONFIG_THUMB2_KERNEL 12 + #define JUMP_LABEL_NOP "nop.w" 13 + #else 14 + #define JUMP_LABEL_NOP "nop" 15 + #endif 16 + 17 + static __always_inline bool arch_static_branch(struct jump_label_key *key) 18 + { 19 + asm goto("1:\n\t" 20 + JUMP_LABEL_NOP "\n\t" 21 + ".pushsection __jump_table, \"aw\"\n\t" 22 + ".word 1b, %l[l_yes], %c0\n\t" 23 + ".popsection\n\t" 24 + : : "i" (key) : : l_yes); 25 + 26 + return false; 27 + l_yes: 28 + return true; 29 + } 30 + 31 + #endif /* __KERNEL__ */ 32 + 33 + typedef u32 jump_label_t; 34 + 35 + struct jump_entry { 36 + jump_label_t code; 37 + jump_label_t target; 38 + jump_label_t key; 39 + }; 40 + 41 + #endif
+3 -1
arch/arm/include/asm/mc146818rtc.h
··· 5 5 #define _ASM_MC146818RTC_H 6 6 7 7 #include <linux/io.h> 8 - #include <mach/irqs.h> 8 + #include <linux/kernel.h> 9 + 10 + #define RTC_IRQ BUILD_BUG_ON(1) 9 11 10 12 #ifndef RTC_PORT 11 13 #define RTC_PORT(x) (0x70 + (x))
+2
arch/arm/include/asm/memory.h
··· 116 116 #define MODULES_END (END_MEM) 117 117 #define MODULES_VADDR (PHYS_OFFSET) 118 118 119 + #define XIP_VIRT_ADDR(physaddr) (physaddr) 120 + 119 121 #endif /* !CONFIG_MMU */ 120 122 121 123 /*
+1 -28
arch/arm/include/asm/mmu_context.h
··· 18 18 #include <asm/cacheflush.h> 19 19 #include <asm/cachetype.h> 20 20 #include <asm/proc-fns.h> 21 + #include <asm-generic/mm_hooks.h> 21 22 22 23 void __check_kvm_seq(struct mm_struct *mm); 23 24 ··· 133 132 134 133 #define deactivate_mm(tsk,mm) do { } while (0) 135 134 #define activate_mm(prev,next) switch_mm(prev, next, NULL) 136 - 137 - /* 138 - * We are inserting a "fake" vma for the user-accessible vector page so 139 - * gdb and friends can get to it through ptrace and /proc/<pid>/mem. 140 - * But we also want to remove it before the generic code gets to see it 141 - * during process exit or the unmapping of it would cause total havoc. 142 - * (the macro is used as remove_vma() is static to mm/mmap.c) 143 - */ 144 - #define arch_exit_mmap(mm) \ 145 - do { \ 146 - struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \ 147 - if (high_vma) { \ 148 - BUG_ON(high_vma->vm_next); /* it should be last */ \ 149 - if (high_vma->vm_prev) \ 150 - high_vma->vm_prev->vm_next = NULL; \ 151 - else \ 152 - mm->mmap = NULL; \ 153 - rb_erase(&high_vma->vm_rb, &mm->mm_rb); \ 154 - mm->mmap_cache = NULL; \ 155 - mm->map_count--; \ 156 - remove_vma(high_vma); \ 157 - } \ 158 - } while (0) 159 - 160 - static inline void arch_dup_mmap(struct mm_struct *oldmm, 161 - struct mm_struct *mm) 162 - { 163 - } 164 135 165 136 #endif
+59
arch/arm/include/asm/opcodes.h
··· 17 17 #define ARM_OPCODE_CONDTEST_PASS 1 18 18 #define ARM_OPCODE_CONDTEST_UNCOND 2 19 19 20 + 21 + /* 22 + * Opcode byteswap helpers 23 + * 24 + * These macros help with converting instructions between a canonical integer 25 + * format and in-memory representation, in an endianness-agnostic manner. 26 + * 27 + * __mem_to_opcode_*() convert from in-memory representation to canonical form. 28 + * __opcode_to_mem_*() convert from canonical form to in-memory representation. 29 + * 30 + * 31 + * Canonical instruction representation: 32 + * 33 + * ARM: 0xKKLLMMNN 34 + * Thumb 16-bit: 0x0000KKLL, where KK < 0xE8 35 + * Thumb 32-bit: 0xKKLLMMNN, where KK >= 0xE8 36 + * 37 + * There is no way to distinguish an ARM instruction in canonical representation 38 + * from a Thumb instruction (just as these cannot be distinguished in memory). 39 + * Where this distinction is important, it needs to be tracked separately. 40 + * 41 + * Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not 42 + * represent any valid Thumb-2 instruction. For this range, 43 + * __opcode_is_thumb32() and __opcode_is_thumb16() will both be false. 
44 + */ 45 + 46 + #ifndef __ASSEMBLY__ 47 + 48 + #include <linux/types.h> 49 + #include <linux/swab.h> 50 + 51 + #ifdef CONFIG_CPU_ENDIAN_BE8 52 + #define __opcode_to_mem_arm(x) swab32(x) 53 + #define __opcode_to_mem_thumb16(x) swab16(x) 54 + #define __opcode_to_mem_thumb32(x) swahb32(x) 55 + #else 56 + #define __opcode_to_mem_arm(x) ((u32)(x)) 57 + #define __opcode_to_mem_thumb16(x) ((u16)(x)) 58 + #define __opcode_to_mem_thumb32(x) swahw32(x) 59 + #endif 60 + 61 + #define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x) 62 + #define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x) 63 + #define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x) 64 + 65 + /* Operations specific to Thumb opcodes */ 66 + 67 + /* Instruction size checks: */ 68 + #define __opcode_is_thumb32(x) ((u32)(x) >= 0xE8000000UL) 69 + #define __opcode_is_thumb16(x) ((u32)(x) < 0xE800UL) 70 + 71 + /* Operations to construct or split 32-bit Thumb instructions: */ 72 + #define __opcode_thumb32_first(x) ((u16)((x) >> 16)) 73 + #define __opcode_thumb32_second(x) ((u16)(x)) 74 + #define __opcode_thumb32_compose(first, second) \ 75 + (((u32)(u16)(first) << 16) | (u32)(u16)(second)) 76 + 77 + #endif /* __ASSEMBLY__ */ 78 + 20 79 #endif /* __ASM_ARM_OPCODES_H */
+2
arch/arm/include/asm/page.h
··· 151 151 #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) 152 152 extern void copy_page(void *to, const void *from); 153 153 154 + #define __HAVE_ARCH_GATE_AREA 1 155 + 154 156 #ifdef CONFIG_ARM_LPAE 155 157 #include <asm/pgtable-3level-types.h> 156 158 #else
+1
arch/arm/include/asm/perf_event.h
··· 22 22 ARM_PERF_PMU_ID_CA9, 23 23 ARM_PERF_PMU_ID_CA5, 24 24 ARM_PERF_PMU_ID_CA15, 25 + ARM_PERF_PMU_ID_CA7, 25 26 ARM_NUM_PMU_IDS, 26 27 }; 27 28
-1
arch/arm/include/asm/processor.h
··· 55 55 #define start_thread(regs,pc,sp) \ 56 56 ({ \ 57 57 unsigned long *stack = (unsigned long *)sp; \ 58 - set_fs(USER_DS); \ 59 58 memset(regs->uregs, 0, sizeof(regs->uregs)); \ 60 59 if (current->personality & ADDR_LIMIT_32BIT) \ 61 60 regs->ARM_cpsr = USR_MODE; \
-2
arch/arm/include/asm/prom.h
··· 13 13 14 14 #ifdef CONFIG_OF 15 15 16 - #include <asm/irq.h> 17 - 18 16 extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys); 19 17 extern void arm_dt_memblock_reserve(void); 20 18
+57 -77
arch/arm/include/asm/tlbflush.h
··· 318 318 319 319 #define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f))) 320 320 321 + #define __tlb_op(f, insnarg, arg) \ 322 + do { \ 323 + if (always_tlb_flags & (f)) \ 324 + asm("mcr " insnarg \ 325 + : : "r" (arg) : "cc"); \ 326 + else if (possible_tlb_flags & (f)) \ 327 + asm("tst %1, %2\n\t" \ 328 + "mcrne " insnarg \ 329 + : : "r" (arg), "r" (__tlb_flag), "Ir" (f) \ 330 + : "cc"); \ 331 + } while (0) 332 + 333 + #define tlb_op(f, regs, arg) __tlb_op(f, "p15, 0, %0, " regs, arg) 334 + #define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg) 335 + 321 336 static inline void local_flush_tlb_all(void) 322 337 { 323 338 const int zero = 0; ··· 341 326 if (tlb_flag(TLB_WB)) 342 327 dsb(); 343 328 344 - if (tlb_flag(TLB_V3_FULL)) 345 - asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc"); 346 - if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL)) 347 - asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc"); 348 - if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL)) 349 - asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc"); 350 - if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL)) 351 - asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); 352 - if (tlb_flag(TLB_V7_UIS_FULL)) 353 - asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc"); 329 + tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); 330 + tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero); 331 + tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero); 332 + tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); 333 + tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero); 354 334 355 335 if (tlb_flag(TLB_BARRIER)) { 356 336 dsb(); ··· 362 352 if (tlb_flag(TLB_WB)) 363 353 dsb(); 364 354 365 - if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { 366 - if (tlb_flag(TLB_V3_FULL)) 367 - asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc"); 368 - if (tlb_flag(TLB_V4_U_FULL)) 369 - asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc"); 370 - if (tlb_flag(TLB_V4_D_FULL)) 371 - asm("mcr p15, 
0, %0, c8, c6, 0" : : "r" (zero) : "cc"); 372 - if (tlb_flag(TLB_V4_I_FULL)) 373 - asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); 355 + if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { 356 + if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { 357 + tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); 358 + tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero); 359 + tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero); 360 + tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero); 361 + } 362 + put_cpu(); 374 363 } 375 - put_cpu(); 376 364 377 - if (tlb_flag(TLB_V6_U_ASID)) 378 - asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc"); 379 - if (tlb_flag(TLB_V6_D_ASID)) 380 - asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc"); 381 - if (tlb_flag(TLB_V6_I_ASID)) 382 - asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc"); 383 - if (tlb_flag(TLB_V7_UIS_ASID)) 365 + tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid); 366 + tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid); 367 + tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid); 384 368 #ifdef CONFIG_ARM_ERRATA_720789 385 - asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc"); 369 + tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", zero); 386 370 #else 387 - asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc"); 371 + tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", asid); 388 372 #endif 389 373 390 374 if (tlb_flag(TLB_BARRIER)) ··· 396 392 if (tlb_flag(TLB_WB)) 397 393 dsb(); 398 394 399 - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { 400 - if (tlb_flag(TLB_V3_PAGE)) 401 - asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc"); 402 - if (tlb_flag(TLB_V4_U_PAGE)) 403 - asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc"); 404 - if (tlb_flag(TLB_V4_D_PAGE)) 405 - asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc"); 406 - if (tlb_flag(TLB_V4_I_PAGE)) 407 - asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc"); 395 + if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && 396 + 
cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { 397 + tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr); 398 + tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr); 399 + tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr); 400 + tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr); 408 401 if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) 409 402 asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); 410 403 } 411 404 412 - if (tlb_flag(TLB_V6_U_PAGE)) 413 - asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc"); 414 - if (tlb_flag(TLB_V6_D_PAGE)) 415 - asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc"); 416 - if (tlb_flag(TLB_V6_I_PAGE)) 417 - asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc"); 418 - if (tlb_flag(TLB_V7_UIS_PAGE)) 405 + tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr); 406 + tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr); 407 + tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr); 419 408 #ifdef CONFIG_ARM_ERRATA_720789 420 - asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc"); 409 + tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK); 421 410 #else 422 - asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc"); 411 + tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr); 423 412 #endif 424 413 425 414 if (tlb_flag(TLB_BARRIER)) ··· 429 432 if (tlb_flag(TLB_WB)) 430 433 dsb(); 431 434 432 - if (tlb_flag(TLB_V3_PAGE)) 433 - asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc"); 434 - if (tlb_flag(TLB_V4_U_PAGE)) 435 - asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc"); 436 - if (tlb_flag(TLB_V4_D_PAGE)) 437 - asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc"); 438 - if (tlb_flag(TLB_V4_I_PAGE)) 439 - asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc"); 435 + tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr); 436 + tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr); 437 + tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr); 438 + tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); 440 439 if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) 441 440 asm("mcr p15, 0, %0, c8, c5, 0" : : 
"r" (zero) : "cc"); 442 441 443 - if (tlb_flag(TLB_V6_U_PAGE)) 444 - asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc"); 445 - if (tlb_flag(TLB_V6_D_PAGE)) 446 - asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc"); 447 - if (tlb_flag(TLB_V6_I_PAGE)) 448 - asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc"); 449 - if (tlb_flag(TLB_V7_UIS_PAGE)) 450 - asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc"); 442 + tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr); 443 + tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr); 444 + tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr); 445 + tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr); 451 446 452 447 if (tlb_flag(TLB_BARRIER)) { 453 448 dsb(); ··· 464 475 { 465 476 const unsigned int __tlb_flag = __cpu_tlb_flags; 466 477 467 - if (tlb_flag(TLB_DCLEAN)) 468 - asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pmd" 469 - : : "r" (pmd) : "cc"); 470 - 471 - if (tlb_flag(TLB_L2CLEAN_FR)) 472 - asm("mcr p15, 1, %0, c15, c9, 1 @ L2 flush_pmd" 473 - : : "r" (pmd) : "cc"); 478 + tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd); 479 + tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd); 474 480 475 481 if (tlb_flag(TLB_WB)) 476 482 dsb(); ··· 475 491 { 476 492 const unsigned int __tlb_flag = __cpu_tlb_flags; 477 493 478 - if (tlb_flag(TLB_DCLEAN)) 479 - asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pmd" 480 - : : "r" (pmd) : "cc"); 481 - 482 - if (tlb_flag(TLB_L2CLEAN_FR)) 483 - asm("mcr p15, 1, %0, c15, c9, 1 @ L2 flush_pmd" 484 - : : "r" (pmd) : "cc"); 494 + tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd); 495 + tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd); 485 496 } 486 497 498 + #undef tlb_op 487 499 #undef tlb_flag 488 500 #undef always_tlb_flags 489 501 #undef possible_tlb_flags
+1 -1
arch/arm/include/asm/traps.h
··· 46 46 return in ? : __in_irqentry_text(ptr); 47 47 } 48 48 49 - extern void __init early_trap_init(void); 49 + extern void __init early_trap_init(void *); 50 50 extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame); 51 51 extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs); 52 52
+8 -6
arch/arm/kernel/Makefile
··· 7 7 8 8 ifdef CONFIG_FUNCTION_TRACER 9 9 CFLAGS_REMOVE_ftrace.o = -pg 10 + CFLAGS_REMOVE_insn.o = -pg 11 + CFLAGS_REMOVE_patch.o = -pg 10 12 endif 11 13 12 14 CFLAGS_REMOVE_return_address.o = -pg ··· 16 14 # Object file lists. 17 15 18 16 obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \ 19 - process.o ptrace.o return_address.o setup.o signal.o \ 20 - sys_arm.o stacktrace.o time.o traps.o 17 + process.o ptrace.o return_address.o sched_clock.o \ 18 + setup.o signal.o stacktrace.o sys_arm.o time.o traps.o 21 19 22 20 obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += compat.o 23 21 ··· 31 29 obj-$(CONFIG_ISA_DMA) += dma-isa.o 32 30 obj-$(CONFIG_PCI) += bios32.o isa.o 33 31 obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o 34 - obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o 35 32 obj-$(CONFIG_SMP) += smp.o smp_tlb.o 36 33 obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o 37 34 obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o 38 - obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 39 - obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 35 + obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o 36 + obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o 37 + obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o 40 38 obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o 41 - obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o 39 + obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o patch.o 42 40 ifdef CONFIG_THUMB2_KERNEL 43 41 obj-$(CONFIG_KPROBES) += kprobes-thumb.o 44 42 else
+24 -1
arch/arm/kernel/debug.S
··· 100 100 101 101 #endif /* CONFIG_CPU_V6 */ 102 102 103 - #else 103 + #elif !defined(CONFIG_DEBUG_SEMIHOSTING) 104 104 #include <mach/debug-macro.S> 105 105 #endif /* CONFIG_DEBUG_ICEDCC */ 106 106 ··· 155 155 156 156 .ltorg 157 157 158 + #ifndef CONFIG_DEBUG_SEMIHOSTING 159 + 158 160 ENTRY(printascii) 159 161 addruart_current r3, r1, r2 160 162 b 2f ··· 179 177 mov r0, #0 180 178 b 1b 181 179 ENDPROC(printch) 180 + 181 + #else 182 + 183 + ENTRY(printascii) 184 + mov r1, r0 185 + mov r0, #0x04 @ SYS_WRITE0 186 + ARM( svc #0x123456 ) 187 + THUMB( svc #0xab ) 188 + mov pc, lr 189 + ENDPROC(printascii) 190 + 191 + ENTRY(printch) 192 + adr r1, hexbuf 193 + strb r0, [r1] 194 + mov r0, #0x03 @ SYS_WRITEC 195 + ARM( svc #0x123456 ) 196 + THUMB( svc #0xab ) 197 + mov pc, lr 198 + ENDPROC(printch) 199 + 200 + #endif
+28 -72
arch/arm/kernel/ftrace.c
··· 16 16 #include <linux/uaccess.h> 17 17 18 18 #include <asm/cacheflush.h> 19 + #include <asm/opcodes.h> 19 20 #include <asm/ftrace.h> 20 21 22 + #include "insn.h" 23 + 21 24 #ifdef CONFIG_THUMB2_KERNEL 22 - #define NOP 0xeb04f85d /* pop.w {lr} */ 25 + #define NOP 0xf85deb04 /* pop.w {lr} */ 23 26 #else 24 27 #define NOP 0xe8bd4000 /* pop {lr} */ 25 28 #endif ··· 63 60 } 64 61 #endif 65 62 66 - #ifdef CONFIG_THUMB2_KERNEL 67 - static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr, 68 - bool link) 69 - { 70 - unsigned long s, j1, j2, i1, i2, imm10, imm11; 71 - unsigned long first, second; 72 - long offset; 73 - 74 - offset = (long)addr - (long)(pc + 4); 75 - if (offset < -16777216 || offset > 16777214) { 76 - WARN_ON_ONCE(1); 77 - return 0; 78 - } 79 - 80 - s = (offset >> 24) & 0x1; 81 - i1 = (offset >> 23) & 0x1; 82 - i2 = (offset >> 22) & 0x1; 83 - imm10 = (offset >> 12) & 0x3ff; 84 - imm11 = (offset >> 1) & 0x7ff; 85 - 86 - j1 = (!i1) ^ s; 87 - j2 = (!i2) ^ s; 88 - 89 - first = 0xf000 | (s << 10) | imm10; 90 - second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11; 91 - if (link) 92 - second |= 1 << 14; 93 - 94 - return (second << 16) | first; 95 - } 96 - #else 97 - static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr, 98 - bool link) 99 - { 100 - unsigned long opcode = 0xea000000; 101 - long offset; 102 - 103 - if (link) 104 - opcode |= 1 << 24; 105 - 106 - offset = (long)addr - (long)(pc + 8); 107 - if (unlikely(offset < -33554432 || offset > 33554428)) { 108 - /* Can't generate branches that far (from ARM ARM). Ftrace 109 - * doesn't generate branches outside of kernel text. 
110 - */ 111 - WARN_ON_ONCE(1); 112 - return 0; 113 - } 114 - 115 - offset = (offset >> 2) & 0x00ffffff; 116 - 117 - return opcode | offset; 118 - } 119 - #endif 120 - 121 63 static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) 122 64 { 123 - return ftrace_gen_branch(pc, addr, true); 65 + return arm_gen_branch_link(pc, addr); 124 66 } 125 67 126 68 static int ftrace_modify_code(unsigned long pc, unsigned long old, 127 - unsigned long new) 69 + unsigned long new, bool validate) 128 70 { 129 71 unsigned long replaced; 130 72 131 - if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE)) 132 - return -EFAULT; 73 + if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) { 74 + old = __opcode_to_mem_thumb32(old); 75 + new = __opcode_to_mem_thumb32(new); 76 + } else { 77 + old = __opcode_to_mem_arm(old); 78 + new = __opcode_to_mem_arm(new); 79 + } 133 80 134 - if (replaced != old) 135 - return -EINVAL; 81 + if (validate) { 82 + if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE)) 83 + return -EFAULT; 84 + 85 + if (replaced != old) 86 + return -EINVAL; 87 + } 136 88 137 89 if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE)) 138 90 return -EPERM; ··· 99 141 100 142 int ftrace_update_ftrace_func(ftrace_func_t func) 101 143 { 102 - unsigned long pc, old; 144 + unsigned long pc; 103 145 unsigned long new; 104 146 int ret; 105 147 106 148 pc = (unsigned long)&ftrace_call; 107 - memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE); 108 149 new = ftrace_call_replace(pc, (unsigned long)func); 109 150 110 - ret = ftrace_modify_code(pc, old, new); 151 + ret = ftrace_modify_code(pc, 0, new, false); 111 152 112 153 #ifdef CONFIG_OLD_MCOUNT 113 154 if (!ret) { 114 155 pc = (unsigned long)&ftrace_call_old; 115 - memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE); 116 156 new = ftrace_call_replace(pc, (unsigned long)func); 117 157 118 - ret = ftrace_modify_code(pc, old, new); 158 + ret = ftrace_modify_code(pc, 0, new, false); 119 159 } 120 160 #endif 121 161 ··· 
128 172 old = ftrace_nop_replace(rec); 129 173 new = ftrace_call_replace(ip, adjust_address(rec, addr)); 130 174 131 - return ftrace_modify_code(rec->ip, old, new); 175 + return ftrace_modify_code(rec->ip, old, new, true); 132 176 } 133 177 134 178 int ftrace_make_nop(struct module *mod, ··· 141 185 142 186 old = ftrace_call_replace(ip, adjust_address(rec, addr)); 143 187 new = ftrace_nop_replace(rec); 144 - ret = ftrace_modify_code(ip, old, new); 188 + ret = ftrace_modify_code(ip, old, new, true); 145 189 146 190 #ifdef CONFIG_OLD_MCOUNT 147 191 if (ret == -EINVAL && addr == MCOUNT_ADDR) { ··· 149 193 150 194 old = ftrace_call_replace(ip, adjust_address(rec, addr)); 151 195 new = ftrace_nop_replace(rec); 152 - ret = ftrace_modify_code(ip, old, new); 196 + ret = ftrace_modify_code(ip, old, new, true); 153 197 } 154 198 #endif 155 199 ··· 205 249 { 206 250 unsigned long caller_fn = (unsigned long) func; 207 251 unsigned long pc = (unsigned long) callsite; 208 - unsigned long branch = ftrace_gen_branch(pc, caller_fn, false); 252 + unsigned long branch = arm_gen_branch(pc, caller_fn); 209 253 unsigned long nop = 0xe1a00000; /* mov r0, r0 */ 210 254 unsigned long old = enable ? nop : branch; 211 255 unsigned long new = enable ? branch : nop; 212 256 213 - return ftrace_modify_code(pc, old, new); 257 + return ftrace_modify_code(pc, old, new, true); 214 258 } 215 259 216 260 static int ftrace_modify_graph_caller(bool enable)
+4 -4
arch/arm/kernel/head.S
··· 265 265 str r6, [r3] 266 266 267 267 #ifdef CONFIG_DEBUG_LL 268 - #ifndef CONFIG_DEBUG_ICEDCC 268 + #if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING) 269 269 /* 270 270 * Map in IO space for serial debugging. 271 271 * This allows debug messages to be output ··· 297 297 cmp r0, r6 298 298 blo 1b 299 299 300 - #else /* CONFIG_DEBUG_ICEDCC */ 301 - /* we don't need any serial debugging mappings for ICEDCC */ 300 + #else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */ 301 + /* we don't need any serial debugging mappings */ 302 302 ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags 303 - #endif /* !CONFIG_DEBUG_ICEDCC */ 303 + #endif 304 304 305 305 #if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS) 306 306 /*
+61
arch/arm/kernel/insn.c
··· 1 + #include <linux/kernel.h> 2 + #include <asm/opcodes.h> 3 + 4 + static unsigned long 5 + __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link) 6 + { 7 + unsigned long s, j1, j2, i1, i2, imm10, imm11; 8 + unsigned long first, second; 9 + long offset; 10 + 11 + offset = (long)addr - (long)(pc + 4); 12 + if (offset < -16777216 || offset > 16777214) { 13 + WARN_ON_ONCE(1); 14 + return 0; 15 + } 16 + 17 + s = (offset >> 24) & 0x1; 18 + i1 = (offset >> 23) & 0x1; 19 + i2 = (offset >> 22) & 0x1; 20 + imm10 = (offset >> 12) & 0x3ff; 21 + imm11 = (offset >> 1) & 0x7ff; 22 + 23 + j1 = (!i1) ^ s; 24 + j2 = (!i2) ^ s; 25 + 26 + first = 0xf000 | (s << 10) | imm10; 27 + second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11; 28 + if (link) 29 + second |= 1 << 14; 30 + 31 + return __opcode_thumb32_compose(first, second); 32 + } 33 + 34 + static unsigned long 35 + __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link) 36 + { 37 + unsigned long opcode = 0xea000000; 38 + long offset; 39 + 40 + if (link) 41 + opcode |= 1 << 24; 42 + 43 + offset = (long)addr - (long)(pc + 8); 44 + if (unlikely(offset < -33554432 || offset > 33554428)) { 45 + WARN_ON_ONCE(1); 46 + return 0; 47 + } 48 + 49 + offset = (offset >> 2) & 0x00ffffff; 50 + 51 + return opcode | offset; 52 + } 53 + 54 + unsigned long 55 + __arm_gen_branch(unsigned long pc, unsigned long addr, bool link) 56 + { 57 + if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) 58 + return __arm_gen_branch_thumb2(pc, addr, link); 59 + else 60 + return __arm_gen_branch_arm(pc, addr, link); 61 + }
+29
arch/arm/kernel/insn.h
··· 1 + #ifndef __ASM_ARM_INSN_H 2 + #define __ASM_ARM_INSN_H 3 + 4 + static inline unsigned long 5 + arm_gen_nop(void) 6 + { 7 + #ifdef CONFIG_THUMB2_KERNEL 8 + return 0xf3af8000; /* nop.w */ 9 + #else 10 + return 0xe1a00000; /* mov r0, r0 */ 11 + #endif 12 + } 13 + 14 + unsigned long 15 + __arm_gen_branch(unsigned long pc, unsigned long addr, bool link); 16 + 17 + static inline unsigned long 18 + arm_gen_branch(unsigned long pc, unsigned long addr) 19 + { 20 + return __arm_gen_branch(pc, addr, false); 21 + } 22 + 23 + static inline unsigned long 24 + arm_gen_branch_link(unsigned long pc, unsigned long addr) 25 + { 26 + return __arm_gen_branch(pc, addr, true); 27 + } 28 + 29 + #endif
+1 -4
arch/arm/kernel/irq.c
··· 180 180 local_irq_save(flags); 181 181 182 182 for_each_irq_desc(i, desc) { 183 - bool affinity_broken = false; 184 - 185 - if (!desc) 186 - continue; 183 + bool affinity_broken; 187 184 188 185 raw_spin_lock(&desc->lock); 189 186 affinity_broken = migrate_one_irq(desc);
+39
arch/arm/kernel/jump_label.c
··· 1 + #include <linux/kernel.h> 2 + #include <linux/jump_label.h> 3 + 4 + #include "insn.h" 5 + #include "patch.h" 6 + 7 + #ifdef HAVE_JUMP_LABEL 8 + 9 + static void __arch_jump_label_transform(struct jump_entry *entry, 10 + enum jump_label_type type, 11 + bool is_static) 12 + { 13 + void *addr = (void *)entry->code; 14 + unsigned int insn; 15 + 16 + if (type == JUMP_LABEL_ENABLE) 17 + insn = arm_gen_branch(entry->code, entry->target); 18 + else 19 + insn = arm_gen_nop(); 20 + 21 + if (is_static) 22 + __patch_text(addr, insn); 23 + else 24 + patch_text(addr, insn); 25 + } 26 + 27 + void arch_jump_label_transform(struct jump_entry *entry, 28 + enum jump_label_type type) 29 + { 30 + __arch_jump_label_transform(entry, type, false); 31 + } 32 + 33 + void arch_jump_label_transform_static(struct jump_entry *entry, 34 + enum jump_label_type type) 35 + { 36 + __arch_jump_label_transform(entry, type, true); 37 + } 38 + 39 + #endif
+24 -62
arch/arm/kernel/kprobes.c
··· 29 29 #include <asm/cacheflush.h> 30 30 31 31 #include "kprobes.h" 32 + #include "patch.h" 32 33 33 34 #define MIN_STACK_SIZE(addr) \ 34 35 min((unsigned long)MAX_STACK_SIZE, \ ··· 104 103 return 0; 105 104 } 106 105 107 - #ifdef CONFIG_THUMB2_KERNEL 108 - 109 - /* 110 - * For a 32-bit Thumb breakpoint spanning two memory words we need to take 111 - * special precautions to insert the breakpoint atomically, especially on SMP 112 - * systems. This is achieved by calling this arming function using stop_machine. 113 - */ 114 - static int __kprobes set_t32_breakpoint(void *addr) 115 - { 116 - ((u16 *)addr)[0] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION >> 16; 117 - ((u16 *)addr)[1] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION & 0xffff; 118 - flush_insns(addr, 2*sizeof(u16)); 119 - return 0; 120 - } 121 - 122 106 void __kprobes arch_arm_kprobe(struct kprobe *p) 123 107 { 124 - uintptr_t addr = (uintptr_t)p->addr & ~1; /* Remove any Thumb flag */ 108 + unsigned int brkp; 109 + void *addr; 125 110 126 - if (!is_wide_instruction(p->opcode)) { 127 - *(u16 *)addr = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION; 128 - flush_insns(addr, sizeof(u16)); 129 - } else if (addr & 2) { 130 - /* A 32-bit instruction spanning two words needs special care */ 131 - stop_machine(set_t32_breakpoint, (void *)addr, &cpu_online_map); 111 + if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) { 112 + /* Remove any Thumb flag */ 113 + addr = (void *)((uintptr_t)p->addr & ~1); 114 + 115 + if (is_wide_instruction(p->opcode)) 116 + brkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION; 117 + else 118 + brkp = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION; 132 119 } else { 133 - /* Word aligned 32-bit instruction can be written atomically */ 134 - u32 bkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION; 135 - #ifndef __ARMEB__ /* Swap halfwords for little-endian */ 136 - bkp = (bkp >> 16) | (bkp << 16); 137 - #endif 138 - *(u32 *)addr = bkp; 139 - flush_insns(addr, sizeof(u32)); 120 + kprobe_opcode_t insn = p->opcode; 121 + 122 + addr = p->addr; 123 
+ brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION; 124 + 125 + if (insn >= 0xe0000000) 126 + brkp |= 0xe0000000; /* Unconditional instruction */ 127 + else 128 + brkp |= insn & 0xf0000000; /* Copy condition from insn */ 140 129 } 130 + 131 + patch_text(addr, brkp); 141 132 } 142 - 143 - #else /* !CONFIG_THUMB2_KERNEL */ 144 - 145 - void __kprobes arch_arm_kprobe(struct kprobe *p) 146 - { 147 - kprobe_opcode_t insn = p->opcode; 148 - kprobe_opcode_t brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION; 149 - if (insn >= 0xe0000000) 150 - brkp |= 0xe0000000; /* Unconditional instruction */ 151 - else 152 - brkp |= insn & 0xf0000000; /* Copy condition from insn */ 153 - *p->addr = brkp; 154 - flush_insns(p->addr, sizeof(p->addr[0])); 155 - } 156 - 157 - #endif /* !CONFIG_THUMB2_KERNEL */ 158 133 159 134 /* 160 135 * The actual disarming is done here on each CPU and synchronized using ··· 143 166 int __kprobes __arch_disarm_kprobe(void *p) 144 167 { 145 168 struct kprobe *kp = p; 146 - #ifdef CONFIG_THUMB2_KERNEL 147 - u16 *addr = (u16 *)((uintptr_t)kp->addr & ~1); 148 - kprobe_opcode_t insn = kp->opcode; 149 - unsigned int len; 169 + void *addr = (void *)((uintptr_t)kp->addr & ~1); 150 170 151 - if (is_wide_instruction(insn)) { 152 - ((u16 *)addr)[0] = insn>>16; 153 - ((u16 *)addr)[1] = insn; 154 - len = 2*sizeof(u16); 155 - } else { 156 - ((u16 *)addr)[0] = insn; 157 - len = sizeof(u16); 158 - } 159 - flush_insns(addr, len); 171 + __patch_text(addr, kp->opcode); 160 172 161 - #else /* !CONFIG_THUMB2_KERNEL */ 162 - *kp->addr = kp->opcode; 163 - flush_insns(kp->addr, sizeof(kp->addr[0])); 164 - #endif 165 173 return 0; 166 174 } 167 175
+25
arch/arm/kernel/machine_kexec.c
··· 7 7 #include <linux/delay.h> 8 8 #include <linux/reboot.h> 9 9 #include <linux/io.h> 10 + #include <linux/irq.h> 10 11 #include <asm/pgtable.h> 11 12 #include <asm/pgalloc.h> 12 13 #include <asm/mmu_context.h> ··· 54 53 cpu_relax(); 55 54 } 56 55 56 + static void machine_kexec_mask_interrupts(void) 57 + { 58 + unsigned int i; 59 + struct irq_desc *desc; 60 + 61 + for_each_irq_desc(i, desc) { 62 + struct irq_chip *chip; 63 + 64 + chip = irq_desc_get_chip(desc); 65 + if (!chip) 66 + continue; 67 + 68 + if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) 69 + chip->irq_eoi(&desc->irq_data); 70 + 71 + if (chip->irq_mask) 72 + chip->irq_mask(&desc->irq_data); 73 + 74 + if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) 75 + chip->irq_disable(&desc->irq_data); 76 + } 77 + } 78 + 57 79 void machine_crash_shutdown(struct pt_regs *regs) 58 80 { 59 81 unsigned long msecs; ··· 94 70 printk(KERN_WARNING "Non-crashing CPUs did not react to IPI\n"); 95 71 96 72 crash_save_cpu(regs, smp_processor_id()); 73 + machine_kexec_mask_interrupts(); 97 74 98 75 printk(KERN_INFO "Loading crashdump kernel...\n"); 99 76 }
+75
arch/arm/kernel/patch.c
··· 1 + #include <linux/kernel.h> 2 + #include <linux/kprobes.h> 3 + #include <linux/stop_machine.h> 4 + 5 + #include <asm/cacheflush.h> 6 + #include <asm/smp_plat.h> 7 + #include <asm/opcodes.h> 8 + 9 + #include "patch.h" 10 + 11 + struct patch { 12 + void *addr; 13 + unsigned int insn; 14 + }; 15 + 16 + void __kprobes __patch_text(void *addr, unsigned int insn) 17 + { 18 + bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL); 19 + int size; 20 + 21 + if (thumb2 && __opcode_is_thumb16(insn)) { 22 + *(u16 *)addr = __opcode_to_mem_thumb16(insn); 23 + size = sizeof(u16); 24 + } else if (thumb2 && ((uintptr_t)addr & 2)) { 25 + u16 first = __opcode_thumb32_first(insn); 26 + u16 second = __opcode_thumb32_second(insn); 27 + u16 *addrh = addr; 28 + 29 + addrh[0] = __opcode_to_mem_thumb16(first); 30 + addrh[1] = __opcode_to_mem_thumb16(second); 31 + 32 + size = sizeof(u32); 33 + } else { 34 + if (thumb2) 35 + insn = __opcode_to_mem_thumb32(insn); 36 + else 37 + insn = __opcode_to_mem_arm(insn); 38 + 39 + *(u32 *)addr = insn; 40 + size = sizeof(u32); 41 + } 42 + 43 + flush_icache_range((uintptr_t)(addr), 44 + (uintptr_t)(addr) + size); 45 + } 46 + 47 + static int __kprobes patch_text_stop_machine(void *data) 48 + { 49 + struct patch *patch = data; 50 + 51 + __patch_text(patch->addr, patch->insn); 52 + 53 + return 0; 54 + } 55 + 56 + void __kprobes patch_text(void *addr, unsigned int insn) 57 + { 58 + struct patch patch = { 59 + .addr = addr, 60 + .insn = insn, 61 + }; 62 + 63 + if (cache_ops_need_broadcast()) { 64 + stop_machine(patch_text_stop_machine, &patch, cpu_online_mask); 65 + } else { 66 + bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL) 67 + && __opcode_is_thumb32(insn) 68 + && ((uintptr_t)addr & 2); 69 + 70 + if (straddles_word) 71 + stop_machine(patch_text_stop_machine, &patch, NULL); 72 + else 73 + __patch_text(addr, insn); 74 + } 75 + }
+7
arch/arm/kernel/patch.h
··· 1 + #ifndef _ARM_KERNEL_PATCH_H 2 + #define _ARM_KERNEL_PATCH_H 3 + 4 + void patch_text(void *addr, unsigned int insn); 5 + void __patch_text(void *addr, unsigned int insn); 6 + 7 + #endif
+3
arch/arm/kernel/perf_event.c
··· 738 738 case 0xC0F0: /* Cortex-A15 */ 739 739 cpu_pmu = armv7_a15_pmu_init(); 740 740 break; 741 + case 0xC070: /* Cortex-A7 */ 742 + cpu_pmu = armv7_a7_pmu_init(); 743 + break; 741 744 } 742 745 /* Intel CPUs [xscale]. */ 743 746 } else if (0x69 == implementor) {
+145
arch/arm/kernel/perf_event_v7.c
··· 610 610 }; 611 611 612 612 /* 613 + * Cortex-A7 HW events mapping 614 + */ 615 + static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = { 616 + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, 617 + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, 618 + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, 619 + [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, 620 + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, 621 + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, 622 + [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, 623 + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, 624 + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, 625 + }; 626 + 627 + static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 628 + [PERF_COUNT_HW_CACHE_OP_MAX] 629 + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 630 + [C(L1D)] = { 631 + /* 632 + * The performance counters don't differentiate between read 633 + * and write accesses/misses so this isn't strictly correct, 634 + * but it's the best we can do. Writes and reads get 635 + * combined. 
636 + */ 637 + [C(OP_READ)] = { 638 + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, 639 + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, 640 + }, 641 + [C(OP_WRITE)] = { 642 + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, 643 + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, 644 + }, 645 + [C(OP_PREFETCH)] = { 646 + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 647 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 648 + }, 649 + }, 650 + [C(L1I)] = { 651 + [C(OP_READ)] = { 652 + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 653 + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 654 + }, 655 + [C(OP_WRITE)] = { 656 + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 657 + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 658 + }, 659 + [C(OP_PREFETCH)] = { 660 + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 661 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 662 + }, 663 + }, 664 + [C(LL)] = { 665 + [C(OP_READ)] = { 666 + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS, 667 + [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, 668 + }, 669 + [C(OP_WRITE)] = { 670 + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS, 671 + [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, 672 + }, 673 + [C(OP_PREFETCH)] = { 674 + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 675 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 676 + }, 677 + }, 678 + [C(DTLB)] = { 679 + [C(OP_READ)] = { 680 + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 681 + [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, 682 + }, 683 + [C(OP_WRITE)] = { 684 + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 685 + [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, 686 + }, 687 + [C(OP_PREFETCH)] = { 688 + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 689 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 690 + }, 691 + }, 692 + [C(ITLB)] = { 693 + [C(OP_READ)] = { 694 + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 695 + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, 696 + }, 697 + [C(OP_WRITE)] = { 698 + [C(RESULT_ACCESS)] = 
CACHE_OP_UNSUPPORTED, 699 + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, 700 + }, 701 + [C(OP_PREFETCH)] = { 702 + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 703 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 704 + }, 705 + }, 706 + [C(BPU)] = { 707 + [C(OP_READ)] = { 708 + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, 709 + [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, 710 + }, 711 + [C(OP_WRITE)] = { 712 + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, 713 + [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, 714 + }, 715 + [C(OP_PREFETCH)] = { 716 + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 717 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 718 + }, 719 + }, 720 + [C(NODE)] = { 721 + [C(OP_READ)] = { 722 + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 723 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 724 + }, 725 + [C(OP_WRITE)] = { 726 + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 727 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 728 + }, 729 + [C(OP_PREFETCH)] = { 730 + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 731 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 732 + }, 733 + }, 734 + }; 735 + 736 + /* 613 737 * Perf Events' indices 614 738 */ 615 739 #define ARMV7_IDX_CYCLE_COUNTER 0 ··· 1228 1104 &armv7_a15_perf_cache_map, 0xFF); 1229 1105 } 1230 1106 1107 + static int armv7_a7_map_event(struct perf_event *event) 1108 + { 1109 + return map_cpu_event(event, &armv7_a7_perf_map, 1110 + &armv7_a7_perf_cache_map, 0xFF); 1111 + } 1112 + 1231 1113 static struct arm_pmu armv7pmu = { 1232 1114 .handle_irq = armv7pmu_handle_irq, 1233 1115 .enable = armv7pmu_enable_event, ··· 1294 1164 armv7pmu.set_event_filter = armv7pmu_set_event_filter; 1295 1165 return &armv7pmu; 1296 1166 } 1167 + 1168 + static struct arm_pmu *__init armv7_a7_pmu_init(void) 1169 + { 1170 + armv7pmu.id = ARM_PERF_PMU_ID_CA7; 1171 + armv7pmu.name = "ARMv7 Cortex-A7"; 1172 + armv7pmu.map_event = armv7_a7_map_event; 1173 + armv7pmu.num_events = armv7_read_num_pmnc_events(); 1174 + 
armv7pmu.set_event_filter = armv7pmu_set_event_filter; 1175 + return &armv7pmu; 1176 + } 1297 1177 #else 1298 1178 static struct arm_pmu *__init armv7_a8_pmu_init(void) 1299 1179 { ··· 1321 1181 } 1322 1182 1323 1183 static struct arm_pmu *__init armv7_a15_pmu_init(void) 1184 + { 1185 + return NULL; 1186 + } 1187 + 1188 + static struct arm_pmu *__init armv7_a7_pmu_init(void) 1324 1189 { 1325 1190 return NULL; 1326 1191 }
+27 -9
arch/arm/kernel/process.c
··· 528 528 #ifdef CONFIG_MMU 529 529 /* 530 530 * The vectors page is always readable from user space for the 531 - * atomic helpers and the signal restart code. Let's declare a mapping 532 - * for it so it is visible through ptrace and /proc/<pid>/mem. 531 + * atomic helpers and the signal restart code. Insert it into the 532 + * gate_vma so that it is visible through ptrace and /proc/<pid>/mem. 533 533 */ 534 + static struct vm_area_struct gate_vma; 534 535 535 - int vectors_user_mapping(void) 536 + static int __init gate_vma_init(void) 536 537 { 537 - struct mm_struct *mm = current->mm; 538 - return install_special_mapping(mm, 0xffff0000, PAGE_SIZE, 539 - VM_READ | VM_EXEC | 540 - VM_MAYREAD | VM_MAYEXEC | VM_RESERVED, 541 - NULL); 538 + gate_vma.vm_start = 0xffff0000; 539 + gate_vma.vm_end = 0xffff0000 + PAGE_SIZE; 540 + gate_vma.vm_page_prot = PAGE_READONLY_EXEC; 541 + gate_vma.vm_flags = VM_READ | VM_EXEC | 542 + VM_MAYREAD | VM_MAYEXEC; 543 + return 0; 544 + } 545 + arch_initcall(gate_vma_init); 546 + 547 + struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 548 + { 549 + return &gate_vma; 550 + } 551 + 552 + int in_gate_area(struct mm_struct *mm, unsigned long addr) 553 + { 554 + return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end); 555 + } 556 + 557 + int in_gate_area_no_mm(unsigned long addr) 558 + { 559 + return in_gate_area(NULL, addr); 542 560 } 543 561 544 562 const char *arch_vma_name(struct vm_area_struct *vma) 545 563 { 546 - return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL; 564 + return (vma == &gate_vma) ? "[vectors]" : NULL; 547 565 } 548 566 #endif
+18
arch/arm/kernel/sched_clock.c
··· 10 10 #include <linux/jiffies.h> 11 11 #include <linux/kernel.h> 12 12 #include <linux/sched.h> 13 + #include <linux/syscore_ops.h> 13 14 #include <linux/timer.h> 14 15 15 16 #include <asm/sched_clock.h> ··· 165 164 166 165 sched_clock_poll(sched_clock_timer.data); 167 166 } 167 + 168 + static int sched_clock_suspend(void) 169 + { 170 + sched_clock_poll(sched_clock_timer.data); 171 + return 0; 172 + } 173 + 174 + static struct syscore_ops sched_clock_ops = { 175 + .suspend = sched_clock_suspend, 176 + }; 177 + 178 + static int __init sched_clock_syscore_init(void) 179 + { 180 + register_syscore_ops(&sched_clock_ops); 181 + return 0; 182 + } 183 + device_initcall(sched_clock_syscore_init);
-1
arch/arm/kernel/setup.c
··· 976 976 conswitchp = &dummy_con; 977 977 #endif 978 978 #endif 979 - early_trap_init(); 980 979 981 980 if (mdesc->init_early) 982 981 mdesc->init_early();
+8 -16
arch/arm/kernel/signal.c
··· 66 66 */ 67 67 asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask) 68 68 { 69 - mask &= _BLOCKABLE; 70 - spin_lock_irq(&current->sighand->siglock); 69 + sigset_t blocked; 70 + 71 71 current->saved_sigmask = current->blocked; 72 - siginitset(&current->blocked, mask); 73 - recalc_sigpending(); 74 - spin_unlock_irq(&current->sighand->siglock); 72 + 73 + mask &= _BLOCKABLE; 74 + siginitset(&blocked, mask); 75 + set_current_blocked(&blocked); 75 76 76 77 current->state = TASK_INTERRUPTIBLE; 77 78 schedule(); ··· 281 280 err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); 282 281 if (err == 0) { 283 282 sigdelsetmask(&set, ~_BLOCKABLE); 284 - spin_lock_irq(&current->sighand->siglock); 285 - current->blocked = set; 286 - recalc_sigpending(); 287 - spin_unlock_irq(&current->sighand->siglock); 283 + set_current_blocked(&set); 288 284 } 289 285 290 286 __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err); ··· 634 636 /* 635 637 * Block the signal if we were successful. 636 638 */ 637 - spin_lock_irq(&tsk->sighand->siglock); 638 - sigorsets(&tsk->blocked, &tsk->blocked, 639 - &ka->sa.sa_mask); 640 - if (!(ka->sa.sa_flags & SA_NODEFER)) 641 - sigaddset(&tsk->blocked, sig); 642 - recalc_sigpending(); 643 - spin_unlock_irq(&tsk->sighand->siglock); 639 + block_sigmask(ka, sig); 644 640 645 641 return 0; 646 642 }
+6 -11
arch/arm/kernel/smp.c
··· 58 58 IPI_CPU_STOP, 59 59 }; 60 60 61 + static DECLARE_COMPLETION(cpu_running); 62 + 61 63 int __cpuinit __cpu_up(unsigned int cpu) 62 64 { 63 65 struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); ··· 100 98 */ 101 99 ret = boot_secondary(cpu, idle); 102 100 if (ret == 0) { 103 - unsigned long timeout; 104 - 105 101 /* 106 102 * CPU was successfully started, wait for it 107 103 * to come online or time out. 108 104 */ 109 - timeout = jiffies + HZ; 110 - while (time_before(jiffies, timeout)) { 111 - if (cpu_online(cpu)) 112 - break; 113 - 114 - udelay(10); 115 - barrier(); 116 - } 105 + wait_for_completion_timeout(&cpu_running, 106 + msecs_to_jiffies(1000)); 117 107 118 108 if (!cpu_online(cpu)) { 119 109 pr_crit("CPU%u: failed to come online\n", cpu); ··· 282 288 /* 283 289 * OK, now it's safe to let the boot CPU continue. Wait for 284 290 * the CPU migration code to notice that the CPU is online 285 - * before we continue. 291 + * before we continue - which happens after __cpu_up returns. 286 292 */ 287 293 set_cpu_online(cpu, true); 294 + complete(&cpu_running); 288 295 289 296 /* 290 297 * Setup the percpu timer for this CPU.
-4
arch/arm/kernel/time.c
··· 25 25 #include <linux/timer.h> 26 26 #include <linux/irq.h> 27 27 28 - #include <linux/mc146818rtc.h> 29 - 30 28 #include <asm/leds.h> 31 29 #include <asm/thread_info.h> 32 30 #include <asm/sched_clock.h> ··· 147 149 { 148 150 system_timer = machine_desc->timer; 149 151 system_timer->init(); 150 - #ifdef CONFIG_HAVE_SCHED_CLOCK 151 152 sched_clock_postinit(); 152 - #endif 153 153 } 154 154
+11 -8
arch/arm/kernel/traps.c
··· 227 227 #else 228 228 #define S_SMP "" 229 229 #endif 230 + #ifdef CONFIG_THUMB2_KERNEL 231 + #define S_ISA " THUMB2" 232 + #else 233 + #define S_ISA " ARM" 234 + #endif 230 235 231 236 static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) 232 237 { ··· 239 234 static int die_counter; 240 235 int ret; 241 236 242 - printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", 243 - str, err, ++die_counter); 237 + printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP 238 + S_ISA "\n", str, err, ++die_counter); 244 239 245 240 /* trap and error numbers are mostly meaningless on ARM */ 246 241 ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); ··· 789 784 memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4); 790 785 } 791 786 792 - void __init early_trap_init(void) 787 + void __init early_trap_init(void *vectors_base) 793 788 { 794 - #if defined(CONFIG_CPU_USE_DOMAINS) 795 - unsigned long vectors = CONFIG_VECTORS_BASE; 796 - #else 797 - unsigned long vectors = (unsigned long)vectors_page; 798 - #endif 789 + unsigned long vectors = (unsigned long)vectors_base; 799 790 extern char __stubs_start[], __stubs_end[]; 800 791 extern char __vectors_start[], __vectors_end[]; 801 792 extern char __kuser_helper_start[], __kuser_helper_end[]; 802 793 int kuser_sz = __kuser_helper_end - __kuser_helper_start; 794 + 795 + vectors_page = vectors_base; 803 796 804 797 /* 805 798 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
+10 -18
arch/arm/mach-davinci/time.c
··· 19 19 #include <linux/err.h> 20 20 #include <linux/platform_device.h> 21 21 22 - #include <mach/hardware.h> 22 + #include <asm/sched_clock.h> 23 23 #include <asm/mach/irq.h> 24 24 #include <asm/mach/time.h> 25 + 25 26 #include <mach/cputype.h> 27 + #include <mach/hardware.h> 26 28 #include <mach/time.h> 29 + 27 30 #include "clock.h" 28 31 29 32 static struct clock_event_device clockevent_davinci; ··· 275 272 return (cycles_t)timer32_read(t); 276 273 } 277 274 278 - /* 279 - * Kernel assumes that sched_clock can be called early but may not have 280 - * things ready yet. 281 - */ 282 - static cycle_t read_dummy(struct clocksource *cs) 283 - { 284 - return 0; 285 - } 286 - 287 - 288 275 static struct clocksource clocksource_davinci = { 289 276 .rating = 300, 290 - .read = read_dummy, 277 + .read = read_cycles, 291 278 .mask = CLOCKSOURCE_MASK(32), 292 279 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 293 280 }; ··· 285 292 /* 286 293 * Overwrite weak default sched_clock with something more precise 287 294 */ 288 - unsigned long long notrace sched_clock(void) 295 + static u32 notrace davinci_read_sched_clock(void) 289 296 { 290 - const cycle_t cyc = clocksource_davinci.read(&clocksource_davinci); 291 - 292 - return clocksource_cyc2ns(cyc, clocksource_davinci.mult, 293 - clocksource_davinci.shift); 297 + return timer32_read(&timers[TID_CLOCKSOURCE]); 294 298 } 295 299 296 300 /* ··· 387 397 davinci_clock_tick_rate = clk_get_rate(timer_clk); 388 398 389 399 /* setup clocksource */ 390 - clocksource_davinci.read = read_cycles; 391 400 clocksource_davinci.name = id_to_name[clocksource_id]; 392 401 if (clocksource_register_hz(&clocksource_davinci, 393 402 davinci_clock_tick_rate)) 394 403 printk(err, clocksource_davinci.name); 404 + 405 + setup_sched_clock(davinci_read_sched_clock, 32, 406 + davinci_clock_tick_rate); 395 407 396 408 /* setup clockevent */ 397 409 clockevent_davinci.name = id_to_name[timers[TID_CLOCKEVENT].id];
-1
arch/arm/mach-highbank/highbank.c
··· 35 35 #include <asm/mach/arch.h> 36 36 #include <asm/mach/map.h> 37 37 #include <asm/mach/time.h> 38 - #include <mach/irqs.h> 39 38 40 39 #include "core.h" 41 40 #include "sysregs.h"
-6
arch/arm/mach-highbank/include/mach/irqs.h
··· 1 - #ifndef __MACH_IRQS_H 2 - #define __MACH_IRQS_H 3 - 4 - #define NR_IRQS 192 5 - 6 - #endif
+2 -1
arch/arm/mach-integrator/core.c
··· 25 25 26 26 #include <mach/hardware.h> 27 27 #include <mach/platform.h> 28 - #include <asm/irq.h> 29 28 #include <mach/cm.h> 29 + #include <mach/irqs.h> 30 + 30 31 #include <asm/leds.h> 31 32 #include <asm/mach-types.h> 32 33 #include <asm/mach/time.h>
+2 -1
arch/arm/mach-integrator/include/mach/irqs.h
··· 78 78 #define IRQ_SIC_CP_LMINT7 46 79 79 #define IRQ_SIC_END 46 80 80 81 - #define NR_IRQS 47 81 + #define NR_IRQS_INTEGRATOR_AP 34 82 + #define NR_IRQS_INTEGRATOR_CP 47 82 83
+9 -1
arch/arm/mach-integrator/integrator_ap.c
··· 38 38 #include <mach/hardware.h> 39 39 #include <mach/platform.h> 40 40 #include <asm/hardware/arm_timer.h> 41 - #include <asm/irq.h> 42 41 #include <asm/setup.h> 43 42 #include <asm/param.h> /* HZ */ 44 43 #include <asm/mach-types.h> 44 + #include <asm/sched_clock.h> 45 45 46 46 #include <mach/lm.h> 47 + #include <mach/irqs.h> 47 48 48 49 #include <asm/mach/arch.h> 49 50 #include <asm/mach/irq.h> ··· 326 325 327 326 static unsigned long timer_reload; 328 327 328 + static u32 notrace integrator_read_sched_clock(void) 329 + { 330 + return -readl((void __iomem *) TIMER2_VA_BASE + TIMER_VALUE); 331 + } 332 + 329 333 static void integrator_clocksource_init(unsigned long inrate) 330 334 { 331 335 void __iomem *base = (void __iomem *)TIMER2_VA_BASE; ··· 347 341 348 342 clocksource_mmio_init(base + TIMER_VALUE, "timer2", 349 343 rate, 200, 16, clocksource_mmio_readl_down); 344 + setup_sched_clock(integrator_read_sched_clock, 16, rate); 350 345 } 351 346 352 347 static void __iomem * const clkevt_base = (void __iomem *)TIMER1_VA_BASE; ··· 475 468 .atag_offset = 0x100, 476 469 .reserve = integrator_reserve, 477 470 .map_io = ap_map_io, 471 + .nr_irqs = NR_IRQS_INTEGRATOR_AP, 478 472 .init_early = integrator_init_early, 479 473 .init_irq = ap_init_irq, 480 474 .timer = &ap_timer,
+2 -1
arch/arm/mach-integrator/integrator_cp.c
··· 26 26 27 27 #include <mach/hardware.h> 28 28 #include <mach/platform.h> 29 - #include <asm/irq.h> 30 29 #include <asm/setup.h> 31 30 #include <asm/mach-types.h> 32 31 #include <asm/hardware/arm_timer.h> ··· 33 34 34 35 #include <mach/cm.h> 35 36 #include <mach/lm.h> 37 + #include <mach/irqs.h> 36 38 37 39 #include <asm/mach/arch.h> 38 40 #include <asm/mach/irq.h> ··· 464 464 .atag_offset = 0x100, 465 465 .reserve = integrator_reserve, 466 466 .map_io = intcp_map_io, 467 + .nr_irqs = NR_IRQS_INTEGRATOR_CP, 467 468 .init_early = intcp_init_early, 468 469 .init_irq = intcp_init_irq, 469 470 .timer = &cp_timer,
+2 -1
arch/arm/mach-integrator/pci.c
··· 26 26 #include <linux/interrupt.h> 27 27 #include <linux/init.h> 28 28 29 - #include <asm/irq.h> 30 29 #include <asm/mach/pci.h> 31 30 #include <asm/mach-types.h> 31 + 32 + #include <mach/irqs.h> 32 33 33 34 /* 34 35 * A small note about bridges and interrupts. The DECchip 21050 (and
+2 -1
arch/arm/mach-integrator/pci_v3.c
··· 30 30 31 31 #include <mach/hardware.h> 32 32 #include <mach/platform.h> 33 - #include <asm/irq.h> 33 + #include <mach/irqs.h> 34 + 34 35 #include <asm/signal.h> 35 36 #include <asm/mach/pci.h> 36 37 #include <asm/irq_regs.h>
+3 -2
arch/arm/mach-mmp/aspenite.c
··· 23 23 #include <mach/addr-map.h> 24 24 #include <mach/mfp-pxa168.h> 25 25 #include <mach/pxa168.h> 26 + #include <mach/irqs.h> 26 27 #include <video/pxa168fb.h> 27 28 #include <linux/input.h> 28 29 #include <plat/pxa27x_keypad.h> ··· 240 239 241 240 MACHINE_START(ASPENITE, "PXA168-based Aspenite Development Platform") 242 241 .map_io = mmp_map_io, 243 - .nr_irqs = IRQ_BOARD_START, 242 + .nr_irqs = MMP_NR_IRQS, 244 243 .init_irq = pxa168_init_irq, 245 244 .timer = &pxa168_timer, 246 245 .init_machine = common_init, ··· 249 248 250 249 MACHINE_START(ZYLONITE2, "PXA168-based Zylonite2 Development Platform") 251 250 .map_io = mmp_map_io, 252 - .nr_irqs = IRQ_BOARD_START, 251 + .nr_irqs = MMP_NR_IRQS, 253 252 .init_irq = pxa168_init_irq, 254 253 .timer = &pxa168_timer, 255 254 .init_machine = common_init,
+1
arch/arm/mach-mmp/avengers_lite.c
··· 43 43 44 44 MACHINE_START(AVENGERS_LITE, "PXA168 Avengers lite Development Platform") 45 45 .map_io = mmp_map_io, 46 + .nr_irqs = MMP_NR_IRQS, 46 47 .init_irq = pxa168_init_irq, 47 48 .timer = &pxa168_timer, 48 49 .init_machine = avengers_lite_init,
+2 -2
arch/arm/mach-mmp/brownstone.c
··· 28 28 29 29 #include "common.h" 30 30 31 - #define BROWNSTONE_NR_IRQS (IRQ_BOARD_START + 40) 31 + #define BROWNSTONE_NR_IRQS (MMP_NR_IRQS + 40) 32 32 33 33 #define GPIO_5V_ENABLE (89) 34 34 ··· 158 158 }; 159 159 160 160 static struct max8925_platform_data brownstone_max8925_info = { 161 - .irq_base = IRQ_BOARD_START, 161 + .irq_base = MMP_NR_IRQS, 162 162 }; 163 163 164 164 static struct i2c_board_info brownstone_twsi1_info[] = {
+2 -1
arch/arm/mach-mmp/flint.c
··· 23 23 #include <mach/addr-map.h> 24 24 #include <mach/mfp-mmp2.h> 25 25 #include <mach/mmp2.h> 26 + #include <mach/irqs.h> 26 27 27 28 #include "common.h" 28 29 29 - #define FLINT_NR_IRQS (IRQ_BOARD_START + 48) 30 + #define FLINT_NR_IRQS (MMP_NR_IRQS + 48) 30 31 31 32 static unsigned long flint_pin_config[] __initdata = { 32 33 /* UART1 */
+1 -1
arch/arm/mach-mmp/gplugd.c
··· 191 191 192 192 MACHINE_START(GPLUGD, "PXA168-based GuruPlug Display (gplugD) Platform") 193 193 .map_io = mmp_map_io, 194 - .nr_irqs = IRQ_BOARD_START, 194 + .nr_irqs = MMP_NR_IRQS, 195 195 .init_irq = pxa168_init_irq, 196 196 .timer = &pxa168_timer, 197 197 .init_machine = gplugd_init,
+1 -2
arch/arm/mach-mmp/include/mach/irqs.h
··· 223 223 #define MMP_GPIO_TO_IRQ(gpio) (IRQ_GPIO_START + (gpio)) 224 224 225 225 #define IRQ_BOARD_START (IRQ_GPIO_START + MMP_NR_BUILTIN_GPIO) 226 - 227 - #define NR_IRQS (IRQ_BOARD_START) 226 + #define MMP_NR_IRQS IRQ_BOARD_START 228 227 229 228 #endif /* __ASM_MACH_IRQS_H */
+1
arch/arm/mach-mmp/irq-mmp2.c
··· 15 15 #include <linux/irq.h> 16 16 #include <linux/io.h> 17 17 18 + #include <mach/irqs.h> 18 19 #include <mach/regs-icu.h> 19 20 #include <mach/mmp2.h> 20 21
+3 -2
arch/arm/mach-mmp/jasper.c
··· 19 19 #include <linux/mfd/max8925.h> 20 20 #include <linux/interrupt.h> 21 21 22 + #include <mach/irqs.h> 22 23 #include <asm/mach-types.h> 23 24 #include <asm/mach/arch.h> 24 25 #include <mach/addr-map.h> ··· 28 27 29 28 #include "common.h" 30 29 31 - #define JASPER_NR_IRQS (IRQ_BOARD_START + 48) 30 + #define JASPER_NR_IRQS (MMP_NR_IRQS + 48) 32 31 33 32 static unsigned long jasper_pin_config[] __initdata = { 34 33 /* UART1 */ ··· 136 135 static struct max8925_platform_data jasper_max8925_info = { 137 136 .backlight = &jasper_backlight_data, 138 137 .power = &jasper_power_data, 139 - .irq_base = IRQ_BOARD_START, 138 + .irq_base = MMP_NR_IRQS, 140 139 }; 141 140 142 141 static struct i2c_board_info jasper_twsi1_info[] = {
+1
arch/arm/mach-mmp/tavorevb.c
··· 101 101 102 102 MACHINE_START(TAVOREVB, "PXA910 Evaluation Board (aka TavorEVB)") 103 103 .map_io = mmp_map_io, 104 + .nr_irqs = MMP_NR_IRQS, 104 105 .init_irq = pxa910_init_irq, 105 106 .timer = &pxa910_timer, 106 107 .init_machine = tavorevb_init,
+2 -1
arch/arm/mach-mmp/teton_bga.c
··· 26 26 #include <mach/mfp-pxa168.h> 27 27 #include <mach/pxa168.h> 28 28 #include <mach/teton_bga.h> 29 + #include <mach/irqs.h> 29 30 30 31 #include "common.h" 31 32 ··· 84 83 85 84 MACHINE_START(TETON_BGA, "PXA168-based Teton BGA Development Platform") 86 85 .map_io = mmp_map_io, 87 - .nr_irqs = IRQ_BOARD_START, 86 + .nr_irqs = MMP_NR_IRQS, 88 87 .init_irq = pxa168_init_irq, 89 88 .timer = &pxa168_timer, 90 89 .init_machine = teton_bga_init,
+2 -2
arch/arm/mach-mmp/ttc_dkb.c
··· 38 38 * 16 board interrupts -- PCA9575 GPIO expander 39 39 * 24 board interrupts -- 88PM860x PMIC 40 40 */ 41 - #define TTCDKB_NR_IRQS (IRQ_BOARD_START + 16 + 16 + 24) 41 + #define TTCDKB_NR_IRQS (MMP_NR_IRQS + 16 + 16 + 24) 42 42 43 43 static unsigned long ttc_dkb_pin_config[] __initdata = { 44 44 /* UART2 */ ··· 131 131 static struct pca953x_platform_data max7312_data[] = { 132 132 { 133 133 .gpio_base = TTCDKB_GPIO_EXT0(0), 134 - .irq_base = IRQ_BOARD_START, 134 + .irq_base = MMP_NR_IRQS, 135 135 }, 136 136 }; 137 137
+10 -2
arch/arm/mach-msm/timer.c
··· 24 24 #include <asm/mach/time.h> 25 25 #include <asm/hardware/gic.h> 26 26 #include <asm/localtimer.h> 27 + #include <asm/sched_clock.h> 27 28 28 29 #include <mach/msm_iomap.h> 29 30 #include <mach/cpu.h> ··· 106 105 107 106 static void __iomem *source_base; 108 107 109 - static cycle_t msm_read_timer_count(struct clocksource *cs) 108 + static notrace cycle_t msm_read_timer_count(struct clocksource *cs) 110 109 { 111 110 return readl_relaxed(source_base + TIMER_COUNT_VAL); 112 111 } 113 112 114 - static cycle_t msm_read_timer_count_shift(struct clocksource *cs) 113 + static notrace cycle_t msm_read_timer_count_shift(struct clocksource *cs) 115 114 { 116 115 /* 117 116 * Shift timer count down by a constant due to unreliable lower bits ··· 166 165 .stop = msm_local_timer_stop, 167 166 }; 168 167 #endif /* CONFIG_LOCAL_TIMERS */ 168 + 169 + static notrace u32 msm_sched_clock_read(void) 170 + { 171 + return msm_clocksource.read(&msm_clocksource); 172 + } 169 173 170 174 static void __init msm_timer_init(void) 171 175 { ··· 238 232 res = clocksource_register_hz(cs, dgt_hz); 239 233 if (res) 240 234 pr_err("clocksource_register failed\n"); 235 + setup_sched_clock(msm_sched_clock_read, 236 + cpu_is_msm7x01() ? 32 - MSM_DGT_SHIFT : 32, dgt_hz); 241 237 } 242 238 243 239 struct sys_timer msm_timer = {
-20
arch/arm/mach-picoxcell/include/mach/irqs.h
··· 1 - /* 2 - * Copyright (c) 2011 Picochip Ltd., Jamie Iles 3 - * 4 - * This program is free software; you can redistribute it and/or modify 5 - * it under the terms of the GNU General Public License as published by 6 - * the Free Software Foundation; either version 2 of the License, or 7 - * (at your option) any later version. 8 - * 9 - * This program is distributed in the hope that it will be useful, 10 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 - * GNU General Public License for more details. 13 - */ 14 - #ifndef __MACH_IRQS_H 15 - #define __MACH_IRQS_H 16 - 17 - /* We dynamically allocate our irq_desc's. */ 18 - #define NR_IRQS 0 19 - 20 - #endif /* __MACH_IRQS_H */
+7 -14
arch/arm/mach-prima2/timer.c
··· 18 18 #include <linux/of.h> 19 19 #include <linux/of_address.h> 20 20 #include <mach/map.h> 21 + #include <asm/sched_clock.h> 21 22 #include <asm/mach/time.h> 22 23 23 24 #define SIRFSOC_TIMER_COUNTER_LO 0x0000 ··· 166 165 }; 167 166 168 167 /* Overwrite weak default sched_clock with more precise one */ 169 - unsigned long long notrace sched_clock(void) 168 + static u32 notrace sirfsoc_read_sched_clock(void) 170 169 { 171 - static int is_mapped; 172 - 173 - /* 174 - * sched_clock is called earlier than .init of sys_timer 175 - * if we map timer memory in .init of sys_timer, system 176 - * will panic due to illegal memory access 177 - */ 178 - if (!is_mapped) { 179 - sirfsoc_of_timer_map(); 180 - is_mapped = 1; 181 - } 182 - 183 - return sirfsoc_timer_read(NULL) * (NSEC_PER_SEC / CLOCK_TICK_RATE); 170 + return (u32)(sirfsoc_timer_read(NULL) & 0xffffffff); 184 171 } 185 172 186 173 static void __init sirfsoc_clockevent_init(void) ··· 199 210 BUG_ON(rate < CLOCK_TICK_RATE); 200 211 BUG_ON(rate % CLOCK_TICK_RATE); 201 212 213 + sirfsoc_of_timer_map(); 214 + 202 215 writel_relaxed(rate / CLOCK_TICK_RATE / 2 - 1, sirfsoc_timer_base + SIRFSOC_TIMER_DIV); 203 216 writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO); 204 217 writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI); 205 218 writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS); 206 219 207 220 BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE)); 221 + 222 + setup_sched_clock(sirfsoc_read_sched_clock, 32, CLOCK_TICK_RATE); 208 223 209 224 BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq)); 210 225
+1
arch/arm/mach-pxa/capc7117.c
··· 150 150 "Embedian CAPC-7117 evaluation kit based on the MXM-8x10 CoM") 151 151 .atag_offset = 0x100, 152 152 .map_io = pxa3xx_map_io, 153 + .nr_irqs = PXA_NR_IRQS, 153 154 .init_irq = pxa3xx_init_irq, 154 155 .handle_irq = pxa3xx_handle_irq, 155 156 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/cm-x300.c
··· 854 854 MACHINE_START(CM_X300, "CM-X300 module") 855 855 .atag_offset = 0x100, 856 856 .map_io = pxa3xx_map_io, 857 + .nr_irqs = PXA_NR_IRQS, 857 858 .init_irq = pxa3xx_init_irq, 858 859 .handle_irq = pxa3xx_handle_irq, 859 860 .timer = &pxa_timer,
+2
arch/arm/mach-pxa/colibri-pxa270.c
··· 310 310 .atag_offset = 0x100, 311 311 .init_machine = colibri_pxa270_init, 312 312 .map_io = pxa27x_map_io, 313 + .nr_irqs = PXA_NR_IRQS, 313 314 .init_irq = pxa27x_init_irq, 314 315 .handle_irq = pxa27x_handle_irq, 315 316 .timer = &pxa_timer, ··· 321 320 .atag_offset = 0x100, 322 321 .init_machine = colibri_pxa270_income_init, 323 322 .map_io = pxa27x_map_io, 323 + .nr_irqs = PXA_NR_IRQS, 324 324 .init_irq = pxa27x_init_irq, 325 325 .handle_irq = pxa27x_handle_irq, 326 326 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/colibri-pxa300.c
··· 186 186 .atag_offset = 0x100, 187 187 .init_machine = colibri_pxa300_init, 188 188 .map_io = pxa3xx_map_io, 189 + .nr_irqs = PXA_NR_IRQS, 189 190 .init_irq = pxa3xx_init_irq, 190 191 .handle_irq = pxa3xx_handle_irq, 191 192 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/colibri-pxa320.c
··· 256 256 .atag_offset = 0x100, 257 257 .init_machine = colibri_pxa320_init, 258 258 .map_io = pxa3xx_map_io, 259 + .nr_irqs = PXA_NR_IRQS, 259 260 .init_irq = pxa3xx_init_irq, 260 261 .handle_irq = pxa3xx_handle_irq, 261 262 .timer = &pxa_timer,
+3
arch/arm/mach-pxa/corgi.c
··· 729 729 MACHINE_START(CORGI, "SHARP Corgi") 730 730 .fixup = fixup_corgi, 731 731 .map_io = pxa25x_map_io, 732 + .nr_irqs = PXA_NR_IRQS, 732 733 .init_irq = pxa25x_init_irq, 733 734 .handle_irq = pxa25x_handle_irq, 734 735 .init_machine = corgi_init, ··· 742 741 MACHINE_START(SHEPHERD, "SHARP Shepherd") 743 742 .fixup = fixup_corgi, 744 743 .map_io = pxa25x_map_io, 744 + .nr_irqs = PXA_NR_IRQS, 745 745 .init_irq = pxa25x_init_irq, 746 746 .handle_irq = pxa25x_handle_irq, 747 747 .init_machine = corgi_init, ··· 755 753 MACHINE_START(HUSKY, "SHARP Husky") 756 754 .fixup = fixup_corgi, 757 755 .map_io = pxa25x_map_io, 756 + .nr_irqs = PXA_NR_IRQS, 758 757 .init_irq = pxa25x_init_irq, 759 758 .handle_irq = pxa25x_handle_irq, 760 759 .init_machine = corgi_init,
+1
arch/arm/mach-pxa/csb726.c
··· 274 274 MACHINE_START(CSB726, "Cogent CSB726") 275 275 .atag_offset = 0x100, 276 276 .map_io = pxa27x_map_io, 277 + .nr_irqs = PXA_NR_IRQS, 277 278 .init_irq = pxa27x_init_irq, 278 279 .handle_irq = pxa27x_handle_irq, 279 280 .init_machine = csb726_init,
+1
arch/arm/mach-pxa/devices.c
··· 12 12 #include <mach/pxafb.h> 13 13 #include <mach/mmc.h> 14 14 #include <mach/irda.h> 15 + #include <mach/irqs.h> 15 16 #include <mach/ohci.h> 16 17 #include <plat/pxa27x_keypad.h> 17 18 #include <mach/camera.h>
+2
arch/arm/mach-pxa/em-x270.c
··· 1301 1301 MACHINE_START(EM_X270, "Compulab EM-X270") 1302 1302 .atag_offset = 0x100, 1303 1303 .map_io = pxa27x_map_io, 1304 + .nr_irqs = PXA_NR_IRQS, 1304 1305 .init_irq = pxa27x_init_irq, 1305 1306 .handle_irq = pxa27x_handle_irq, 1306 1307 .timer = &pxa_timer, ··· 1312 1311 MACHINE_START(EXEDA, "Compulab eXeda") 1313 1312 .atag_offset = 0x100, 1314 1313 .map_io = pxa27x_map_io, 1314 + .nr_irqs = PXA_NR_IRQS, 1315 1315 .init_irq = pxa27x_init_irq, 1316 1316 .handle_irq = pxa27x_handle_irq, 1317 1317 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/gumstix.c
··· 235 235 MACHINE_START(GUMSTIX, "Gumstix") 236 236 .atag_offset = 0x100, /* match u-boot bi_boot_params */ 237 237 .map_io = pxa25x_map_io, 238 + .nr_irqs = PXA_NR_IRQS, 238 239 .init_irq = pxa25x_init_irq, 239 240 .handle_irq = pxa25x_handle_irq, 240 241 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/h5000.c
··· 205 205 MACHINE_START(H5400, "HP iPAQ H5000") 206 206 .atag_offset = 0x100, 207 207 .map_io = pxa25x_map_io, 208 + .nr_irqs = PXA_NR_IRQS, 208 209 .init_irq = pxa25x_init_irq, 209 210 .handle_irq = pxa25x_handle_irq, 210 211 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/himalaya.c
··· 160 160 MACHINE_START(HIMALAYA, "HTC Himalaya") 161 161 .atag_offset = 0x100, 162 162 .map_io = pxa25x_map_io, 163 + .nr_irqs = PXA_NR_IRQS, 163 164 .init_irq = pxa25x_init_irq, 164 165 .handle_irq = pxa25x_handle_irq, 165 166 .init_machine = himalaya_init,
+1
arch/arm/mach-pxa/icontrol.c
··· 193 193 MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM") 194 194 .atag_offset = 0x100, 195 195 .map_io = pxa3xx_map_io, 196 + .nr_irqs = PXA_NR_IRQS, 196 197 .init_irq = pxa3xx_init_irq, 197 198 .handle_irq = pxa3xx_handle_irq, 198 199 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/idp.c
··· 195 195 MACHINE_START(PXA_IDP, "Vibren PXA255 IDP") 196 196 /* Maintainer: Vibren Technologies */ 197 197 .map_io = idp_map_io, 198 + .nr_irqs = PXA_NR_IRQS, 198 199 .init_irq = pxa25x_init_irq, 199 200 .handle_irq = pxa25x_handle_irq, 200 201 .timer = &pxa_timer,
+1 -1
arch/arm/mach-pxa/include/mach/irqs.h
··· 100 100 */ 101 101 #define IRQ_BOARD_START (PXA_GPIO_IRQ_BASE + PXA_NR_BUILTIN_GPIO) 102 102 103 - #define NR_IRQS (IRQ_BOARD_START) 103 + #define PXA_NR_IRQS (IRQ_BOARD_START) 104 104 105 105 #ifndef __ASSEMBLY__ 106 106 struct irq_data;
+2
arch/arm/mach-pxa/include/mach/mainstone.h
··· 13 13 #ifndef ASM_ARCH_MAINSTONE_H 14 14 #define ASM_ARCH_MAINSTONE_H 15 15 16 + #include <mach/irqs.h> 17 + 16 18 #define MST_ETH_PHYS PXA_CS4_PHYS 17 19 18 20 #define MST_FPGA_PHYS PXA_CS2_PHYS
+1
arch/arm/mach-pxa/mioa701.c
··· 758 758 .atag_offset = 0x100, 759 759 .restart_mode = 's', 760 760 .map_io = &pxa27x_map_io, 761 + .nr_irqs = PXA_NR_IRQS, 761 762 .init_irq = &pxa27x_init_irq, 762 763 .handle_irq = &pxa27x_handle_irq, 763 764 .init_machine = mioa701_machine_init,
+1
arch/arm/mach-pxa/mp900.c
··· 95 95 .atag_offset = 0x220100, 96 96 .timer = &pxa_timer, 97 97 .map_io = pxa25x_map_io, 98 + .nr_irqs = PXA_NR_IRQS, 98 99 .init_irq = pxa25x_init_irq, 99 100 .handle_irq = pxa25x_handle_irq, 100 101 .init_machine = mp900c_init,
+1
arch/arm/mach-pxa/palmld.c
··· 344 344 MACHINE_START(PALMLD, "Palm LifeDrive") 345 345 .atag_offset = 0x100, 346 346 .map_io = palmld_map_io, 347 + .nr_irqs = PXA_NR_IRQS, 347 348 .init_irq = pxa27x_init_irq, 348 349 .handle_irq = pxa27x_handle_irq, 349 350 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/palmt5.c
··· 205 205 .atag_offset = 0x100, 206 206 .map_io = pxa27x_map_io, 207 207 .reserve = palmt5_reserve, 208 + .nr_irqs = PXA_NR_IRQS, 208 209 .init_irq = pxa27x_init_irq, 209 210 .handle_irq = pxa27x_handle_irq, 210 211 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/palmtc.c
··· 539 539 MACHINE_START(PALMTC, "Palm Tungsten|C") 540 540 .atag_offset = 0x100, 541 541 .map_io = pxa25x_map_io, 542 + .nr_irqs = PXA_NR_IRQS, 542 543 .init_irq = pxa25x_init_irq, 543 544 .handle_irq = pxa25x_handle_irq, 544 545 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/palmte2.c
··· 358 358 MACHINE_START(PALMTE2, "Palm Tungsten|E2") 359 359 .atag_offset = 0x100, 360 360 .map_io = pxa25x_map_io, 361 + .nr_irqs = PXA_NR_IRQS, 361 362 .init_irq = pxa25x_init_irq, 362 363 .handle_irq = pxa25x_handle_irq, 363 364 .timer = &pxa_timer,
+2
arch/arm/mach-pxa/palmtreo.c
··· 448 448 .atag_offset = 0x100, 449 449 .map_io = pxa27x_map_io, 450 450 .reserve = treo_reserve, 451 + .nr_irqs = PXA_NR_IRQS, 451 452 .init_irq = pxa27x_init_irq, 452 453 .handle_irq = pxa27x_handle_irq, 453 454 .timer = &pxa_timer, ··· 462 461 .atag_offset = 0x100, 463 462 .map_io = pxa27x_map_io, 464 463 .reserve = treo_reserve, 464 + .nr_irqs = PXA_NR_IRQS, 465 465 .init_irq = pxa27x_init_irq, 466 466 .handle_irq = pxa27x_handle_irq, 467 467 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/palmtx.c
··· 366 366 MACHINE_START(PALMTX, "Palm T|X") 367 367 .atag_offset = 0x100, 368 368 .map_io = palmtx_map_io, 369 + .nr_irqs = PXA_NR_IRQS, 369 370 .init_irq = pxa27x_init_irq, 370 371 .handle_irq = pxa27x_handle_irq, 371 372 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/palmz72.c
··· 401 401 MACHINE_START(PALMZ72, "Palm Zire72") 402 402 .atag_offset = 0x100, 403 403 .map_io = pxa27x_map_io, 404 + .nr_irqs = PXA_NR_IRQS, 404 405 .init_irq = pxa27x_init_irq, 405 406 .handle_irq = pxa27x_handle_irq, 406 407 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/pxa3xx.c
··· 31 31 #include <mach/pm.h> 32 32 #include <mach/dma.h> 33 33 #include <mach/smemc.h> 34 + #include <mach/irqs.h> 34 35 35 36 #include "generic.h" 36 37 #include "devices.h"
+3
arch/arm/mach-pxa/raumfeld.c
··· 1090 1090 .atag_offset = 0x100, 1091 1091 .init_machine = raumfeld_controller_init, 1092 1092 .map_io = pxa3xx_map_io, 1093 + .nr_irqs = PXA_NR_IRQS, 1093 1094 .init_irq = pxa3xx_init_irq, 1094 1095 .handle_irq = pxa3xx_handle_irq, 1095 1096 .timer = &pxa_timer, ··· 1103 1102 .atag_offset = 0x100, 1104 1103 .init_machine = raumfeld_connector_init, 1105 1104 .map_io = pxa3xx_map_io, 1105 + .nr_irqs = PXA_NR_IRQS, 1106 1106 .init_irq = pxa3xx_init_irq, 1107 1107 .handle_irq = pxa3xx_handle_irq, 1108 1108 .timer = &pxa_timer, ··· 1116 1114 .atag_offset = 0x100, 1117 1115 .init_machine = raumfeld_speaker_init, 1118 1116 .map_io = pxa3xx_map_io, 1117 + .nr_irqs = PXA_NR_IRQS, 1119 1118 .init_irq = pxa3xx_init_irq, 1120 1119 .handle_irq = pxa3xx_handle_irq, 1121 1120 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/saar.c
··· 598 598 /* Maintainer: Eric Miao <eric.miao@marvell.com> */ 599 599 .atag_offset = 0x100, 600 600 .map_io = pxa3xx_map_io, 601 + .nr_irqs = PXA_NR_IRQS, 601 602 .init_irq = pxa3xx_init_irq, 602 603 .handle_irq = pxa3xx_handle_irq, 603 604 .timer = &pxa_timer,
+3
arch/arm/mach-pxa/spitz.c
··· 984 984 .restart_mode = 'g', 985 985 .fixup = spitz_fixup, 986 986 .map_io = pxa27x_map_io, 987 + .nr_irqs = PXA_NR_IRQS, 987 988 .init_irq = pxa27x_init_irq, 988 989 .handle_irq = pxa27x_handle_irq, 989 990 .init_machine = spitz_init, ··· 998 997 .restart_mode = 'g', 999 998 .fixup = spitz_fixup, 1000 999 .map_io = pxa27x_map_io, 1000 + .nr_irqs = PXA_NR_IRQS, 1001 1001 .init_irq = pxa27x_init_irq, 1002 1002 .handle_irq = pxa27x_handle_irq, 1003 1003 .init_machine = spitz_init, ··· 1012 1010 .restart_mode = 'g', 1013 1011 .fixup = spitz_fixup, 1014 1012 .map_io = pxa27x_map_io, 1013 + .nr_irqs = PXA_NR_IRQS, 1015 1014 .init_irq = pxa27x_init_irq, 1016 1015 .handle_irq = pxa27x_handle_irq, 1017 1016 .init_machine = spitz_init,
+1
arch/arm/mach-pxa/stargate2.c
··· 1006 1006 #ifdef CONFIG_MACH_INTELMOTE2 1007 1007 MACHINE_START(INTELMOTE2, "IMOTE 2") 1008 1008 .map_io = pxa27x_map_io, 1009 + .nr_irqs = PXA_NR_IRQS, 1009 1010 .init_irq = pxa27x_init_irq, 1010 1011 .handle_irq = pxa27x_handle_irq, 1011 1012 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/tavorevb.c
··· 491 491 /* Maintainer: Eric Miao <eric.miao@marvell.com> */ 492 492 .atag_offset = 0x100, 493 493 .map_io = pxa3xx_map_io, 494 + .nr_irqs = PXA_NR_IRQS, 494 495 .init_irq = pxa3xx_init_irq, 495 496 .handle_irq = pxa3xx_handle_irq, 496 497 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/time.c
··· 22 22 #include <asm/mach/time.h> 23 23 #include <asm/sched_clock.h> 24 24 #include <mach/regs-ost.h> 25 + #include <mach/irqs.h> 25 26 26 27 /* 27 28 * This is PXA's sched_clock implementation. This has a resolution
+2
arch/arm/mach-pxa/trizeps4.c
··· 558 558 .atag_offset = 0x100, 559 559 .init_machine = trizeps4_init, 560 560 .map_io = trizeps4_map_io, 561 + .nr_irqs = PXA_NR_IRQS, 561 562 .init_irq = pxa27x_init_irq, 562 563 .handle_irq = pxa27x_handle_irq, 563 564 .timer = &pxa_timer, ··· 570 569 .atag_offset = 0x100, 571 570 .init_machine = trizeps4_init, 572 571 .map_io = trizeps4_map_io, 572 + .nr_irqs = PXA_NR_IRQS, 573 573 .init_irq = pxa27x_init_irq, 574 574 .handle_irq = pxa27x_handle_irq, 575 575 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/viper.c
··· 995 995 /* Maintainer: Marc Zyngier <maz@misterjones.org> */ 996 996 .atag_offset = 0x100, 997 997 .map_io = viper_map_io, 998 + .nr_irqs = PXA_NR_IRQS, 998 999 .init_irq = viper_init_irq, 999 1000 .handle_irq = pxa25x_handle_irq, 1000 1001 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/vpac270.c
··· 718 718 MACHINE_START(VPAC270, "Voipac PXA270") 719 719 .atag_offset = 0x100, 720 720 .map_io = pxa27x_map_io, 721 + .nr_irqs = PXA_NR_IRQS, 721 722 .init_irq = pxa27x_init_irq, 722 723 .handle_irq = pxa27x_handle_irq, 723 724 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/xcep.c
··· 182 182 .atag_offset = 0x100, 183 183 .init_machine = xcep_init, 184 184 .map_io = pxa25x_map_io, 185 + .nr_irqs = PXA_NR_IRQS, 185 186 .init_irq = pxa25x_init_irq, 186 187 .handle_irq = pxa25x_handle_irq, 187 188 .timer = &pxa_timer,
+1
arch/arm/mach-pxa/z2.c
··· 721 721 MACHINE_START(ZIPIT2, "Zipit Z2") 722 722 .atag_offset = 0x100, 723 723 .map_io = pxa27x_map_io, 724 + .nr_irqs = PXA_NR_IRQS, 724 725 .init_irq = pxa27x_init_irq, 725 726 .handle_irq = pxa27x_handle_irq, 726 727 .timer = &pxa_timer,
+4
arch/arm/mach-shmobile/Kconfig
··· 100 100 101 101 comment "SH-Mobile System Configuration" 102 102 103 + config CPU_HAS_INTEVT 104 + bool 105 + default y 106 + 103 107 menu "Memory configuration" 104 108 105 109 config MEMORY_START
+1
arch/arm/mach-shmobile/board-ag5evm.c
··· 43 43 #include <video/sh_mipi_dsi.h> 44 44 #include <sound/sh_fsi.h> 45 45 #include <mach/hardware.h> 46 + #include <mach/irqs.h> 46 47 #include <mach/sh73a0.h> 47 48 #include <mach/common.h> 48 49 #include <asm/mach-types.h>
+1
arch/arm/mach-shmobile/board-bonito.c
··· 35 35 #include <asm/mach/time.h> 36 36 #include <asm/hardware/cache-l2x0.h> 37 37 #include <mach/r8a7740.h> 38 + #include <mach/irqs.h> 38 39 #include <video/sh_mobile_lcdc.h> 39 40 40 41 /*
+1
arch/arm/mach-shmobile/board-g3evm.c
··· 33 33 #include <linux/input.h> 34 34 #include <linux/input/sh_keysc.h> 35 35 #include <linux/dma-mapping.h> 36 + #include <mach/irqs.h> 36 37 #include <mach/sh7367.h> 37 38 #include <mach/common.h> 38 39 #include <asm/mach-types.h>
+1
arch/arm/mach-shmobile/board-g4evm.c
··· 34 34 #include <linux/mmc/sh_mobile_sdhi.h> 35 35 #include <linux/gpio.h> 36 36 #include <linux/dma-mapping.h> 37 + #include <mach/irqs.h> 37 38 #include <mach/sh7377.h> 38 39 #include <mach/common.h> 39 40 #include <asm/mach-types.h>
+1
arch/arm/mach-shmobile/board-kota2.c
··· 39 39 #include <linux/mfd/tmio.h> 40 40 #include <linux/mmc/sh_mobile_sdhi.h> 41 41 #include <mach/hardware.h> 42 + #include <mach/irqs.h> 42 43 #include <mach/sh73a0.h> 43 44 #include <mach/common.h> 44 45 #include <asm/mach-types.h>
+1
arch/arm/mach-shmobile/board-mackerel.c
··· 54 54 #include <sound/sh_fsi.h> 55 55 56 56 #include <mach/common.h> 57 + #include <mach/irqs.h> 57 58 #include <mach/sh7372.h> 58 59 59 60 #include <asm/mach/arch.h>
+1
arch/arm/mach-shmobile/board-marzen.c
··· 31 31 #include <mach/hardware.h> 32 32 #include <mach/r8a7779.h> 33 33 #include <mach/common.h> 34 + #include <mach/irqs.h> 34 35 #include <asm/mach-types.h> 35 36 #include <asm/mach/arch.h> 36 37 #include <asm/hardware/gic.h>
+1 -5
arch/arm/mach-shmobile/include/mach/irqs.h
··· 1 1 #ifndef __ASM_MACH_IRQS_H 2 2 #define __ASM_MACH_IRQS_H 3 3 4 - #define NR_IRQS 1024 4 + #include <linux/sh_intc.h> 5 5 6 6 /* GIC */ 7 7 #define gic_spi(nr) ((nr) + 32) 8 - 9 - /* INTCA */ 10 - #define evt2irq(evt) (((evt) >> 5) - 16) 11 - #define irq2evt(irq) (((irq) + 16) << 5) 12 8 13 9 /* INTCS */ 14 10 #define INTCS_VECT_BASE 0x2200
+1
arch/arm/mach-shmobile/intc-r8a7740.c
··· 25 25 #include <linux/io.h> 26 26 #include <linux/sh_intc.h> 27 27 #include <mach/intc.h> 28 + #include <mach/irqs.h> 28 29 #include <asm/mach-types.h> 29 30 #include <asm/mach/arch.h> 30 31
+1
arch/arm/mach-shmobile/intc-sh7367.c
··· 23 23 #include <linux/io.h> 24 24 #include <linux/sh_intc.h> 25 25 #include <mach/intc.h> 26 + #include <mach/irqs.h> 26 27 #include <asm/mach-types.h> 27 28 #include <asm/mach/arch.h> 28 29
+1
arch/arm/mach-shmobile/intc-sh7372.c
··· 23 23 #include <linux/io.h> 24 24 #include <linux/sh_intc.h> 25 25 #include <mach/intc.h> 26 + #include <mach/irqs.h> 26 27 #include <asm/mach-types.h> 27 28 #include <asm/mach/arch.h> 28 29
+1
arch/arm/mach-shmobile/intc-sh7377.c
··· 23 23 #include <linux/io.h> 24 24 #include <linux/sh_intc.h> 25 25 #include <mach/intc.h> 26 + #include <mach/irqs.h> 26 27 #include <asm/mach-types.h> 27 28 #include <asm/mach/arch.h> 28 29
+1
arch/arm/mach-shmobile/intc-sh73a0.c
··· 24 24 #include <linux/io.h> 25 25 #include <linux/sh_intc.h> 26 26 #include <mach/intc.h> 27 + #include <mach/irqs.h> 27 28 #include <mach/sh73a0.h> 28 29 #include <asm/hardware/gic.h> 29 30 #include <asm/mach-types.h>
+1
arch/arm/mach-shmobile/setup-r8a7740.c
··· 26 26 #include <linux/sh_timer.h> 27 27 #include <mach/r8a7740.h> 28 28 #include <mach/common.h> 29 + #include <mach/irqs.h> 29 30 #include <asm/mach-types.h> 30 31 #include <asm/mach/map.h> 31 32 #include <asm/mach/arch.h>
+1
arch/arm/mach-shmobile/setup-r8a7779.c
··· 29 29 #include <linux/sh_intc.h> 30 30 #include <linux/sh_timer.h> 31 31 #include <mach/hardware.h> 32 + #include <mach/irqs.h> 32 33 #include <mach/r8a7779.h> 33 34 #include <mach/common.h> 34 35 #include <asm/mach-types.h>
+1
arch/arm/mach-shmobile/setup-sh7367.c
··· 30 30 #include <linux/sh_timer.h> 31 31 #include <mach/hardware.h> 32 32 #include <mach/common.h> 33 + #include <mach/irqs.h> 33 34 #include <asm/mach-types.h> 34 35 #include <asm/mach/arch.h> 35 36 #include <asm/mach/map.h>
+1
arch/arm/mach-shmobile/setup-sh7372.c
··· 33 33 #include <linux/pm_domain.h> 34 34 #include <linux/dma-mapping.h> 35 35 #include <mach/hardware.h> 36 + #include <mach/irqs.h> 36 37 #include <mach/sh7372.h> 37 38 #include <mach/common.h> 38 39 #include <asm/mach/map.h>
+1
arch/arm/mach-shmobile/setup-sh7377.c
··· 32 32 #include <mach/hardware.h> 33 33 #include <mach/common.h> 34 34 #include <asm/mach/map.h> 35 + #include <mach/irqs.h> 35 36 #include <asm/mach-types.h> 36 37 #include <asm/mach/arch.h> 37 38 #include <asm/mach/time.h>
+1
arch/arm/mach-shmobile/setup-sh73a0.c
··· 31 31 #include <linux/sh_intc.h> 32 32 #include <linux/sh_timer.h> 33 33 #include <mach/hardware.h> 34 + #include <mach/irqs.h> 34 35 #include <mach/sh73a0.h> 35 36 #include <mach/common.h> 36 37 #include <asm/mach-types.h>
-1
arch/arm/mach-vexpress/include/mach/io.h
··· 20 20 #ifndef __ASM_ARM_ARCH_IO_H 21 21 #define __ASM_ARM_ARCH_IO_H 22 22 23 - #define __io(a) __typesafe_io(a) 24 23 #define __mem_pci(a) (a) 25 24 26 25 #endif
+11 -11
arch/arm/mm/cache-l2x0.c
··· 30 30 31 31 static void __iomem *l2x0_base; 32 32 static DEFINE_RAW_SPINLOCK(l2x0_lock); 33 - static uint32_t l2x0_way_mask; /* Bitmask of active ways */ 34 - static uint32_t l2x0_size; 33 + static u32 l2x0_way_mask; /* Bitmask of active ways */ 34 + static u32 l2x0_size; 35 35 36 36 struct l2x0_regs l2x0_saved_regs; 37 37 38 38 struct l2x0_of_data { 39 - void (*setup)(const struct device_node *, __u32 *, __u32 *); 39 + void (*setup)(const struct device_node *, u32 *, u32 *); 40 40 void (*save)(void); 41 41 void (*resume)(void); 42 42 }; ··· 288 288 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 289 289 } 290 290 291 - static void l2x0_unlock(__u32 cache_id) 291 + static void l2x0_unlock(u32 cache_id) 292 292 { 293 293 int lockregs; 294 294 int i; ··· 307 307 } 308 308 } 309 309 310 - void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) 310 + void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) 311 311 { 312 - __u32 aux; 313 - __u32 cache_id; 314 - __u32 way_size = 0; 312 + u32 aux; 313 + u32 cache_id; 314 + u32 way_size = 0; 315 315 int ways; 316 316 const char *type; 317 317 ··· 388 388 389 389 #ifdef CONFIG_OF 390 390 static void __init l2x0_of_setup(const struct device_node *np, 391 - __u32 *aux_val, __u32 *aux_mask) 391 + u32 *aux_val, u32 *aux_mask) 392 392 { 393 393 u32 data[2] = { 0, 0 }; 394 394 u32 tag = 0; ··· 422 422 } 423 423 424 424 static void __init pl310_of_setup(const struct device_node *np, 425 - __u32 *aux_val, __u32 *aux_mask) 425 + u32 *aux_val, u32 *aux_mask) 426 426 { 427 427 u32 data[3] = { 0, 0, 0 }; 428 428 u32 tag[3] = { 0, 0, 0 }; ··· 548 548 {} 549 549 }; 550 550 551 - int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask) 551 + int __init l2x0_of_init(u32 aux_val, u32 aux_mask) 552 552 { 553 553 struct device_node *np; 554 554 struct l2x0_of_data *data;
+2 -7
arch/arm/mm/copypage-v4mc.c
··· 23 23 24 24 #include "mm.h" 25 25 26 - /* 27 - * 0xffff8000 to 0xffffffff is reserved for any ARM architecture 28 - * specific hacks for copying pages efficiently. 29 - */ 30 26 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ 31 27 L_PTE_MT_MINICACHE) 32 28 ··· 74 78 75 79 raw_spin_lock(&minicache_lock); 76 80 77 - set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); 78 - flush_tlb_kernel_page(0xffff8000); 81 + set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot)); 79 82 80 - mc_copy_user_page((void *)0xffff8000, kto); 83 + mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); 81 84 82 85 raw_spin_unlock(&minicache_lock); 83 86
+6 -14
arch/arm/mm/copypage-v6.c
··· 24 24 #error FIX ME 25 25 #endif 26 26 27 - #define from_address (0xffff8000) 28 - #define to_address (0xffffc000) 29 - 30 27 static DEFINE_RAW_SPINLOCK(v6_lock); 31 28 32 29 /* ··· 87 90 */ 88 91 raw_spin_lock(&v6_lock); 89 92 90 - set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0); 91 - set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0); 93 + kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT); 94 + kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT); 92 95 93 - kfrom = from_address + (offset << PAGE_SHIFT); 94 - kto = to_address + (offset << PAGE_SHIFT); 95 - 96 - flush_tlb_kernel_page(kfrom); 97 - flush_tlb_kernel_page(kto); 96 + set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL)); 97 + set_top_pte(kto, mk_pte(to, PAGE_KERNEL)); 98 98 99 99 copy_page((void *)kto, (void *)kfrom); 100 100 ··· 105 111 */ 106 112 static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr) 107 113 { 108 - unsigned int offset = CACHE_COLOUR(vaddr); 109 - unsigned long to = to_address + (offset << PAGE_SHIFT); 114 + unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); 110 115 111 116 /* FIXME: not highmem safe */ 112 117 discard_old_kernel_data(page_address(page)); ··· 116 123 */ 117 124 raw_spin_lock(&v6_lock); 118 125 119 - set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0); 120 - flush_tlb_kernel_page(to); 126 + set_top_pte(to, mk_pte(page, PAGE_KERNEL)); 121 127 clear_page((void *)to); 122 128 123 129 raw_spin_unlock(&v6_lock);
+1 -8
arch/arm/mm/copypage-xscale.c
··· 23 23 24 24 #include "mm.h" 25 25 26 - /* 27 - * 0xffff8000 to 0xffffffff is reserved for any ARM architecture 28 - * specific hacks for copying pages efficiently. 29 - */ 30 - #define COPYPAGE_MINICACHE 0xffff8000 31 - 32 26 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ 33 27 L_PTE_MT_MINICACHE) 34 28 ··· 94 100 95 101 raw_spin_lock(&minicache_lock); 96 102 97 - set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); 98 - flush_tlb_kernel_page(COPYPAGE_MINICACHE); 103 + set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot)); 99 104 100 105 mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); 101 106
+13 -7
arch/arm/mm/dma-mapping.c
··· 214 214 core_initcall(consistent_init); 215 215 216 216 static void * 217 - __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) 217 + __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, 218 + const void *caller) 218 219 { 219 220 struct arm_vmregion *c; 220 221 size_t align; ··· 242 241 * Allocate a virtual address in the consistent mapping region. 243 242 */ 244 243 c = arm_vmregion_alloc(&consistent_head, align, size, 245 - gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); 244 + gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller); 246 245 if (c) { 247 246 pte_t *pte; 248 247 int idx = CONSISTENT_PTE_INDEX(c->vm_start); ··· 321 320 322 321 #else /* !CONFIG_MMU */ 323 322 324 - #define __dma_alloc_remap(page, size, gfp, prot) page_address(page) 323 + #define __dma_alloc_remap(page, size, gfp, prot, c) page_address(page) 325 324 #define __dma_free_remap(addr, size) do { } while (0) 326 325 327 326 #endif /* CONFIG_MMU */ 328 327 329 328 static void * 330 329 __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, 331 - pgprot_t prot) 330 + pgprot_t prot, const void *caller) 332 331 { 333 332 struct page *page; 334 333 void *addr; ··· 350 349 return NULL; 351 350 352 351 if (!arch_is_coherent()) 353 - addr = __dma_alloc_remap(page, size, gfp, prot); 352 + addr = __dma_alloc_remap(page, size, gfp, prot, caller); 354 353 else 355 354 addr = page_address(page); 356 355 ··· 375 374 return memory; 376 375 377 376 return __dma_alloc(dev, size, handle, gfp, 378 - pgprot_dmacoherent(pgprot_kernel)); 377 + pgprot_dmacoherent(pgprot_kernel), 378 + __builtin_return_address(0)); 379 379 } 380 380 EXPORT_SYMBOL(dma_alloc_coherent); 381 381 ··· 388 386 dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) 389 387 { 390 388 return __dma_alloc(dev, size, handle, gfp, 391 - pgprot_writecombine(pgprot_kernel)); 389 + pgprot_writecombine(pgprot_kernel), 390 + __builtin_return_address(0)); 392 391 } 393 
392 EXPORT_SYMBOL(dma_alloc_writecombine); 394 393 ··· 726 723 727 724 static int __init dma_debug_do_init(void) 728 725 { 726 + #ifdef CONFIG_MMU 727 + arm_vmregion_create_proc("dma-mappings", &consistent_head); 728 + #endif 729 729 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); 730 730 return 0; 731 731 }
+2 -1
arch/arm/mm/fault.c
··· 165 165 struct siginfo si; 166 166 167 167 #ifdef CONFIG_DEBUG_USER 168 - if (user_debug & UDBG_SEGV) { 168 + if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) || 169 + ((user_debug & UDBG_BUS) && (sig == SIGBUS))) { 169 170 printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n", 170 171 tsk->comm, sig, addr, fsr); 171 172 show_pte(tsk->mm, addr);
+5 -9
arch/arm/mm/flush.c
··· 22 22 23 23 #ifdef CONFIG_CPU_CACHE_VIPT 24 24 25 - #define ALIAS_FLUSH_START 0xffff4000 26 - 27 25 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) 28 26 { 29 - unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); 27 + unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); 30 28 const int zero = 0; 31 29 32 - set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0); 33 - flush_tlb_kernel_page(to); 30 + set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL)); 34 31 35 32 asm( "mcrr p15, 0, %1, %0, c14\n" 36 33 " mcr p15, 0, %2, c7, c10, 4" ··· 38 41 39 42 static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len) 40 43 { 41 - unsigned long colour = CACHE_COLOUR(vaddr); 44 + unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); 42 45 unsigned long offset = vaddr & (PAGE_SIZE - 1); 43 46 unsigned long to; 44 47 45 - set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0); 46 - to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset; 47 - flush_tlb_kernel_page(to); 48 + set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL)); 49 + to = va + offset; 48 50 flush_icache_range(to, to + len); 49 51 } 50 52
+8 -13
arch/arm/mm/highmem.c
··· 69 69 * With debugging enabled, kunmap_atomic forces that entry to 0. 70 70 * Make sure it was indeed properly unmapped. 71 71 */ 72 - BUG_ON(!pte_none(*(TOP_PTE(vaddr)))); 72 + BUG_ON(!pte_none(get_top_pte(vaddr))); 73 73 #endif 74 - set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0); 75 74 /* 76 75 * When debugging is off, kunmap_atomic leaves the previous mapping 77 - * in place, so this TLB flush ensures the TLB is updated with the 78 - * new mapping. 76 + * in place, so the contained TLB flush ensures the TLB is updated 77 + * with the new mapping. 79 78 */ 80 - local_flush_tlb_kernel_page(vaddr); 79 + set_top_pte(vaddr, mk_pte(page, kmap_prot)); 81 80 82 81 return (void *)vaddr; 83 82 } ··· 95 96 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); 96 97 #ifdef CONFIG_DEBUG_HIGHMEM 97 98 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); 98 - set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); 99 - local_flush_tlb_kernel_page(vaddr); 99 + set_top_pte(vaddr, __pte(0)); 100 100 #else 101 101 (void) idx; /* to kill a warning */ 102 102 #endif ··· 119 121 idx = type + KM_TYPE_NR * smp_processor_id(); 120 122 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 121 123 #ifdef CONFIG_DEBUG_HIGHMEM 122 - BUG_ON(!pte_none(*(TOP_PTE(vaddr)))); 124 + BUG_ON(!pte_none(get_top_pte(vaddr))); 123 125 #endif 124 - set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0); 125 - local_flush_tlb_kernel_page(vaddr); 126 + set_top_pte(vaddr, pfn_pte(pfn, kmap_prot)); 126 127 127 128 return (void *)vaddr; 128 129 } ··· 129 132 struct page *kmap_atomic_to_page(const void *ptr) 130 133 { 131 134 unsigned long vaddr = (unsigned long)ptr; 132 - pte_t *pte; 133 135 134 136 if (vaddr < FIXADDR_START) 135 137 return virt_to_page(ptr); 136 138 137 - pte = TOP_PTE(vaddr); 138 - return pte_page(*pte); 139 + return pte_page(get_top_pte(vaddr)); 139 140 }
+4
arch/arm/mm/init.c
··· 658 658 #ifdef CONFIG_HIGHMEM 659 659 " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" 660 660 #endif 661 + #ifdef CONFIG_MODULES 661 662 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" 663 + #endif 662 664 " .text : 0x%p" " - 0x%p" " (%4d kB)\n" 663 665 " .init : 0x%p" " - 0x%p" " (%4d kB)\n" 664 666 " .data : 0x%p" " - 0x%p" " (%4d kB)\n" ··· 679 677 MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * 680 678 (PAGE_SIZE)), 681 679 #endif 680 + #ifdef CONFIG_MODULES 682 681 MLM(MODULES_VADDR, MODULES_END), 682 + #endif 683 683 684 684 MLK_ROUNDUP(_text, _etext), 685 685 MLK_ROUNDUP(__init_begin, __init_end),
+25 -1
arch/arm/mm/mm.h
··· 3 3 /* the upper-most page table pointer */ 4 4 extern pmd_t *top_pmd; 5 5 6 - #define TOP_PTE(x) pte_offset_kernel(top_pmd, x) 6 + /* 7 + * 0xffff8000 to 0xffffffff is reserved for any ARM architecture 8 + * specific hacks for copying pages efficiently, while 0xffff4000 9 + * is reserved for VIPT aliasing flushing by generic code. 10 + * 11 + * Note that we don't allow VIPT aliasing caches with SMP. 12 + */ 13 + #define COPYPAGE_MINICACHE 0xffff8000 14 + #define COPYPAGE_V6_FROM 0xffff8000 15 + #define COPYPAGE_V6_TO 0xffffc000 16 + /* PFN alias flushing, for VIPT caches */ 17 + #define FLUSH_ALIAS_START 0xffff4000 18 + 19 + static inline void set_top_pte(unsigned long va, pte_t pte) 20 + { 21 + pte_t *ptep = pte_offset_kernel(top_pmd, va); 22 + set_pte_ext(ptep, pte, 0); 23 + local_flush_tlb_kernel_page(va); 24 + } 25 + 26 + static inline pte_t get_top_pte(unsigned long va) 27 + { 28 + pte_t *ptep = pte_offset_kernel(top_pmd, va); 29 + return *ptep; 30 + } 7 31 8 32 static inline pmd_t *pmd_off_k(unsigned long virt) 9 33 {
+5 -2
arch/arm/mm/mmu.c
··· 999 999 { 1000 1000 struct map_desc map; 1001 1001 unsigned long addr; 1002 + void *vectors; 1002 1003 1003 1004 /* 1004 1005 * Allocate the vector page early. 1005 1006 */ 1006 - vectors_page = early_alloc(PAGE_SIZE); 1007 + vectors = early_alloc(PAGE_SIZE); 1008 + 1009 + early_trap_init(vectors); 1007 1010 1008 1011 for (addr = VMALLOC_START; addr; addr += PMD_SIZE) 1009 1012 pmd_clear(pmd_off_k(addr)); ··· 1046 1043 * location (0xffff0000). If we aren't using high-vectors, also 1047 1044 * create a mapping at the low-vectors virtual address. 1048 1045 */ 1049 - map.pfn = __phys_to_pfn(virt_to_phys(vectors_page)); 1046 + map.pfn = __phys_to_pfn(virt_to_phys(vectors)); 1050 1047 map.virtual = 0xffff0000; 1051 1048 map.length = PAGE_SIZE; 1052 1049 map.type = MT_HIGH_VECTORS;
+75 -1
arch/arm/mm/vmregion.c
··· 1 + #include <linux/fs.h> 1 2 #include <linux/spinlock.h> 2 3 #include <linux/list.h> 4 + #include <linux/proc_fs.h> 5 + #include <linux/seq_file.h> 3 6 #include <linux/slab.h> 4 7 5 8 #include "vmregion.h" ··· 39 36 40 37 struct arm_vmregion * 41 38 arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, 42 - size_t size, gfp_t gfp) 39 + size_t size, gfp_t gfp, const void *caller) 43 40 { 44 41 unsigned long start = head->vm_start, addr = head->vm_end; 45 42 unsigned long flags; ··· 54 51 new = kmalloc(sizeof(struct arm_vmregion), gfp); 55 52 if (!new) 56 53 goto out; 54 + 55 + new->caller = caller; 57 56 58 57 spin_lock_irqsave(&head->vm_lock, flags); 59 58 ··· 134 129 135 130 kfree(c); 136 131 } 132 + 133 + #ifdef CONFIG_PROC_FS 134 + static int arm_vmregion_show(struct seq_file *m, void *p) 135 + { 136 + struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list); 137 + 138 + seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end, 139 + c->vm_end - c->vm_start); 140 + if (c->caller) 141 + seq_printf(m, " %pS", (void *)c->caller); 142 + seq_putc(m, '\n'); 143 + return 0; 144 + } 145 + 146 + static void *arm_vmregion_start(struct seq_file *m, loff_t *pos) 147 + { 148 + struct arm_vmregion_head *h = m->private; 149 + spin_lock_irq(&h->vm_lock); 150 + return seq_list_start(&h->vm_list, *pos); 151 + } 152 + 153 + static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos) 154 + { 155 + struct arm_vmregion_head *h = m->private; 156 + return seq_list_next(p, &h->vm_list, pos); 157 + } 158 + 159 + static void arm_vmregion_stop(struct seq_file *m, void *p) 160 + { 161 + struct arm_vmregion_head *h = m->private; 162 + spin_unlock_irq(&h->vm_lock); 163 + } 164 + 165 + static const struct seq_operations arm_vmregion_ops = { 166 + .start = arm_vmregion_start, 167 + .stop = arm_vmregion_stop, 168 + .next = arm_vmregion_next, 169 + .show = arm_vmregion_show, 170 + }; 171 + 172 + static int arm_vmregion_open(struct inode *inode, struct 
file *file) 173 + { 174 + struct arm_vmregion_head *h = PDE(inode)->data; 175 + int ret = seq_open(file, &arm_vmregion_ops); 176 + if (!ret) { 177 + struct seq_file *m = file->private_data; 178 + m->private = h; 179 + } 180 + return ret; 181 + } 182 + 183 + static const struct file_operations arm_vmregion_fops = { 184 + .open = arm_vmregion_open, 185 + .read = seq_read, 186 + .llseek = seq_lseek, 187 + .release = seq_release, 188 + }; 189 + 190 + int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h) 191 + { 192 + proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h); 193 + return 0; 194 + } 195 + #else 196 + int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h) 197 + { 198 + return 0; 199 + } 200 + #endif
+4 -1
arch/arm/mm/vmregion.h
··· 19 19 unsigned long vm_end; 20 20 struct page *vm_pages; 21 21 int vm_active; 22 + const void *caller; 22 23 }; 23 24 24 - struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t); 25 + struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *); 25 26 struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long); 26 27 struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long); 27 28 void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *); 29 + 30 + int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *); 28 31 29 32 #endif
+3
arch/arm/net/Makefile
··· 1 + # ARM-specific networking code 2 + 3 + obj-$(CONFIG_BPF_JIT) += bpf_jit_32.o
+915
arch/arm/net/bpf_jit_32.c
··· 1 + /* 2 + * Just-In-Time compiler for BPF filters on 32bit ARM 3 + * 4 + * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com> 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the 8 + * Free Software Foundation; version 2 of the License. 9 + */ 10 + 11 + #include <linux/bitops.h> 12 + #include <linux/compiler.h> 13 + #include <linux/errno.h> 14 + #include <linux/filter.h> 15 + #include <linux/moduleloader.h> 16 + #include <linux/netdevice.h> 17 + #include <linux/string.h> 18 + #include <linux/slab.h> 19 + #include <asm/cacheflush.h> 20 + #include <asm/hwcap.h> 21 + 22 + #include "bpf_jit_32.h" 23 + 24 + /* 25 + * ABI: 26 + * 27 + * r0 scratch register 28 + * r4 BPF register A 29 + * r5 BPF register X 30 + * r6 pointer to the skb 31 + * r7 skb->data 32 + * r8 skb_headlen(skb) 33 + */ 34 + 35 + #define r_scratch ARM_R0 36 + /* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */ 37 + #define r_off ARM_R1 38 + #define r_A ARM_R4 39 + #define r_X ARM_R5 40 + #define r_skb ARM_R6 41 + #define r_skb_data ARM_R7 42 + #define r_skb_hl ARM_R8 43 + 44 + #define SCRATCH_SP_OFFSET 0 45 + #define SCRATCH_OFF(k) (SCRATCH_SP_OFFSET + (k)) 46 + 47 + #define SEEN_MEM ((1 << BPF_MEMWORDS) - 1) 48 + #define SEEN_MEM_WORD(k) (1 << (k)) 49 + #define SEEN_X (1 << BPF_MEMWORDS) 50 + #define SEEN_CALL (1 << (BPF_MEMWORDS + 1)) 51 + #define SEEN_SKB (1 << (BPF_MEMWORDS + 2)) 52 + #define SEEN_DATA (1 << (BPF_MEMWORDS + 3)) 53 + 54 + #define FLAG_NEED_X_RESET (1 << 0) 55 + 56 + struct jit_ctx { 57 + const struct sk_filter *skf; 58 + unsigned idx; 59 + unsigned prologue_bytes; 60 + int ret0_fp_idx; 61 + u32 seen; 62 + u32 flags; 63 + u32 *offsets; 64 + u32 *target; 65 + #if __LINUX_ARM_ARCH__ < 7 66 + u16 epilogue_bytes; 67 + u16 imm_count; 68 + u32 *imms; 69 + #endif 70 + }; 71 + 72 + int bpf_jit_enable __read_mostly; 73 + 74 + static u64 
jit_get_skb_b(struct sk_buff *skb, unsigned offset) 75 + { 76 + u8 ret; 77 + int err; 78 + 79 + err = skb_copy_bits(skb, offset, &ret, 1); 80 + 81 + return (u64)err << 32 | ret; 82 + } 83 + 84 + static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset) 85 + { 86 + u16 ret; 87 + int err; 88 + 89 + err = skb_copy_bits(skb, offset, &ret, 2); 90 + 91 + return (u64)err << 32 | ntohs(ret); 92 + } 93 + 94 + static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) 95 + { 96 + u32 ret; 97 + int err; 98 + 99 + err = skb_copy_bits(skb, offset, &ret, 4); 100 + 101 + return (u64)err << 32 | ntohl(ret); 102 + } 103 + 104 + /* 105 + * Wrapper that handles both OABI and EABI and assures Thumb2 interworking 106 + * (where the assembly routines like __aeabi_uidiv could cause problems). 107 + */ 108 + static u32 jit_udiv(u32 dividend, u32 divisor) 109 + { 110 + return dividend / divisor; 111 + } 112 + 113 + static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx) 114 + { 115 + if (ctx->target != NULL) 116 + ctx->target[ctx->idx] = inst | (cond << 28); 117 + 118 + ctx->idx++; 119 + } 120 + 121 + /* 122 + * Emit an instruction that will be executed unconditionally. 
123 + */ 124 + static inline void emit(u32 inst, struct jit_ctx *ctx) 125 + { 126 + _emit(ARM_COND_AL, inst, ctx); 127 + } 128 + 129 + static u16 saved_regs(struct jit_ctx *ctx) 130 + { 131 + u16 ret = 0; 132 + 133 + if ((ctx->skf->len > 1) || 134 + (ctx->skf->insns[0].code == BPF_S_RET_A)) 135 + ret |= 1 << r_A; 136 + 137 + #ifdef CONFIG_FRAME_POINTER 138 + ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC); 139 + #else 140 + if (ctx->seen & SEEN_CALL) 141 + ret |= 1 << ARM_LR; 142 + #endif 143 + if (ctx->seen & (SEEN_DATA | SEEN_SKB)) 144 + ret |= 1 << r_skb; 145 + if (ctx->seen & SEEN_DATA) 146 + ret |= (1 << r_skb_data) | (1 << r_skb_hl); 147 + if (ctx->seen & SEEN_X) 148 + ret |= 1 << r_X; 149 + 150 + return ret; 151 + } 152 + 153 + static inline int mem_words_used(struct jit_ctx *ctx) 154 + { 155 + /* yes, we do waste some stack space IF there are "holes" in the set" */ 156 + return fls(ctx->seen & SEEN_MEM); 157 + } 158 + 159 + static inline bool is_load_to_a(u16 inst) 160 + { 161 + switch (inst) { 162 + case BPF_S_LD_W_LEN: 163 + case BPF_S_LD_W_ABS: 164 + case BPF_S_LD_H_ABS: 165 + case BPF_S_LD_B_ABS: 166 + case BPF_S_ANC_CPU: 167 + case BPF_S_ANC_IFINDEX: 168 + case BPF_S_ANC_MARK: 169 + case BPF_S_ANC_PROTOCOL: 170 + case BPF_S_ANC_RXHASH: 171 + case BPF_S_ANC_QUEUE: 172 + return true; 173 + default: 174 + return false; 175 + } 176 + } 177 + 178 + static void build_prologue(struct jit_ctx *ctx) 179 + { 180 + u16 reg_set = saved_regs(ctx); 181 + u16 first_inst = ctx->skf->insns[0].code; 182 + u16 off; 183 + 184 + #ifdef CONFIG_FRAME_POINTER 185 + emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx); 186 + emit(ARM_PUSH(reg_set), ctx); 187 + emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx); 188 + #else 189 + if (reg_set) 190 + emit(ARM_PUSH(reg_set), ctx); 191 + #endif 192 + 193 + if (ctx->seen & (SEEN_DATA | SEEN_SKB)) 194 + emit(ARM_MOV_R(r_skb, ARM_R0), ctx); 195 + 196 + if (ctx->seen & SEEN_DATA) { 197 + off = offsetof(struct sk_buff, data); 198 + 
emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx); 199 + /* headlen = len - data_len */ 200 + off = offsetof(struct sk_buff, len); 201 + emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx); 202 + off = offsetof(struct sk_buff, data_len); 203 + emit(ARM_LDR_I(r_scratch, r_skb, off), ctx); 204 + emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx); 205 + } 206 + 207 + if (ctx->flags & FLAG_NEED_X_RESET) 208 + emit(ARM_MOV_I(r_X, 0), ctx); 209 + 210 + /* do not leak kernel data to userspace */ 211 + if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst))) 212 + emit(ARM_MOV_I(r_A, 0), ctx); 213 + 214 + /* stack space for the BPF_MEM words */ 215 + if (ctx->seen & SEEN_MEM) 216 + emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx); 217 + } 218 + 219 + static void build_epilogue(struct jit_ctx *ctx) 220 + { 221 + u16 reg_set = saved_regs(ctx); 222 + 223 + if (ctx->seen & SEEN_MEM) 224 + emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx); 225 + 226 + reg_set &= ~(1 << ARM_LR); 227 + 228 + #ifdef CONFIG_FRAME_POINTER 229 + /* the first instruction of the prologue was: mov ip, sp */ 230 + reg_set &= ~(1 << ARM_IP); 231 + reg_set |= (1 << ARM_SP); 232 + emit(ARM_LDM(ARM_SP, reg_set), ctx); 233 + #else 234 + if (reg_set) { 235 + if (ctx->seen & SEEN_CALL) 236 + reg_set |= 1 << ARM_PC; 237 + emit(ARM_POP(reg_set), ctx); 238 + } 239 + 240 + if (!(ctx->seen & SEEN_CALL)) 241 + emit(ARM_BX(ARM_LR), ctx); 242 + #endif 243 + } 244 + 245 + static int16_t imm8m(u32 x) 246 + { 247 + u32 rot; 248 + 249 + for (rot = 0; rot < 16; rot++) 250 + if ((x & ~ror32(0xff, 2 * rot)) == 0) 251 + return rol32(x, 2 * rot) | (rot << 8); 252 + 253 + return -1; 254 + } 255 + 256 + #if __LINUX_ARM_ARCH__ < 7 257 + 258 + static u16 imm_offset(u32 k, struct jit_ctx *ctx) 259 + { 260 + unsigned i = 0, offset; 261 + u16 imm; 262 + 263 + /* on the "fake" run we just count them (duplicates included) */ 264 + if (ctx->target == NULL) { 265 + ctx->imm_count++; 266 + return 0; 267 + } 268 + 269 + 
while ((i < ctx->imm_count) && ctx->imms[i]) { 270 + if (ctx->imms[i] == k) 271 + break; 272 + i++; 273 + } 274 + 275 + if (ctx->imms[i] == 0) 276 + ctx->imms[i] = k; 277 + 278 + /* constants go just after the epilogue */ 279 + offset = ctx->offsets[ctx->skf->len]; 280 + offset += ctx->prologue_bytes; 281 + offset += ctx->epilogue_bytes; 282 + offset += i * 4; 283 + 284 + ctx->target[offset / 4] = k; 285 + 286 + /* PC in ARM mode == address of the instruction + 8 */ 287 + imm = offset - (8 + ctx->idx * 4); 288 + 289 + return imm; 290 + } 291 + 292 + #endif /* __LINUX_ARM_ARCH__ */ 293 + 294 + /* 295 + * Move an immediate that's not an imm8m to a core register. 296 + */ 297 + static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx) 298 + { 299 + #if __LINUX_ARM_ARCH__ < 7 300 + emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx); 301 + #else 302 + emit(ARM_MOVW(rd, val & 0xffff), ctx); 303 + if (val > 0xffff) 304 + emit(ARM_MOVT(rd, val >> 16), ctx); 305 + #endif 306 + } 307 + 308 + static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx) 309 + { 310 + int imm12 = imm8m(val); 311 + 312 + if (imm12 >= 0) 313 + emit(ARM_MOV_I(rd, imm12), ctx); 314 + else 315 + emit_mov_i_no8m(rd, val, ctx); 316 + } 317 + 318 + #if __LINUX_ARM_ARCH__ < 6 319 + 320 + static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) 321 + { 322 + _emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx); 323 + _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx); 324 + _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx); 325 + _emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx); 326 + _emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx); 327 + _emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx); 328 + _emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx); 329 + _emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx); 330 + } 331 + 332 + static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) 333 + { 334 + _emit(cond, 
ARM_LDRB_I(ARM_R1, r_addr, 0), ctx); 335 + _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx); 336 + _emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx); 337 + } 338 + 339 + static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx) 340 + { 341 + emit(ARM_LSL_R(ARM_R1, r_src, 8), ctx); 342 + emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSL, 8), ctx); 343 + emit(ARM_LSL_I(r_dst, r_dst, 8), ctx); 344 + emit(ARM_LSL_R(r_dst, r_dst, 8), ctx); 345 + } 346 + 347 + #else /* ARMv6+ */ 348 + 349 + static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) 350 + { 351 + _emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx); 352 + #ifdef __LITTLE_ENDIAN 353 + _emit(cond, ARM_REV(r_res, r_res), ctx); 354 + #endif 355 + } 356 + 357 + static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) 358 + { 359 + _emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx); 360 + #ifdef __LITTLE_ENDIAN 361 + _emit(cond, ARM_REV16(r_res, r_res), ctx); 362 + #endif 363 + } 364 + 365 + static inline void emit_swap16(u8 r_dst __maybe_unused, 366 + u8 r_src __maybe_unused, 367 + struct jit_ctx *ctx __maybe_unused) 368 + { 369 + #ifdef __LITTLE_ENDIAN 370 + emit(ARM_REV16(r_dst, r_src), ctx); 371 + #endif 372 + } 373 + 374 + #endif /* __LINUX_ARM_ARCH__ < 6 */ 375 + 376 + 377 + /* Compute the immediate value for a PC-relative branch. */ 378 + static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx) 379 + { 380 + u32 imm; 381 + 382 + if (ctx->target == NULL) 383 + return 0; 384 + /* 385 + * BPF allows only forward jumps and the offset of the target is 386 + * still the one computed during the first pass. 
387 + */ 388 + imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8); 389 + 390 + return imm >> 2; 391 + } 392 + 393 + #define OP_IMM3(op, r1, r2, imm_val, ctx) \ 394 + do { \ 395 + imm12 = imm8m(imm_val); \ 396 + if (imm12 < 0) { \ 397 + emit_mov_i_no8m(r_scratch, imm_val, ctx); \ 398 + emit(op ## _R((r1), (r2), r_scratch), ctx); \ 399 + } else { \ 400 + emit(op ## _I((r1), (r2), imm12), ctx); \ 401 + } \ 402 + } while (0) 403 + 404 + static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx) 405 + { 406 + if (ctx->ret0_fp_idx >= 0) { 407 + _emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx); 408 + /* NOP to keep the size constant between passes */ 409 + emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx); 410 + } else { 411 + _emit(cond, ARM_MOV_I(ARM_R0, 0), ctx); 412 + _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx); 413 + } 414 + } 415 + 416 + static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) 417 + { 418 + #if __LINUX_ARM_ARCH__ < 5 419 + emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); 420 + 421 + if (elf_hwcap & HWCAP_THUMB) 422 + emit(ARM_BX(tgt_reg), ctx); 423 + else 424 + emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx); 425 + #else 426 + emit(ARM_BLX_R(tgt_reg), ctx); 427 + #endif 428 + } 429 + 430 + static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx) 431 + { 432 + #if __LINUX_ARM_ARCH__ == 7 433 + if (elf_hwcap & HWCAP_IDIVA) { 434 + emit(ARM_UDIV(rd, rm, rn), ctx); 435 + return; 436 + } 437 + #endif 438 + if (rm != ARM_R0) 439 + emit(ARM_MOV_R(ARM_R0, rm), ctx); 440 + if (rn != ARM_R1) 441 + emit(ARM_MOV_R(ARM_R1, rn), ctx); 442 + 443 + ctx->seen |= SEEN_CALL; 444 + emit_mov_i(ARM_R3, (u32)jit_udiv, ctx); 445 + emit_blx_r(ARM_R3, ctx); 446 + 447 + if (rd != ARM_R0) 448 + emit(ARM_MOV_R(rd, ARM_R0), ctx); 449 + } 450 + 451 + static inline void update_on_xread(struct jit_ctx *ctx) 452 + { 453 + if (!(ctx->seen & SEEN_X)) 454 + ctx->flags |= FLAG_NEED_X_RESET; 455 + 456 + ctx->seen |= SEEN_X; 457 + } 458 + 459 + static int 
build_body(struct jit_ctx *ctx) 460 + { 461 + void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w}; 462 + const struct sk_filter *prog = ctx->skf; 463 + const struct sock_filter *inst; 464 + unsigned i, load_order, off, condt; 465 + int imm12; 466 + u32 k; 467 + 468 + for (i = 0; i < prog->len; i++) { 469 + inst = &(prog->insns[i]); 470 + /* K as an immediate value operand */ 471 + k = inst->k; 472 + 473 + /* compute offsets only in the fake pass */ 474 + if (ctx->target == NULL) 475 + ctx->offsets[i] = ctx->idx * 4; 476 + 477 + switch (inst->code) { 478 + case BPF_S_LD_IMM: 479 + emit_mov_i(r_A, k, ctx); 480 + break; 481 + case BPF_S_LD_W_LEN: 482 + ctx->seen |= SEEN_SKB; 483 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); 484 + emit(ARM_LDR_I(r_A, r_skb, 485 + offsetof(struct sk_buff, len)), ctx); 486 + break; 487 + case BPF_S_LD_MEM: 488 + /* A = scratch[k] */ 489 + ctx->seen |= SEEN_MEM_WORD(k); 490 + emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); 491 + break; 492 + case BPF_S_LD_W_ABS: 493 + load_order = 2; 494 + goto load; 495 + case BPF_S_LD_H_ABS: 496 + load_order = 1; 497 + goto load; 498 + case BPF_S_LD_B_ABS: 499 + load_order = 0; 500 + load: 501 + /* the interpreter will deal with the negative K */ 502 + if ((int)k < 0) 503 + return -ENOTSUPP; 504 + emit_mov_i(r_off, k, ctx); 505 + load_common: 506 + ctx->seen |= SEEN_DATA | SEEN_CALL; 507 + 508 + if (load_order > 0) { 509 + emit(ARM_SUB_I(r_scratch, r_skb_hl, 510 + 1 << load_order), ctx); 511 + emit(ARM_CMP_R(r_scratch, r_off), ctx); 512 + condt = ARM_COND_HS; 513 + } else { 514 + emit(ARM_CMP_R(r_skb_hl, r_off), ctx); 515 + condt = ARM_COND_HI; 516 + } 517 + 518 + _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), 519 + ctx); 520 + 521 + if (load_order == 0) 522 + _emit(condt, ARM_LDRB_I(r_A, r_scratch, 0), 523 + ctx); 524 + else if (load_order == 1) 525 + emit_load_be16(condt, r_A, r_scratch, ctx); 526 + else if (load_order == 2) 527 + emit_load_be32(condt, r_A, r_scratch, 
ctx); 528 + 529 + _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx); 530 + 531 + /* the slowpath */ 532 + emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx); 533 + emit(ARM_MOV_R(ARM_R0, r_skb), ctx); 534 + /* the offset is already in R1 */ 535 + emit_blx_r(ARM_R3, ctx); 536 + /* check the result of skb_copy_bits */ 537 + emit(ARM_CMP_I(ARM_R1, 0), ctx); 538 + emit_err_ret(ARM_COND_NE, ctx); 539 + emit(ARM_MOV_R(r_A, ARM_R0), ctx); 540 + break; 541 + case BPF_S_LD_W_IND: 542 + load_order = 2; 543 + goto load_ind; 544 + case BPF_S_LD_H_IND: 545 + load_order = 1; 546 + goto load_ind; 547 + case BPF_S_LD_B_IND: 548 + load_order = 0; 549 + load_ind: 550 + OP_IMM3(ARM_ADD, r_off, r_X, k, ctx); 551 + goto load_common; 552 + case BPF_S_LDX_IMM: 553 + ctx->seen |= SEEN_X; 554 + emit_mov_i(r_X, k, ctx); 555 + break; 556 + case BPF_S_LDX_W_LEN: 557 + ctx->seen |= SEEN_X | SEEN_SKB; 558 + emit(ARM_LDR_I(r_X, r_skb, 559 + offsetof(struct sk_buff, len)), ctx); 560 + break; 561 + case BPF_S_LDX_MEM: 562 + ctx->seen |= SEEN_X | SEEN_MEM_WORD(k); 563 + emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); 564 + break; 565 + case BPF_S_LDX_B_MSH: 566 + /* x = ((*(frame + k)) & 0xf) << 2; */ 567 + ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; 568 + /* the interpreter should deal with the negative K */ 569 + if (k < 0) 570 + return -1; 571 + /* offset in r1: we might have to take the slow path */ 572 + emit_mov_i(r_off, k, ctx); 573 + emit(ARM_CMP_R(r_skb_hl, r_off), ctx); 574 + 575 + /* load in r0: common with the slowpath */ 576 + _emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data, 577 + ARM_R1), ctx); 578 + /* 579 + * emit_mov_i() might generate one or two instructions, 580 + * the same holds for emit_blx_r() 581 + */ 582 + _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx); 583 + 584 + emit(ARM_MOV_R(ARM_R0, r_skb), ctx); 585 + /* r_off is r1 */ 586 + emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx); 587 + emit_blx_r(ARM_R3, ctx); 588 + /* check the return value of skb_copy_bits */ 589 + 
emit(ARM_CMP_I(ARM_R1, 0), ctx); 590 + emit_err_ret(ARM_COND_NE, ctx); 591 + 592 + emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx); 593 + emit(ARM_LSL_I(r_X, r_X, 2), ctx); 594 + break; 595 + case BPF_S_ST: 596 + ctx->seen |= SEEN_MEM_WORD(k); 597 + emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); 598 + break; 599 + case BPF_S_STX: 600 + update_on_xread(ctx); 601 + ctx->seen |= SEEN_MEM_WORD(k); 602 + emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); 603 + break; 604 + case BPF_S_ALU_ADD_K: 605 + /* A += K */ 606 + OP_IMM3(ARM_ADD, r_A, r_A, k, ctx); 607 + break; 608 + case BPF_S_ALU_ADD_X: 609 + update_on_xread(ctx); 610 + emit(ARM_ADD_R(r_A, r_A, r_X), ctx); 611 + break; 612 + case BPF_S_ALU_SUB_K: 613 + /* A -= K */ 614 + OP_IMM3(ARM_SUB, r_A, r_A, k, ctx); 615 + break; 616 + case BPF_S_ALU_SUB_X: 617 + update_on_xread(ctx); 618 + emit(ARM_SUB_R(r_A, r_A, r_X), ctx); 619 + break; 620 + case BPF_S_ALU_MUL_K: 621 + /* A *= K */ 622 + emit_mov_i(r_scratch, k, ctx); 623 + emit(ARM_MUL(r_A, r_A, r_scratch), ctx); 624 + break; 625 + case BPF_S_ALU_MUL_X: 626 + update_on_xread(ctx); 627 + emit(ARM_MUL(r_A, r_A, r_X), ctx); 628 + break; 629 + case BPF_S_ALU_DIV_K: 630 + /* current k == reciprocal_value(userspace k) */ 631 + emit_mov_i(r_scratch, k, ctx); 632 + /* A = top 32 bits of the product */ 633 + emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx); 634 + break; 635 + case BPF_S_ALU_DIV_X: 636 + update_on_xread(ctx); 637 + emit(ARM_CMP_I(r_X, 0), ctx); 638 + emit_err_ret(ARM_COND_EQ, ctx); 639 + emit_udiv(r_A, r_A, r_X, ctx); 640 + break; 641 + case BPF_S_ALU_OR_K: 642 + /* A |= K */ 643 + OP_IMM3(ARM_ORR, r_A, r_A, k, ctx); 644 + break; 645 + case BPF_S_ALU_OR_X: 646 + update_on_xread(ctx); 647 + emit(ARM_ORR_R(r_A, r_A, r_X), ctx); 648 + break; 649 + case BPF_S_ALU_AND_K: 650 + /* A &= K */ 651 + OP_IMM3(ARM_AND, r_A, r_A, k, ctx); 652 + break; 653 + case BPF_S_ALU_AND_X: 654 + update_on_xread(ctx); 655 + emit(ARM_AND_R(r_A, r_A, r_X), ctx); 656 + break; 657 + case 
BPF_S_ALU_LSH_K: 658 + if (unlikely(k > 31)) 659 + return -1; 660 + emit(ARM_LSL_I(r_A, r_A, k), ctx); 661 + break; 662 + case BPF_S_ALU_LSH_X: 663 + update_on_xread(ctx); 664 + emit(ARM_LSL_R(r_A, r_A, r_X), ctx); 665 + break; 666 + case BPF_S_ALU_RSH_K: 667 + if (unlikely(k > 31)) 668 + return -1; 669 + emit(ARM_LSR_I(r_A, r_A, k), ctx); 670 + break; 671 + case BPF_S_ALU_RSH_X: 672 + update_on_xread(ctx); 673 + emit(ARM_LSR_R(r_A, r_A, r_X), ctx); 674 + break; 675 + case BPF_S_ALU_NEG: 676 + /* A = -A */ 677 + emit(ARM_RSB_I(r_A, r_A, 0), ctx); 678 + break; 679 + case BPF_S_JMP_JA: 680 + /* pc += K */ 681 + emit(ARM_B(b_imm(i + k + 1, ctx)), ctx); 682 + break; 683 + case BPF_S_JMP_JEQ_K: 684 + /* pc += (A == K) ? pc->jt : pc->jf */ 685 + condt = ARM_COND_EQ; 686 + goto cmp_imm; 687 + case BPF_S_JMP_JGT_K: 688 + /* pc += (A > K) ? pc->jt : pc->jf */ 689 + condt = ARM_COND_HI; 690 + goto cmp_imm; 691 + case BPF_S_JMP_JGE_K: 692 + /* pc += (A >= K) ? pc->jt : pc->jf */ 693 + condt = ARM_COND_HS; 694 + cmp_imm: 695 + imm12 = imm8m(k); 696 + if (imm12 < 0) { 697 + emit_mov_i_no8m(r_scratch, k, ctx); 698 + emit(ARM_CMP_R(r_A, r_scratch), ctx); 699 + } else { 700 + emit(ARM_CMP_I(r_A, imm12), ctx); 701 + } 702 + cond_jump: 703 + if (inst->jt) 704 + _emit(condt, ARM_B(b_imm(i + inst->jt + 1, 705 + ctx)), ctx); 706 + if (inst->jf) 707 + _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1, 708 + ctx)), ctx); 709 + break; 710 + case BPF_S_JMP_JEQ_X: 711 + /* pc += (A == X) ? pc->jt : pc->jf */ 712 + condt = ARM_COND_EQ; 713 + goto cmp_x; 714 + case BPF_S_JMP_JGT_X: 715 + /* pc += (A > X) ? pc->jt : pc->jf */ 716 + condt = ARM_COND_HI; 717 + goto cmp_x; 718 + case BPF_S_JMP_JGE_X: 719 + /* pc += (A >= X) ? pc->jt : pc->jf */ 720 + condt = ARM_COND_CS; 721 + cmp_x: 722 + update_on_xread(ctx); 723 + emit(ARM_CMP_R(r_A, r_X), ctx); 724 + goto cond_jump; 725 + case BPF_S_JMP_JSET_K: 726 + /* pc += (A & K) ? 
pc->jt : pc->jf */ 727 + condt = ARM_COND_NE; 728 + /* not set iff all zeroes iff Z==1 iff EQ */ 729 + 730 + imm12 = imm8m(k); 731 + if (imm12 < 0) { 732 + emit_mov_i_no8m(r_scratch, k, ctx); 733 + emit(ARM_TST_R(r_A, r_scratch), ctx); 734 + } else { 735 + emit(ARM_TST_I(r_A, imm12), ctx); 736 + } 737 + goto cond_jump; 738 + case BPF_S_JMP_JSET_X: 739 + /* pc += (A & X) ? pc->jt : pc->jf */ 740 + update_on_xread(ctx); 741 + condt = ARM_COND_NE; 742 + emit(ARM_TST_R(r_A, r_X), ctx); 743 + goto cond_jump; 744 + case BPF_S_RET_A: 745 + emit(ARM_MOV_R(ARM_R0, r_A), ctx); 746 + goto b_epilogue; 747 + case BPF_S_RET_K: 748 + if ((k == 0) && (ctx->ret0_fp_idx < 0)) 749 + ctx->ret0_fp_idx = i; 750 + emit_mov_i(ARM_R0, k, ctx); 751 + b_epilogue: 752 + if (i != ctx->skf->len - 1) 753 + emit(ARM_B(b_imm(prog->len, ctx)), ctx); 754 + break; 755 + case BPF_S_MISC_TAX: 756 + /* X = A */ 757 + ctx->seen |= SEEN_X; 758 + emit(ARM_MOV_R(r_X, r_A), ctx); 759 + break; 760 + case BPF_S_MISC_TXA: 761 + /* A = X */ 762 + update_on_xread(ctx); 763 + emit(ARM_MOV_R(r_A, r_X), ctx); 764 + break; 765 + case BPF_S_ANC_PROTOCOL: 766 + /* A = ntohs(skb->protocol) */ 767 + ctx->seen |= SEEN_SKB; 768 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 769 + protocol) != 2); 770 + off = offsetof(struct sk_buff, protocol); 771 + emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx); 772 + emit_swap16(r_A, r_scratch, ctx); 773 + break; 774 + case BPF_S_ANC_CPU: 775 + /* r_scratch = current_thread_info() */ 776 + OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx); 777 + /* A = current_thread_info()->cpu */ 778 + BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); 779 + off = offsetof(struct thread_info, cpu); 780 + emit(ARM_LDR_I(r_A, r_scratch, off), ctx); 781 + break; 782 + case BPF_S_ANC_IFINDEX: 783 + /* A = skb->dev->ifindex */ 784 + ctx->seen |= SEEN_SKB; 785 + off = offsetof(struct sk_buff, dev); 786 + emit(ARM_LDR_I(r_scratch, r_skb, off), ctx); 787 + 788 + emit(ARM_CMP_I(r_scratch, 0), ctx); 
789 + emit_err_ret(ARM_COND_EQ, ctx); 790 + 791 + BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, 792 + ifindex) != 4); 793 + off = offsetof(struct net_device, ifindex); 794 + emit(ARM_LDR_I(r_A, r_scratch, off), ctx); 795 + break; 796 + case BPF_S_ANC_MARK: 797 + ctx->seen |= SEEN_SKB; 798 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); 799 + off = offsetof(struct sk_buff, mark); 800 + emit(ARM_LDR_I(r_A, r_skb, off), ctx); 801 + break; 802 + case BPF_S_ANC_RXHASH: 803 + ctx->seen |= SEEN_SKB; 804 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4); 805 + off = offsetof(struct sk_buff, rxhash); 806 + emit(ARM_LDR_I(r_A, r_skb, off), ctx); 807 + break; 808 + case BPF_S_ANC_QUEUE: 809 + ctx->seen |= SEEN_SKB; 810 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 811 + queue_mapping) != 2); 812 + BUILD_BUG_ON(offsetof(struct sk_buff, 813 + queue_mapping) > 0xff); 814 + off = offsetof(struct sk_buff, queue_mapping); 815 + emit(ARM_LDRH_I(r_A, r_skb, off), ctx); 816 + break; 817 + default: 818 + return -1; 819 + } 820 + } 821 + 822 + /* compute offsets only during the first pass */ 823 + if (ctx->target == NULL) 824 + ctx->offsets[i] = ctx->idx * 4; 825 + 826 + return 0; 827 + } 828 + 829 + 830 + void bpf_jit_compile(struct sk_filter *fp) 831 + { 832 + struct jit_ctx ctx; 833 + unsigned tmp_idx; 834 + unsigned alloc_size; 835 + 836 + if (!bpf_jit_enable) 837 + return; 838 + 839 + memset(&ctx, 0, sizeof(ctx)); 840 + ctx.skf = fp; 841 + ctx.ret0_fp_idx = -1; 842 + 843 + ctx.offsets = kzalloc(GFP_KERNEL, 4 * (ctx.skf->len + 1)); 844 + if (ctx.offsets == NULL) 845 + return; 846 + 847 + /* fake pass to fill in the ctx->seen */ 848 + if (unlikely(build_body(&ctx))) 849 + goto out; 850 + 851 + tmp_idx = ctx.idx; 852 + build_prologue(&ctx); 853 + ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4; 854 + 855 + #if __LINUX_ARM_ARCH__ < 7 856 + tmp_idx = ctx.idx; 857 + build_epilogue(&ctx); 858 + ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4; 859 + 860 + ctx.idx += ctx.imm_count; 
861 + if (ctx.imm_count) { 862 + ctx.imms = kzalloc(GFP_KERNEL, 4 * ctx.imm_count); 863 + if (ctx.imms == NULL) 864 + goto out; 865 + } 866 + #else 867 + /* there's nothing after the epilogue on ARMv7 */ 868 + build_epilogue(&ctx); 869 + #endif 870 + 871 + alloc_size = 4 * ctx.idx; 872 + ctx.target = module_alloc(max(sizeof(struct work_struct), 873 + alloc_size)); 874 + if (unlikely(ctx.target == NULL)) 875 + goto out; 876 + 877 + ctx.idx = 0; 878 + build_prologue(&ctx); 879 + build_body(&ctx); 880 + build_epilogue(&ctx); 881 + 882 + flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx)); 883 + 884 + #if __LINUX_ARM_ARCH__ < 7 885 + if (ctx.imm_count) 886 + kfree(ctx.imms); 887 + #endif 888 + 889 + if (bpf_jit_enable > 1) 890 + print_hex_dump(KERN_INFO, "BPF JIT code: ", 891 + DUMP_PREFIX_ADDRESS, 16, 4, ctx.target, 892 + alloc_size, false); 893 + 894 + fp->bpf_func = (void *)ctx.target; 895 + out: 896 + kfree(ctx.offsets); 897 + return; 898 + } 899 + 900 + static void bpf_jit_free_worker(struct work_struct *work) 901 + { 902 + module_free(NULL, work); 903 + } 904 + 905 + void bpf_jit_free(struct sk_filter *fp) 906 + { 907 + struct work_struct *work; 908 + 909 + if (fp->bpf_func != sk_run_filter) { 910 + work = (struct work_struct *)fp->bpf_func; 911 + 912 + INIT_WORK(work, bpf_jit_free_worker); 913 + schedule_work(work); 914 + } 915 + }
+190
arch/arm/net/bpf_jit_32.h
··· 1 + /* 2 + * Just-In-Time compiler for BPF filters on 32bit ARM 3 + * 4 + * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com> 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the 8 + * Free Software Foundation; version 2 of the License. 9 + */ 10 + 11 + #ifndef PFILTER_OPCODES_ARM_H 12 + #define PFILTER_OPCODES_ARM_H 13 + 14 + #define ARM_R0 0 15 + #define ARM_R1 1 16 + #define ARM_R2 2 17 + #define ARM_R3 3 18 + #define ARM_R4 4 19 + #define ARM_R5 5 20 + #define ARM_R6 6 21 + #define ARM_R7 7 22 + #define ARM_R8 8 23 + #define ARM_R9 9 24 + #define ARM_R10 10 25 + #define ARM_FP 11 26 + #define ARM_IP 12 27 + #define ARM_SP 13 28 + #define ARM_LR 14 29 + #define ARM_PC 15 30 + 31 + #define ARM_COND_EQ 0x0 32 + #define ARM_COND_NE 0x1 33 + #define ARM_COND_CS 0x2 34 + #define ARM_COND_HS ARM_COND_CS 35 + #define ARM_COND_CC 0x3 36 + #define ARM_COND_LO ARM_COND_CC 37 + #define ARM_COND_MI 0x4 38 + #define ARM_COND_PL 0x5 39 + #define ARM_COND_VS 0x6 40 + #define ARM_COND_VC 0x7 41 + #define ARM_COND_HI 0x8 42 + #define ARM_COND_LS 0x9 43 + #define ARM_COND_GE 0xa 44 + #define ARM_COND_LT 0xb 45 + #define ARM_COND_GT 0xc 46 + #define ARM_COND_LE 0xd 47 + #define ARM_COND_AL 0xe 48 + 49 + /* register shift types */ 50 + #define SRTYPE_LSL 0 51 + #define SRTYPE_LSR 1 52 + #define SRTYPE_ASR 2 53 + #define SRTYPE_ROR 3 54 + 55 + #define ARM_INST_ADD_R 0x00800000 56 + #define ARM_INST_ADD_I 0x02800000 57 + 58 + #define ARM_INST_AND_R 0x00000000 59 + #define ARM_INST_AND_I 0x02000000 60 + 61 + #define ARM_INST_BIC_R 0x01c00000 62 + #define ARM_INST_BIC_I 0x03c00000 63 + 64 + #define ARM_INST_B 0x0a000000 65 + #define ARM_INST_BX 0x012FFF10 66 + #define ARM_INST_BLX_R 0x012fff30 67 + 68 + #define ARM_INST_CMP_R 0x01500000 69 + #define ARM_INST_CMP_I 0x03500000 70 + 71 + #define ARM_INST_LDRB_I 0x05d00000 72 + #define ARM_INST_LDRB_R 0x07d00000 73 + #define 
ARM_INST_LDRH_I 0x01d000b0 74 + #define ARM_INST_LDR_I 0x05900000 75 + 76 + #define ARM_INST_LDM 0x08900000 77 + 78 + #define ARM_INST_LSL_I 0x01a00000 79 + #define ARM_INST_LSL_R 0x01a00010 80 + 81 + #define ARM_INST_LSR_I 0x01a00020 82 + #define ARM_INST_LSR_R 0x01a00030 83 + 84 + #define ARM_INST_MOV_R 0x01a00000 85 + #define ARM_INST_MOV_I 0x03a00000 86 + #define ARM_INST_MOVW 0x03000000 87 + #define ARM_INST_MOVT 0x03400000 88 + 89 + #define ARM_INST_MUL 0x00000090 90 + 91 + #define ARM_INST_POP 0x08bd0000 92 + #define ARM_INST_PUSH 0x092d0000 93 + 94 + #define ARM_INST_ORR_R 0x01800000 95 + #define ARM_INST_ORR_I 0x03800000 96 + 97 + #define ARM_INST_REV 0x06bf0f30 98 + #define ARM_INST_REV16 0x06bf0fb0 99 + 100 + #define ARM_INST_RSB_I 0x02600000 101 + 102 + #define ARM_INST_SUB_R 0x00400000 103 + #define ARM_INST_SUB_I 0x02400000 104 + 105 + #define ARM_INST_STR_I 0x05800000 106 + 107 + #define ARM_INST_TST_R 0x01100000 108 + #define ARM_INST_TST_I 0x03100000 109 + 110 + #define ARM_INST_UDIV 0x0730f010 111 + 112 + #define ARM_INST_UMULL 0x00800090 113 + 114 + /* register */ 115 + #define _AL3_R(op, rd, rn, rm) ((op ## _R) | (rd) << 12 | (rn) << 16 | (rm)) 116 + /* immediate */ 117 + #define _AL3_I(op, rd, rn, imm) ((op ## _I) | (rd) << 12 | (rn) << 16 | (imm)) 118 + 119 + #define ARM_ADD_R(rd, rn, rm) _AL3_R(ARM_INST_ADD, rd, rn, rm) 120 + #define ARM_ADD_I(rd, rn, imm) _AL3_I(ARM_INST_ADD, rd, rn, imm) 121 + 122 + #define ARM_AND_R(rd, rn, rm) _AL3_R(ARM_INST_AND, rd, rn, rm) 123 + #define ARM_AND_I(rd, rn, imm) _AL3_I(ARM_INST_AND, rd, rn, imm) 124 + 125 + #define ARM_BIC_R(rd, rn, rm) _AL3_R(ARM_INST_BIC, rd, rn, rm) 126 + #define ARM_BIC_I(rd, rn, imm) _AL3_I(ARM_INST_BIC, rd, rn, imm) 127 + 128 + #define ARM_B(imm24) (ARM_INST_B | ((imm24) & 0xffffff)) 129 + #define ARM_BX(rm) (ARM_INST_BX | (rm)) 130 + #define ARM_BLX_R(rm) (ARM_INST_BLX_R | (rm)) 131 + 132 + #define ARM_CMP_R(rn, rm) _AL3_R(ARM_INST_CMP, 0, rn, rm) 133 + #define ARM_CMP_I(rn, imm) 
_AL3_I(ARM_INST_CMP, 0, rn, imm) 134 + 135 + #define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \ 136 + | (off)) 137 + #define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \ 138 + | (off)) 139 + #define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | (rt) << 12 | (rn) << 16 \ 140 + | (rm)) 141 + #define ARM_LDRH_I(rt, rn, off) (ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \ 142 + | (((off) & 0xf0) << 4) | ((off) & 0xf)) 143 + 144 + #define ARM_LDM(rn, regs) (ARM_INST_LDM | (rn) << 16 | (regs)) 145 + 146 + #define ARM_LSL_R(rd, rn, rm) (_AL3_R(ARM_INST_LSL, rd, 0, rn) | (rm) << 8) 147 + #define ARM_LSL_I(rd, rn, imm) (_AL3_I(ARM_INST_LSL, rd, 0, rn) | (imm) << 7) 148 + 149 + #define ARM_LSR_R(rd, rn, rm) (_AL3_R(ARM_INST_LSR, rd, 0, rn) | (rm) << 8) 150 + #define ARM_LSR_I(rd, rn, imm) (_AL3_I(ARM_INST_LSR, rd, 0, rn) | (imm) << 7) 151 + 152 + #define ARM_MOV_R(rd, rm) _AL3_R(ARM_INST_MOV, rd, 0, rm) 153 + #define ARM_MOV_I(rd, imm) _AL3_I(ARM_INST_MOV, rd, 0, imm) 154 + 155 + #define ARM_MOVW(rd, imm) \ 156 + (ARM_INST_MOVW | ((imm) >> 12) << 16 | (rd) << 12 | ((imm) & 0x0fff)) 157 + 158 + #define ARM_MOVT(rd, imm) \ 159 + (ARM_INST_MOVT | ((imm) >> 12) << 16 | (rd) << 12 | ((imm) & 0x0fff)) 160 + 161 + #define ARM_MUL(rd, rm, rn) (ARM_INST_MUL | (rd) << 16 | (rm) << 8 | (rn)) 162 + 163 + #define ARM_POP(regs) (ARM_INST_POP | (regs)) 164 + #define ARM_PUSH(regs) (ARM_INST_PUSH | (regs)) 165 + 166 + #define ARM_ORR_R(rd, rn, rm) _AL3_R(ARM_INST_ORR, rd, rn, rm) 167 + #define ARM_ORR_I(rd, rn, imm) _AL3_I(ARM_INST_ORR, rd, rn, imm) 168 + #define ARM_ORR_S(rd, rn, rm, type, rs) \ 169 + (ARM_ORR_R(rd, rn, rm) | (type) << 5 | (rs) << 7) 170 + 171 + #define ARM_REV(rd, rm) (ARM_INST_REV | (rd) << 12 | (rm)) 172 + #define ARM_REV16(rd, rm) (ARM_INST_REV16 | (rd) << 12 | (rm)) 173 + 174 + #define ARM_RSB_I(rd, rn, imm) _AL3_I(ARM_INST_RSB, rd, rn, imm) 175 + 176 + #define ARM_SUB_R(rd, rn, rm) _AL3_R(ARM_INST_SUB, rd, rn, rm) 177 + 
#define ARM_SUB_I(rd, rn, imm) _AL3_I(ARM_INST_SUB, rd, rn, imm) 178 + 179 + #define ARM_STR_I(rt, rn, off) (ARM_INST_STR_I | (rt) << 12 | (rn) << 16 \ 180 + | (off)) 181 + 182 + #define ARM_TST_R(rn, rm) _AL3_R(ARM_INST_TST, 0, rn, rm) 183 + #define ARM_TST_I(rn, imm) _AL3_I(ARM_INST_TST, 0, rn, imm) 184 + 185 + #define ARM_UDIV(rd, rn, rm) (ARM_INST_UDIV | (rd) << 16 | (rn) | (rm) << 8) 186 + 187 + #define ARM_UMULL(rd_lo, rd_hi, rn, rm) (ARM_INST_UMULL | (rd_hi) << 16 \ 188 + | (rd_lo) << 12 | (rm) << 8 | rn) 189 + 190 + #endif /* PFILTER_OPCODES_ARM_H */
-1
arch/arm/plat-nomadik/Kconfig
··· 23 23 config NOMADIK_MTU_SCHED_CLOCK 24 24 bool 25 25 depends on HAS_MTU 26 - select HAVE_SCHED_CLOCK 27 26 help 28 27 Use the Multi Timer Unit as the sched_clock. 29 28
+1 -2
arch/arm/plat-versatile/Kconfig
··· 11 11 depends on ARCH_REALVIEW || ARCH_VERSATILE 12 12 13 13 config PLAT_VERSATILE_SCHED_CLOCK 14 - def_bool y if !ARCH_INTEGRATOR_AP 15 - select HAVE_SCHED_CLOCK 14 + def_bool y 16 15 17 16 endif
+1 -1
arch/c6x/Kconfig
··· 11 11 select HAVE_DMA_API_DEBUG 12 12 select HAVE_GENERIC_HARDIRQS 13 13 select HAVE_MEMBLOCK 14 - select HAVE_SPARSE_IRQ 14 + select SPARSE_IRQ 15 15 select IRQ_DOMAIN 16 16 select OF 17 17 select OF_EARLY_FLATTREE
-1
arch/powerpc/Kconfig
··· 133 133 select HAVE_REGS_AND_STACK_ACCESS_API 134 134 select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64 135 135 select HAVE_GENERIC_HARDIRQS 136 - select HAVE_SPARSE_IRQ 137 136 select SPARSE_IRQ 138 137 select IRQ_PER_CPU 139 138 select IRQ_DOMAIN
+1 -1
arch/sh/Kconfig
··· 22 22 select HAVE_SYSCALL_TRACEPOINTS 23 23 select HAVE_REGS_AND_STACK_ACCESS_API 24 24 select HAVE_GENERIC_HARDIRQS 25 - select HAVE_SPARSE_IRQ 25 + select MAY_HAVE_SPARSE_IRQ 26 26 select IRQ_FORCED_THREADING 27 27 select RTC_LIB 28 28 select GENERIC_ATOMIC64
-11
arch/sh/include/asm/irq.h
··· 21 21 #define NO_IRQ_IGNORE ((unsigned int)-1) 22 22 23 23 /* 24 - * Convert back and forth between INTEVT and IRQ values. 25 - */ 26 - #ifdef CONFIG_CPU_HAS_INTEVT 27 - #define evt2irq(evt) (((evt) >> 5) - 16) 28 - #define irq2evt(irq) (((irq) + 16) << 5) 29 - #else 30 - #define evt2irq(evt) (evt) 31 - #define irq2evt(irq) (irq) 32 - #endif 33 - 34 - /* 35 24 * Simple Mask Register Support 36 25 */ 37 26 extern void make_maskreg_irq(unsigned int irq);
-1
arch/x86/Kconfig
··· 69 69 select HAVE_ARCH_JUMP_LABEL 70 70 select HAVE_TEXT_POKE_SMP 71 71 select HAVE_GENERIC_HARDIRQS 72 - select HAVE_SPARSE_IRQ 73 72 select SPARSE_IRQ 74 73 select GENERIC_FIND_FIRST_BIT 75 74 select GENERIC_IRQ_PROBE
-1
drivers/clocksource/Kconfig
··· 26 26 config CLKSRC_DBX500_PRCMU_SCHED_CLOCK 27 27 bool "Clocksource PRCMU Timer sched_clock" 28 28 depends on (CLKSRC_DBX500_PRCMU && !NOMADIK_MTU_SCHED_CLOCK) 29 - select HAVE_SCHED_CLOCK 30 29 default y 31 30 help 32 31 Use the always on PRCMU Timer as sched_clock
+2
drivers/gpio/gpio-pxa.c
··· 22 22 #include <linux/syscore_ops.h> 23 23 #include <linux/slab.h> 24 24 25 + #include <mach/irqs.h> 26 + 25 27 /* 26 28 * We handle the GPIOs by banks, each bank covers up to 32 GPIOs with 27 29 * one set of registers. The register offsets are organized below:
+1 -1
drivers/sh/intc/balancing.c
··· 9 9 */ 10 10 #include "internals.h" 11 11 12 - static unsigned long dist_handle[NR_IRQS]; 12 + static unsigned long dist_handle[INTC_NR_IRQS]; 13 13 14 14 void intc_balancing_enable(unsigned int irq) 15 15 {
+1 -1
drivers/sh/intc/core.c
··· 42 42 * - this needs to be at least 2 for 5-bit priorities on 7780 43 43 */ 44 44 static unsigned int default_prio_level = 2; /* 2 - 16 */ 45 - static unsigned int intc_prio_level[NR_IRQS]; /* for now */ 45 + static unsigned int intc_prio_level[INTC_NR_IRQS]; /* for now */ 46 46 47 47 unsigned int intc_get_dfl_prio_level(void) 48 48 {
+1 -1
drivers/sh/intc/handle.c
··· 13 13 #include <linux/spinlock.h> 14 14 #include "internals.h" 15 15 16 - static unsigned long ack_handle[NR_IRQS]; 16 + static unsigned long ack_handle[INTC_NR_IRQS]; 17 17 18 18 static intc_enum __init intc_grp_id(struct intc_desc *desc, 19 19 intc_enum enum_id)
+1 -1
drivers/sh/intc/virq.c
··· 17 17 #include <linux/export.h> 18 18 #include "internals.h" 19 19 20 - static struct intc_map_entry intc_irq_xlate[NR_IRQS]; 20 + static struct intc_map_entry intc_irq_xlate[INTC_NR_IRQS]; 21 21 22 22 struct intc_virq_list { 23 23 unsigned int irq;
+17
include/linux/sh_intc.h
··· 3 3 4 4 #include <linux/ioport.h> 5 5 6 + #ifdef CONFIG_SUPERH 7 + #define INTC_NR_IRQS 512 8 + #else 9 + #define INTC_NR_IRQS 1024 10 + #endif 11 + 12 + /* 13 + * Convert back and forth between INTEVT and IRQ values. 14 + */ 15 + #ifdef CONFIG_CPU_HAS_INTEVT 16 + #define evt2irq(evt) (((evt) >> 5) - 16) 17 + #define irq2evt(irq) (((irq) + 16) << 5) 18 + #else 19 + #define evt2irq(evt) (evt) 20 + #define irq2evt(irq) (irq) 21 + #endif 22 + 6 23 typedef unsigned char intc_enum; 7 24 8 25 struct intc_vect {
+2 -3
kernel/irq/Kconfig
··· 13 13 # Options selectable by the architecture code 14 14 15 15 # Make sparse irq Kconfig switch below available 16 - config HAVE_SPARSE_IRQ 16 + config MAY_HAVE_SPARSE_IRQ 17 17 bool 18 18 19 19 # Enable the generic irq autoprobe mechanism ··· 71 71 bool 72 72 73 73 config SPARSE_IRQ 74 - bool "Support sparse irq numbering" 75 - depends on HAVE_SPARSE_IRQ 74 + bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ 76 75 ---help--- 77 76 78 77 Sparse irq numbering is useful for distro kernels that want
+17 -1
scripts/gcc-goto.sh
··· 2 2 # Test for gcc 'asm goto' support 3 3 # Copyright (C) 2010, Jason Baron <jbaron@redhat.com> 4 4 5 - echo "int main(void) { entry: asm goto (\"\"::::entry); return 0; }" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y" 5 + cat << "END" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y" 6 + int main(void) 7 + { 8 + #ifdef __arm__ 9 + /* 10 + * Not related to asm goto, but used by jump label 11 + * and broken on some ARM GCC versions (see GCC Bug 48637). 12 + */ 13 + static struct { int dummy; int state; } tp; 14 + asm (".long %c0" :: "i" (&tp.state)); 15 + #endif 16 + 17 + entry: 18 + asm goto ("" :::: entry); 19 + return 0; 20 + } 21 + END
+1 -1
sound/arm/pxa2xx-ac97-lib.c
··· 21 21 #include <sound/ac97_codec.h> 22 22 #include <sound/pxa2xx-lib.h> 23 23 24 - #include <asm/irq.h> 24 + #include <mach/irqs.h> 25 25 #include <mach/regs-ac97.h> 26 26 #include <mach/audio.h> 27 27