Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[ARM] 5421/1: ftrace: fix crash due to tracing of __naked functions

This is a fix for the following crash observed in 2.6.29-rc3:
http://lkml.org/lkml/2009/1/29/150

On ARM it doesn't make sense to trace a naked function because then
mcount is called without stack and frame pointer being set up and there
is no chance to restore the lr register to the value before mcount was
called.

Reported-by: Matthias Kaehlcke <matthias@kaehlcke.net>
Tested-by: Matthias Kaehlcke <matthias@kaehlcke.net>

Cc: Abhishek Sagar <sagar.abhishek@gmail.com>
Cc: Steven Rostedt <rostedt@home.goodmis.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored by Uwe Kleine-König and committed by Russell King.
Commit hashes (as shown on the page): 446c92b2 9311c593

+18 -10
+2 -2
arch/arm/kernel/fiq.c
··· 88 88 * disable irqs for the duration. Note - these functions are almost 89 89 * entirely coded in assembly. 90 90 */ 91 - void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs) 91 + void __naked set_fiq_regs(struct pt_regs *regs) 92 92 { 93 93 register unsigned long tmp; 94 94 asm volatile ( ··· 106 106 : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE)); 107 107 } 108 108 109 - void __attribute__((naked)) get_fiq_regs(struct pt_regs *regs) 109 + void __naked get_fiq_regs(struct pt_regs *regs) 110 110 { 111 111 register unsigned long tmp; 112 112 asm volatile (
+1 -1
arch/arm/mm/copypage-feroceon.c
··· 13 13 #include <linux/init.h> 14 14 #include <linux/highmem.h> 15 15 16 - static void __attribute__((naked)) 16 + static void __naked 17 17 feroceon_copy_user_page(void *kto, const void *kfrom) 18 18 { 19 19 asm("\
+1 -1
arch/arm/mm/copypage-v3.c
··· 15 15 * 16 16 * FIXME: do we need to handle cache stuff... 17 17 */ 18 - static void __attribute__((naked)) 18 + static void __naked 19 19 v3_copy_user_page(void *kto, const void *kfrom) 20 20 { 21 21 asm("\n\
+1 -1
arch/arm/mm/copypage-v4mc.c
··· 44 44 * instruction. If your processor does not supply this, you have to write your 45 45 * own copy_user_highpage that does the right thing. 46 46 */ 47 - static void __attribute__((naked)) 47 + static void __naked 48 48 mc_copy_user_page(void *from, void *to) 49 49 { 50 50 asm volatile(
+1 -1
arch/arm/mm/copypage-v4wb.c
··· 22 22 * instruction. If your processor does not supply this, you have to write your 23 23 * own copy_user_highpage that does the right thing. 24 24 */ 25 - static void __attribute__((naked)) 25 + static void __naked 26 26 v4wb_copy_user_page(void *kto, const void *kfrom) 27 27 { 28 28 asm("\
+1 -1
arch/arm/mm/copypage-v4wt.c
··· 20 20 * dirty data in the cache. However, we do have to ensure that 21 21 * subsequent reads are up to date. 22 22 */ 23 - static void __attribute__((naked)) 23 + static void __naked 24 24 v4wt_copy_user_page(void *kto, const void *kfrom) 25 25 { 26 26 asm("\
+1 -1
arch/arm/mm/copypage-xsc3.c
··· 29 29 * if we eventually end up using our copied page. 30 30 * 31 31 */ 32 - static void __attribute__((naked)) 32 + static void __naked 33 33 xsc3_mc_copy_user_page(void *kto, const void *kfrom) 34 34 { 35 35 asm("\
+1 -1
arch/arm/mm/copypage-xscale.c
··· 42 42 * Dcache aliasing issue. The writes will be forwarded to the write buffer, 43 43 * and merged as appropriate. 44 44 */ 45 - static void __attribute__((naked)) 45 + static void __naked 46 46 mc_copy_user_page(void *from, void *to) 47 47 { 48 48 /*
+9 -1
include/linux/compiler-gcc.h
··· 52 52 #define __deprecated __attribute__((deprecated)) 53 53 #define __packed __attribute__((packed)) 54 54 #define __weak __attribute__((weak)) 55 - #define __naked __attribute__((naked)) 55 + 56 + /* 57 + * it doesn't make sense on ARM (currently the only user of __naked) to trace 58 + * naked functions because then mcount is called without stack and frame pointer 59 + * being set up and there is no chance to restore the lr register to the value 60 + * before mcount was called. 61 + */ 62 + #define __naked __attribute__((naked)) notrace 63 + 56 64 #define __noreturn __attribute__((noreturn)) 57 65 58 66 /*