[PATCH] mark several functions __always_inline

Arjan van de Ven <arjan@infradead.org>

Mark a number of functions as 'must inline'. The functions affected by this
patch need to be inlined because they rely on their arguments being
compile-time constants, so that most of the function body optimizes away. At
this point the patch does not change behavior; it's for documentation only
(and for future patches in the inline series).
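
For context: __always_inline maps to GCC's always_inline function attribute,
which forces the body to be expanded at every call site even under -Os or when
the inliner's size heuristics would otherwise emit an out-of-line copy. That is
what makes the constant-argument trick work: __builtin_constant_p() only
evaluates to 1 once the helper's body sits in a caller that passes a literal,
while an out-of-line copy only ever sees an ordinary parameter. A minimal
standalone sketch (helper names made up for illustration; compile with gcc -O2)
shows the effect:

	#include <stdio.h>

	/* roughly how the kernel defines it (include/linux/compiler-gcc.h) */
	#define __always_inline inline __attribute__((always_inline))

	static __always_inline int sees_constant(unsigned long n)
	{
		/* folds to 1 when expanded at a call site passing a literal */
		return __builtin_constant_p(n);
	}

	static __attribute__((noinline)) int outlined(unsigned long n)
	{
		/* never inlined, so n is just a runtime parameter here */
		return __builtin_constant_p(n);
	}

	int main(void)
	{
		/* with gcc -O2 this typically prints "1 0" */
		printf("%d %d\n", sees_constant(16), outlined(16));
		return 0;
	}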

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

---
 include/asm-i386/bitops.h    |    2 +-
 include/asm-i386/current.h   |    2 +-
 include/asm-i386/string.h    |    8 ++++----
 include/asm-i386/uaccess.h   |    8 ++++----
 include/asm-x86_64/fixmap.h  |    2 +-
 include/asm-x86_64/uaccess.h |    6 +++---
 include/linux/mm.h           |    2 +-
 7 files changed, 15 insertions(+), 15 deletions(-)

--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -247,7 +247,7 @@
 static int test_bit(int nr, const volatile void * addr);
 #endif
 
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
 }
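
The reason constant_test_bit() must never be emitted out of line is elsewhere
in this same header: test_bit() is a macro that dispatches on
__builtin_constant_p(nr), essentially as below, and the constant-nr variant
only collapses to a single constant test if its body lands in the caller.

	/* dispatcher in include/asm-i386/bitops.h (roughly) */
	#define test_bit(nr, addr)			\
		(__builtin_constant_p(nr) ?		\
		 constant_test_bit((nr), (addr)) :	\
		 variable_test_bit((nr), (addr)))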

--- a/include/asm-i386/current.h
+++ b/include/asm-i386/current.h
@@ -5,7 +5,7 @@
 
 struct task_struct;
 
-static inline struct task_struct * get_current(void)
+static __always_inline struct task_struct * get_current(void)
 {
 	return current_thread_info()->task;
 }
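
get_current() is the backend of the current macro in this same header, so it is
expanded on practically every code path in the kernel; forcing inlining keeps
each use of current a single load of current_thread_info()->task rather than a
function call. For reference, the header wires it up (roughly) as:

	/* from include/asm-i386/current.h */
	#define current get_current()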

--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -201,7 +201,7 @@
 	return __res;
 }
 
-static inline void * __memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __memcpy(void * to, const void * from, size_t n)
 {
 	int d0, d1, d2;
 	__asm__ __volatile__(
@@ -223,7 +223,7 @@
  * This looks ugly, but the compiler can optimize it totally,
  * as the count is constant.
  */
-static inline void * __constant_memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n)
 {
 	long esi, edi;
 	if (!n) return to;
@@ -367,7 +367,7 @@
 * things 32 bits at a time even when we don't know the size of the
 * area at compile-time..
 */
-static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
 {
 	int d0, d1;
 	__asm__ __volatile__(
@@ -416,7 +416,7 @@
 * This looks horribly ugly, but the compiler can optimize it totally,
 * as we by now know that both pattern and count is constant..
 */
-static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
 {
 	switch (count) {
 		case 0:
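
All four of these helpers sit behind the memcpy()/memset() macros later in the
same header, which pick the __constant_* variants when GCC can prove the length
(and, for memset, the fill pattern) at compile time; the "ugly" switches and
unrolled copies then fold down to a few moves in the caller. The memcpy() side
of that dispatch looks roughly like this (non-3DNow! configuration):

	/* dispatcher in include/asm-i386/string.h (roughly) */
	#define memcpy(t, f, n)				\
		(__builtin_constant_p(n) ?		\
		 __constant_memcpy((t), (f), (n)) :	\
		 __memcpy((t), (f), (n)))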

--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -411,7 +411,7 @@
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -432,7 +432,7 @@
 	return __copy_to_user_ll(to, from, n);
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_sleep();
@@ -456,7 +456,7 @@
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -477,7 +477,7 @@
 	return __copy_from_user_ll(to, from, n);
 }
 
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
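
Inside these functions the __builtin_constant_p(n) branch handles small
constant sizes with a single inlined put_user/get_user-style access and only
falls back to the out-of-line __copy_to_user_ll()/__copy_from_user_ll()
routines otherwise, so the win only materializes if the whole body is inlined
where n is known. A hypothetical kernel-style caller (not part of this patch)
shows the intended effect:

	/* hypothetical caller: sizeof(val) is the constant 4, so the inlined
	 * __copy_to_user() reduces to one user-space store with fault fixup
	 * instead of a call to __copy_to_user_ll() */
	static int report_value(int __user *uptr, int val)
	{
		return __copy_to_user(uptr, &val, sizeof(val)) ? -EFAULT : 0;
	}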

--- a/include/asm-x86_64/fixmap.h
+++ b/include/asm-x86_64/fixmap.h
@@ -76,7 +76,7 @@
 * directly without translation, we catch the bug with a NULL-deference
 * kernel oops. Illegal ranges of incoming indices are caught too.
 */
-static inline unsigned long fix_to_virt(const unsigned int idx)
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
 {
 	/*
 	 * this branch gets completely eliminated after inlining,
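
fix_to_virt() depends on being fully inlined for its compile-time range check
to work: the index comparison is meant to be evaluated against a constant and
optimized away, and the out-of-range branch calls a function that is
deliberately never defined, so a bad index surfaces as a link error. The part
of the function the hunk cuts off looks roughly like this:

	extern void __this_fixmap_does_not_exist(void);	/* intentionally undefined */

	static __always_inline unsigned long fix_to_virt(const unsigned int idx)
	{
		/*
		 * this branch gets completely eliminated after inlining, unless
		 * idx is out of range; then the undefined call below survives
		 * to link time and the build fails
		 */
		if (idx >= __end_of_fixed_addresses)
			__this_fixmap_does_not_exist();
		return __fix_to_virt(idx);
	}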

--- a/include/asm-x86_64/uaccess.h
+++ b/include/asm-x86_64/uaccess.h
@@ -244,7 +244,7 @@
 extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
 extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len);
 
-static inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
+static __always_inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
@@ -273,7 +273,7 @@
 	}
 }
 
-static inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
+static __always_inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
@@ -305,7 +305,7 @@
 }
 
 
-static inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+static __always_inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
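
The x86_64 variants follow the same pattern as the i386 ones: a non-constant
size falls straight back to the out-of-line copy routine (copy_user_generic()
in this tree), while constant sizes are handled by a switch over the small
fixed widths that the compiler resolves once the body is inlined. A
hypothetical kernel-style caller of __copy_from_user() with a fixed-size
structure (names invented for illustration) shows where that pays off:

	struct my_args {
		u64 flags;
		u64 addr;
	};

	/* hypothetical ioctl-style helper: sizeof(*dst) is a compile-time
	 * constant, so the inlined __copy_from_user() becomes a couple of
	 * fixed-size user-space loads with fault fixups rather than a call
	 * into copy_user_generic() */
	static int fetch_args(struct my_args *dst, const struct my_args __user *src)
	{
		if (__copy_from_user(dst, src, sizeof(*dst)))
			return -EFAULT;
		return 0;
	}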

--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -512,7 +512,7 @@
 extern struct page *mem_map;
 #endif
 
-static inline void *lowmem_page_address(struct page *page)
+static __always_inline void *lowmem_page_address(struct page *page)
 {
 	return __va(page_to_pfn(page) << PAGE_SHIFT);
 }
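
lowmem_page_address() is what page_address() expands to in configurations
without highmem page-virtual tracking, so it ends up on very hot paths and
should reduce to plain address arithmetic in its callers. For reference,
roughly:

	/* from include/linux/mm.h (roughly): used when neither WANT_PAGE_VIRTUAL
	 * nor HASHED_PAGE_VIRTUAL is configured */
	#define page_address(page) lowmem_page_address(page)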