[PATCH] mark several functions __always_inline

Arjan van de Ven <arjan@infradead.org>

Mark a number of functions as 'must inline'. The functions affected by this
patch need to be inlined because they rely on their arguments being
compile-time constants, so that most of the function body optimizes away.
At this point the patch does not change behavior; it is for documentation
only (and for future patches in the inline series).
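
For reference, the annotation used throughout this patch is the kernel's
forced-inline marker, which for gcc boils down to inline plus the
always_inline attribute. The sketch below is a stand-alone user-space
illustration, not code from this patch (tiny_copy and copy_word are made-up
names), of why forcing the inline matters: the constant-size switch can only
collapse to a move or two if the body really lands at a call site where n is
a literal.

#include <stddef.h>
#include <string.h>

/* Forced-inline marker, roughly as the kernel defines it for gcc:
 * plain 'inline' is only a hint, which gcc is free to ignore. */
#define __always_inline	inline __attribute__((always_inline))

/*
 * Same shape as __constant_memcpy() later in this patch: if this is
 * inlined where 'n' is a compile-time constant, the switch and the
 * fallback call both disappear and only a single store remains.
 */
static __always_inline void *tiny_copy(void *to, const void *from, size_t n)
{
	switch (n) {
	case 1:
		*(char *)to = *(const char *)from;
		return to;
	case 4:
		*(int *)to = *(const int *)from;
		return to;
	default:
		return memcpy(to, from, n);
	}
}

void copy_word(int *dst, const int *src)
{
	tiny_copy(dst, src, sizeof(*dst));	/* n == 4: one move after inlining */
}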

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by Ingo Molnar and committed by Linus Torvalds 652050ae 9ab34fe7

+15 -15
+1 -1
include/asm-i386/bitops.h
@@ -247,7 +247,7 @@
 static int test_bit(int nr, const volatile void * addr);
 #endif
 
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
 }
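
For context, constant_test_bit() is the half of test_bit() that handles a
compile-time-constant bit number; the dispatch in the same header looks
roughly like this (paraphrased, not part of this patch), which is why the
helper only pays off when it is actually inlined at the constant call site:

/* Paraphrased dispatch from asm-i386/bitops.h */
#define test_bit(nr, addr)				\
	(__builtin_constant_p(nr) ?			\
	 constant_test_bit((nr), (addr)) :		\
	 variable_test_bit((nr), (addr)))
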
+1 -1
include/asm-i386/current.h
@@ -5,7 +5,7 @@
 
 struct task_struct;
 
-static inline struct task_struct * get_current(void)
+static __always_inline struct task_struct * get_current(void)
 {
 	return current_thread_info()->task;
 }
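
get_current() has no constant argument to fold, but every use of the
current macro expands to this call, so it is expected to compile down to a
single load of current_thread_info()->task. The wrapper in the same header
is roughly the following (shown for context, not part of the patch):

#define current		get_current()
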
+4 -4
include/asm-i386/string.h
@@ -201,7 +201,7 @@
 	return __res;
 }
 
-static inline void * __memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __memcpy(void * to, const void * from, size_t n)
 {
 	int d0, d1, d2;
 	__asm__ __volatile__(
@@ -223,7 +223,7 @@
  * This looks ugly, but the compiler can optimize it totally,
  * as the count is constant.
  */
-static inline void * __constant_memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n)
 {
 	long esi, edi;
 	if (!n) return to;
@@ -367,7 +367,7 @@
  * things 32 bits at a time even when we don't know the size of the
  * area at compile-time..
  */
-static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
 {
 	int d0, d1;
 	__asm__ __volatile__(
@@ -416,7 +416,7 @@
  * This looks horribly ugly, but the compiler can optimize it totally,
  * as we by now know that both pattern and count is constant..
  */
-static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
 {
 	switch (count) {
 		case 0:
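
The __constant_* variants above only exist to be expanded where the size
(or pattern) is a literal; the selection happens in macros in the same
header, roughly like this for memcpy() (paraphrased, 3DNOW variants
omitted, not part of this patch):

#define memcpy(t, f, n)					\
	(__builtin_constant_p(n) ?			\
	 __constant_memcpy((t), (f), (n)) :		\
	 __memcpy((t), (f), (n)))
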
+4 -4
include/asm-i386/uaccess.h
@@ -411,7 +411,7 @@
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
  */
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -432,7 +432,7 @@
 	return __copy_to_user_ll(to, from, n);
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_sleep();
@@ -456,7 +456,7 @@
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
  */
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -477,7 +477,7 @@
 	return __copy_from_user_ll(to, from, n);
 }
 
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
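
A hypothetical caller (not from this patch; report_status and uptr are
made-up names) shows why the __builtin_constant_p(n) test visible above
depends on inlining: sizeof(val) is only seen as a constant if
__copy_to_user() is expanded into the caller, at which point the copy
should shrink to a single put_user-style store.

static int report_status(u32 __user *uptr)
{
	u32 val = 1;

	/* n == sizeof(u32), a compile-time constant */
	return __copy_to_user(uptr, &val, sizeof(val)) ? -EFAULT : 0;
}
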
+1 -1
include/asm-x86_64/fixmap.h
@@ -76,7 +76,7 @@
  * directly without translation, we catch the bug with a NULL-deference
  * kernel oops. Illegal ranges of incoming indices are caught too.
  */
-static inline unsigned long fix_to_virt(const unsigned int idx)
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
 {
 	/*
 	 * this branch gets completely eliminated after inlining,
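
The branch that comment refers to is a bounds check against
__end_of_fixed_addresses which calls a function that is declared but never
defined; the rest of fix_to_virt() looks roughly like this (paraphrased,
not part of the diff), so a constant out-of-range index becomes a
link-time error and a valid constant index folds to a plain address:

	if (idx >= __end_of_fixed_addresses)
		__this_fixmap_does_not_exist();	/* never defined anywhere */

	return __fix_to_virt(idx);
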
+3 -3
include/asm-x86_64/uaccess.h
@@ -244,7 +244,7 @@
 extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
 extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len);
 
-static inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
+static __always_inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
@@ -273,7 +273,7 @@
 	}
 }
 
-static inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
+static __always_inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
@@ -305,7 +305,7 @@
 }
 
 
-static inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+static __always_inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
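
As on i386, the !__builtin_constant_p(size) test shown above is the whole
point: a hypothetical caller like the one below (made-up names, not part
of the patch) passes sizeof(*r), and only with forced inlining does that
read as a constant, letting the copy expand to a short fixed-size sequence
instead of a call to copy_from_user().

struct sample_req {
	u64 addr;
	u32 len;
	u32 flags;
};

static int fetch_sample_req(struct sample_req *r,
			    const struct sample_req __user *urp)
{
	return __copy_from_user(r, urp, sizeof(*r)) ? -EFAULT : 0;
}
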
+1 -1
include/linux/mm.h
@@ -512,7 +512,7 @@
 extern struct page *mem_map;
 #endif
 
-static inline void *lowmem_page_address(struct page *page)
+static __always_inline void *lowmem_page_address(struct page *page)
 {
 	return __va(page_to_pfn(page) << PAGE_SHIFT);
 }
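
For context (paraphrased, and only in configurations without highmem page
address tracking), page_address() is simply an alias for this helper, so
once lowmem_page_address() is inlined the whole lookup should reduce to a
shift and an add:

#if !defined(WANT_PAGE_VIRTUAL) && !defined(HASHED_PAGE_VIRTUAL)
#define page_address(page)	lowmem_page_address(page)
#endif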