Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] include/asm-v850/ "extern inline" -> "static inline"

"extern inline" doesn't make much sense.

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Cc: Miles Bader <miles@gnu.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Adrian Bunk and committed by Linus Torvalds.
23f88fe4 26d89d1e

+21 -21
+1 -1
include/asm-v850/atomic.h
··· 31 31 #define atomic_read(v) ((v)->counter) 32 32 #define atomic_set(v,i) (((v)->counter) = (i)) 33 33 34 - extern __inline__ int atomic_add_return (int i, volatile atomic_t *v) 34 + static inline int atomic_add_return (int i, volatile atomic_t *v) 35 35 { 36 36 unsigned long flags; 37 37 int res;
+3 -3
include/asm-v850/bitops.h
··· 30 30 * ffz = Find First Zero in word. Undefined if no zero exists, 31 31 * so code should check against ~0UL first.. 32 32 */ 33 - extern __inline__ unsigned long ffz (unsigned long word) 33 + static inline unsigned long ffz (unsigned long word) 34 34 { 35 35 unsigned long result = 0; 36 36 ··· 135 135 "m" (*((const char *)(addr) + ((nr) >> 3)))); \ 136 136 __test_bit_res; \ 137 137 }) 138 - extern __inline__ int __test_bit (int nr, const void *addr) 138 + static inline int __test_bit (int nr, const void *addr) 139 139 { 140 140 int res; 141 141 __asm__ __volatile__ ("tst1 %1, [%2]; setf nz, %0" ··· 157 157 #define find_first_zero_bit(addr, size) \ 158 158 find_next_zero_bit ((addr), (size), 0) 159 159 160 - extern __inline__ int find_next_zero_bit(const void *addr, int size, int offset) 160 + static inline int find_next_zero_bit(const void *addr, int size, int offset) 161 161 { 162 162 unsigned long *p = ((unsigned long *) addr) + (offset >> 5); 163 163 unsigned long result = offset & ~31UL;
+2 -2
include/asm-v850/delay.h
··· 16 16 17 17 #include <asm/param.h> 18 18 19 - extern __inline__ void __delay(unsigned long loops) 19 + static inline void __delay(unsigned long loops) 20 20 { 21 21 if (loops) 22 22 __asm__ __volatile__ ("1: add -1, %0; bnz 1b" ··· 33 33 34 34 extern unsigned long loops_per_jiffy; 35 35 36 - extern __inline__ void udelay(unsigned long usecs) 36 + static inline void udelay(unsigned long usecs) 37 37 { 38 38 register unsigned long full_loops, part_loops; 39 39
+1 -1
include/asm-v850/hw_irq.h
··· 1 1 #ifndef __V850_HW_IRQ_H__ 2 2 #define __V850_HW_IRQ_H__ 3 3 4 - extern inline void hw_resend_irq (struct hw_interrupt_type *h, unsigned int i) 4 + static inline void hw_resend_irq (struct hw_interrupt_type *h, unsigned int i) 5 5 { 6 6 } 7 7
+2 -2
include/asm-v850/processor.h
··· 59 59 60 60 61 61 /* Do necessary setup to start up a newly executed thread. */ 62 - extern inline void start_thread (struct pt_regs *regs, 62 + static inline void start_thread (struct pt_regs *regs, 63 63 unsigned long pc, unsigned long usp) 64 64 { 65 65 regs->pc = pc; ··· 68 68 } 69 69 70 70 /* Free all resources held by a thread. */ 71 - extern inline void release_thread (struct task_struct *dead_task) 71 + static inline void release_thread (struct task_struct *dead_task) 72 72 { 73 73 } 74 74
+5 -5
include/asm-v850/semaphore.h
··· 24 24 #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC (name,1) 25 25 #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC (name,0) 26 26 27 - extern inline void sema_init (struct semaphore *sem, int val) 27 + static inline void sema_init (struct semaphore *sem, int val) 28 28 { 29 29 *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); 30 30 } ··· 52 52 extern int __down_trylock (struct semaphore * sem); 53 53 extern void __up (struct semaphore * sem); 54 54 55 - extern inline void down (struct semaphore * sem) 55 + static inline void down (struct semaphore * sem) 56 56 { 57 57 might_sleep(); 58 58 if (atomic_dec_return (&sem->count) < 0) 59 59 __down (sem); 60 60 } 61 61 62 - extern inline int down_interruptible (struct semaphore * sem) 62 + static inline int down_interruptible (struct semaphore * sem) 63 63 { 64 64 int ret = 0; 65 65 might_sleep(); ··· 68 68 return ret; 69 69 } 70 70 71 - extern inline int down_trylock (struct semaphore *sem) 71 + static inline int down_trylock (struct semaphore *sem) 72 72 { 73 73 int ret = 0; 74 74 if (atomic_dec_return (&sem->count) < 0) ··· 76 76 return ret; 77 77 } 78 78 79 - extern inline void up (struct semaphore * sem) 79 + static inline void up (struct semaphore * sem) 80 80 { 81 81 if (atomic_inc_return (&sem->count) <= 0) 82 82 __up (sem);
+1 -1
include/asm-v850/system.h
··· 81 81 ((__typeof__ (*(ptr)))__xchg ((unsigned long)(with), (ptr), sizeof (*(ptr)))) 82 82 #define tas(ptr) (xchg ((ptr), 1)) 83 83 84 - extern inline unsigned long __xchg (unsigned long with, 84 + static inline unsigned long __xchg (unsigned long with, 85 85 __volatile__ void *ptr, int size) 86 86 { 87 87 unsigned long tmp, flags;
+2 -2
include/asm-v850/tlbflush.h
··· 56 56 BUG (); 57 57 } 58 58 59 - extern inline void flush_tlb_kernel_page(unsigned long addr) 59 + static inline void flush_tlb_kernel_page(unsigned long addr) 60 60 { 61 61 BUG (); 62 62 } 63 63 64 - extern inline void flush_tlb_pgtables(struct mm_struct *mm, 64 + static inline void flush_tlb_pgtables(struct mm_struct *mm, 65 65 unsigned long start, unsigned long end) 66 66 { 67 67 BUG ();
+1 -1
include/asm-v850/uaccess.h
··· 14 14 #define VERIFY_READ 0 15 15 #define VERIFY_WRITE 1 16 16 17 - extern inline int access_ok (int type, const void *addr, unsigned long size) 17 + static inline int access_ok (int type, const void *addr, unsigned long size) 18 18 { 19 19 /* XXX I guess we should check against real ram bounds at least, and 20 20 possibly make sure ADDR is not within the kernel.
+3 -3
include/asm-v850/unaligned.h
··· 82 82 }) 83 83 84 84 85 - extern inline void __put_unaligned_2(__u32 __v, register __u8 *__p) 85 + static inline void __put_unaligned_2(__u32 __v, register __u8 *__p) 86 86 { 87 87 *__p++ = __v; 88 88 *__p++ = __v >> 8; 89 89 } 90 90 91 - extern inline void __put_unaligned_4(__u32 __v, register __u8 *__p) 91 + static inline void __put_unaligned_4(__u32 __v, register __u8 *__p) 92 92 { 93 93 __put_unaligned_2(__v >> 16, __p + 2); 94 94 __put_unaligned_2(__v, __p); 95 95 } 96 96 97 - extern inline void __put_unaligned_8(const unsigned long long __v, register __u8 *__p) 97 + static inline void __put_unaligned_8(const unsigned long long __v, register __u8 *__p) 98 98 { 99 99 /* 100 100 * tradeoff: 8 bytes of stack for all unaligned puts (2