[PATCH] include/asm-sh64/: "extern inline" -> "static inline"

"extern inline" doesn't make much sense.

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Cc: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Adrian Bunk and committed by Linus Torvalds.
ca5ed2f5 e0795cf4

+30 -30 (total: 30 insertions, 30 deletions across 8 files)
+4 -4
include/asm-sh64/io.h
··· 143 143 * Change virtual addresses to physical addresses and vv. 144 144 * These are trivial on the 1:1 Linux/SuperH mapping 145 145 */ 146 - extern __inline__ unsigned long virt_to_phys(volatile void * address) 146 + static inline unsigned long virt_to_phys(volatile void * address) 147 147 { 148 148 return __pa(address); 149 149 } 150 150 151 - extern __inline__ void * phys_to_virt(unsigned long address) 151 + static inline void * phys_to_virt(unsigned long address) 152 152 { 153 153 return __va(address); 154 154 } ··· 156 156 extern void * __ioremap(unsigned long phys_addr, unsigned long size, 157 157 unsigned long flags); 158 158 159 - extern __inline__ void * ioremap(unsigned long phys_addr, unsigned long size) 159 + static inline void * ioremap(unsigned long phys_addr, unsigned long size) 160 160 { 161 161 return __ioremap(phys_addr, size, 1); 162 162 } 163 163 164 - extern __inline__ void * ioremap_nocache (unsigned long phys_addr, unsigned long size) 164 + static inline void * ioremap_nocache (unsigned long phys_addr, unsigned long size) 165 165 { 166 166 return __ioremap(phys_addr, size, 0); 167 167 }
+1 -1
include/asm-sh64/mmu_context.h
··· 50 50 */ 51 51 #define MMU_VPN_MASK 0xfffff000 52 52 53 - extern __inline__ void 53 + static inline void 54 54 get_new_mmu_context(struct mm_struct *mm) 55 55 { 56 56 extern void flush_tlb_all(void);
+7 -7
include/asm-sh64/pgalloc.h
··· 38 38 * if any. 39 39 */ 40 40 41 - extern __inline__ pgd_t *get_pgd_slow(void) 41 + static inline pgd_t *get_pgd_slow(void) 42 42 { 43 43 unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t)); 44 44 pgd_t *ret = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL); 45 45 return ret; 46 46 } 47 47 48 - extern __inline__ pgd_t *get_pgd_fast(void) 48 + static inline pgd_t *get_pgd_fast(void) 49 49 { 50 50 unsigned long *ret; 51 51 ··· 62 62 return (pgd_t *)ret; 63 63 } 64 64 65 - extern __inline__ void free_pgd_fast(pgd_t *pgd) 65 + static inline void free_pgd_fast(pgd_t *pgd) 66 66 { 67 67 *(unsigned long *)pgd = (unsigned long) pgd_quicklist; 68 68 pgd_quicklist = (unsigned long *) pgd; 69 69 pgtable_cache_size++; 70 70 } 71 71 72 - extern __inline__ void free_pgd_slow(pgd_t *pgd) 72 + static inline void free_pgd_slow(pgd_t *pgd) 73 73 { 74 74 kfree((void *)pgd); 75 75 } ··· 77 77 extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted); 78 78 extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted); 79 79 80 - extern __inline__ pte_t *get_pte_fast(void) 80 + static inline pte_t *get_pte_fast(void) 81 81 { 82 82 unsigned long *ret; 83 83 ··· 89 89 return (pte_t *)ret; 90 90 } 91 91 92 - extern __inline__ void free_pte_fast(pte_t *pte) 92 + static inline void free_pte_fast(pte_t *pte) 93 93 { 94 94 *(unsigned long *)pte = (unsigned long) pte_quicklist; 95 95 pte_quicklist = (unsigned long *) pte; ··· 167 167 168 168 extern int do_check_pgt_cache(int, int); 169 169 170 - extern inline void set_pgdir(unsigned long address, pgd_t entry) 170 + static inline void set_pgdir(unsigned long address, pgd_t entry) 171 171 { 172 172 struct task_struct * p; 173 173 pgd_t *pgd;
+12 -12
include/asm-sh64/pgtable.h
··· 421 421 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } 422 422 static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_WRITE; } 423 423 424 - extern inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_READ)); return pte; } 425 - extern inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; } 426 - extern inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_EXECUTE)); return pte; } 427 - extern inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; } 428 - extern inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; } 424 + static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_READ)); return pte; } 425 + static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; } 426 + static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_EXECUTE)); return pte; } 427 + static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; } 428 + static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; } 429 429 430 - extern inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_READ)); return pte; } 431 - extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; } 432 - extern inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE)); return pte; } 433 - extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } 434 - extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } 435 - extern inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; } 430 + static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_READ)); return pte; } 431 + static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; } 432 + static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE)); return pte; } 433 + static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } 434 + static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } 435 + static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; } 436 436 437 437 438 438 /* ··· 456 456 #define mk_pte_phys(physpage, pgprot) \ 457 457 ({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; }) 458 458 459 - extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 459 + static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 460 460 { set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; } 461 461 462 462 typedef pte_t *pte_addr_t;
+2 -2
include/asm-sh64/processor.h
··· 228 228 * FPU lazy state save handling. 229 229 */ 230 230 231 - extern __inline__ void release_fpu(void) 231 + static inline void release_fpu(void) 232 232 { 233 233 unsigned long long __dummy; 234 234 ··· 240 240 : "r" (SR_FD)); 241 241 } 242 242 243 - extern __inline__ void grab_fpu(void) 243 + static inline void grab_fpu(void) 244 244 { 245 245 unsigned long long __dummy; 246 246
+2 -2
include/asm-sh64/system.h
··· 132 132 (flags != 0); \ 133 133 }) 134 134 135 - extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val) 135 + static inline unsigned long xchg_u32(volatile int * m, unsigned long val) 136 136 { 137 137 unsigned long flags, retval; 138 138 ··· 143 143 return retval; 144 144 } 145 145 146 - extern __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val) 146 + static inline unsigned long xchg_u8(volatile unsigned char * m, unsigned long val) 147 147 { 148 148 unsigned long flags, retval; 149 149
+1 -1
include/asm-sh64/tlbflush.h
··· 20 20 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 21 21 unsigned long end); 22 22 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page); 23 - extern inline void flush_tlb_pgtables(struct mm_struct *mm, 23 + static inline void flush_tlb_pgtables(struct mm_struct *mm, 24 24 unsigned long start, unsigned long end) 25 25 { 26 26 }
+1 -1
include/asm-sh64/uaccess.h
··· 287 287 */ 288 288 extern long __strnlen_user(const char *__s, long __n); 289 289 290 - extern __inline__ long strnlen_user(const char *s, long n) 290 + static inline long strnlen_user(const char *s, long n) 291 291 { 292 292 if (!__addr_ok(s)) 293 293 return 0;