Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SPARC]: "extern inline" doesn't make much sense.

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Adrian Bunk and committed by David S. Miller.
3115624e ed39f731

+116 -116
+1 -1
arch/sparc/kernel/time.c
··· 457 457 sbus_time_init(); 458 458 } 459 459 460 - extern __inline__ unsigned long do_gettimeoffset(void) 460 + static inline unsigned long do_gettimeoffset(void) 461 461 { 462 462 return (*master_l10_counter >> 10) & 0x1fffff; 463 463 }
+1 -1
arch/sparc/mm/srmmu.c
··· 260 260 { return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); } 261 261 262 262 /* to find an entry in a top-level page table... */ 263 - extern inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address) 263 + static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address) 264 264 { return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); } 265 265 266 266 /* Find an entry in the second-level page table.. */
+6 -6
include/asm-sparc/btfixup.h
··· 51 51 #define BTFIXUPDEF_SIMM13(__name) \ 52 52 extern unsigned int ___sf_##__name(void) __attribute_const__; \ 53 53 extern unsigned ___ss_##__name[2]; \ 54 - extern __inline__ unsigned int ___sf_##__name(void) { \ 54 + static inline unsigned int ___sf_##__name(void) { \ 55 55 unsigned int ret; \ 56 56 __asm__ ("or %%g0, ___s_" #__name ", %0" : "=r"(ret)); \ 57 57 return ret; \ ··· 59 59 #define BTFIXUPDEF_SIMM13_INIT(__name,__val) \ 60 60 extern unsigned int ___sf_##__name(void) __attribute_const__; \ 61 61 extern unsigned ___ss_##__name[2]; \ 62 - extern __inline__ unsigned int ___sf_##__name(void) { \ 62 + static inline unsigned int ___sf_##__name(void) { \ 63 63 unsigned int ret; \ 64 64 __asm__ ("or %%g0, ___s_" #__name "__btset_" #__val ", %0" : "=r"(ret));\ 65 65 return ret; \ ··· 73 73 #define BTFIXUPDEF_HALF(__name) \ 74 74 extern unsigned int ___af_##__name(void) __attribute_const__; \ 75 75 extern unsigned ___as_##__name[2]; \ 76 - extern __inline__ unsigned int ___af_##__name(void) { \ 76 + static inline unsigned int ___af_##__name(void) { \ 77 77 unsigned int ret; \ 78 78 __asm__ ("or %%g0, ___a_" #__name ", %0" : "=r"(ret)); \ 79 79 return ret; \ ··· 81 81 #define BTFIXUPDEF_HALF_INIT(__name,__val) \ 82 82 extern unsigned int ___af_##__name(void) __attribute_const__; \ 83 83 extern unsigned ___as_##__name[2]; \ 84 - extern __inline__ unsigned int ___af_##__name(void) { \ 84 + static inline unsigned int ___af_##__name(void) { \ 85 85 unsigned int ret; \ 86 86 __asm__ ("or %%g0, ___a_" #__name "__btset_" #__val ", %0" : "=r"(ret));\ 87 87 return ret; \ ··· 92 92 #define BTFIXUPDEF_SETHI(__name) \ 93 93 extern unsigned int ___hf_##__name(void) __attribute_const__; \ 94 94 extern unsigned ___hs_##__name[2]; \ 95 - extern __inline__ unsigned int ___hf_##__name(void) { \ 95 + static inline unsigned int ___hf_##__name(void) { \ 96 96 unsigned int ret; \ 97 97 __asm__ ("sethi %%hi(___h_" #__name "), %0" : "=r"(ret)); \ 98 98 return ret; \ ··· 100 100 
#define BTFIXUPDEF_SETHI_INIT(__name,__val) \ 101 101 extern unsigned int ___hf_##__name(void) __attribute_const__; \ 102 102 extern unsigned ___hs_##__name[2]; \ 103 - extern __inline__ unsigned int ___hf_##__name(void) { \ 103 + static inline unsigned int ___hf_##__name(void) { \ 104 104 unsigned int ret; \ 105 105 __asm__ ("sethi %%hi(___h_" #__name "__btset_" #__val "), %0" : \ 106 106 "=r"(ret)); \
+9 -9
include/asm-sparc/cache.h
··· 27 27 */ 28 28 29 29 /* First, cache-tag access. */ 30 - extern __inline__ unsigned int get_icache_tag(int setnum, int tagnum) 30 + static inline unsigned int get_icache_tag(int setnum, int tagnum) 31 31 { 32 32 unsigned int vaddr, retval; 33 33 ··· 38 38 return retval; 39 39 } 40 40 41 - extern __inline__ void put_icache_tag(int setnum, int tagnum, unsigned int entry) 41 + static inline void put_icache_tag(int setnum, int tagnum, unsigned int entry) 42 42 { 43 43 unsigned int vaddr; 44 44 ··· 51 51 /* Second cache-data access. The data is returned two-32bit quantities 52 52 * at a time. 53 53 */ 54 - extern __inline__ void get_icache_data(int setnum, int tagnum, int subblock, 54 + static inline void get_icache_data(int setnum, int tagnum, int subblock, 55 55 unsigned int *data) 56 56 { 57 57 unsigned int value1, value2, vaddr; ··· 67 67 data[0] = value1; data[1] = value2; 68 68 } 69 69 70 - extern __inline__ void put_icache_data(int setnum, int tagnum, int subblock, 70 + static inline void put_icache_data(int setnum, int tagnum, int subblock, 71 71 unsigned int *data) 72 72 { 73 73 unsigned int value1, value2, vaddr; ··· 92 92 */ 93 93 94 94 /* Flushes which clear out both the on-chip and external caches */ 95 - extern __inline__ void flush_ei_page(unsigned int addr) 95 + static inline void flush_ei_page(unsigned int addr) 96 96 { 97 97 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : 98 98 "r" (addr), "i" (ASI_M_FLUSH_PAGE) : 99 99 "memory"); 100 100 } 101 101 102 - extern __inline__ void flush_ei_seg(unsigned int addr) 102 + static inline void flush_ei_seg(unsigned int addr) 103 103 { 104 104 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : 105 105 "r" (addr), "i" (ASI_M_FLUSH_SEG) : 106 106 "memory"); 107 107 } 108 108 109 - extern __inline__ void flush_ei_region(unsigned int addr) 109 + static inline void flush_ei_region(unsigned int addr) 110 110 { 111 111 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : 112 112 "r" (addr), "i" (ASI_M_FLUSH_REGION) : 113 
113 "memory"); 114 114 } 115 115 116 - extern __inline__ void flush_ei_ctx(unsigned int addr) 116 + static inline void flush_ei_ctx(unsigned int addr) 117 117 { 118 118 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : 119 119 "r" (addr), "i" (ASI_M_FLUSH_CTX) : 120 120 "memory"); 121 121 } 122 122 123 - extern __inline__ void flush_ei_user(unsigned int addr) 123 + static inline void flush_ei_user(unsigned int addr) 124 124 { 125 125 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : 126 126 "r" (addr), "i" (ASI_M_FLUSH_USER) :
+4 -4
include/asm-sparc/cypress.h
··· 48 48 #define CYPRESS_NFAULT 0x00000002 49 49 #define CYPRESS_MENABLE 0x00000001 50 50 51 - extern __inline__ void cypress_flush_page(unsigned long page) 51 + static inline void cypress_flush_page(unsigned long page) 52 52 { 53 53 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : 54 54 "r" (page), "i" (ASI_M_FLUSH_PAGE)); 55 55 } 56 56 57 - extern __inline__ void cypress_flush_segment(unsigned long addr) 57 + static inline void cypress_flush_segment(unsigned long addr) 58 58 { 59 59 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : 60 60 "r" (addr), "i" (ASI_M_FLUSH_SEG)); 61 61 } 62 62 63 - extern __inline__ void cypress_flush_region(unsigned long addr) 63 + static inline void cypress_flush_region(unsigned long addr) 64 64 { 65 65 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : 66 66 "r" (addr), "i" (ASI_M_FLUSH_REGION)); 67 67 } 68 68 69 - extern __inline__ void cypress_flush_context(void) 69 + static inline void cypress_flush_context(void) 70 70 { 71 71 __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : : 72 72 "i" (ASI_M_FLUSH_CTX));
+1 -1
include/asm-sparc/delay.h
··· 10 10 #include <linux/config.h> 11 11 #include <asm/cpudata.h> 12 12 13 - extern __inline__ void __delay(unsigned long loops) 13 + static inline void __delay(unsigned long loops) 14 14 { 15 15 __asm__ __volatile__("cmp %0, 0\n\t" 16 16 "1: bne 1b\n\t"
+1 -1
include/asm-sparc/dma.h
··· 198 198 /* Pause until counter runs out or BIT isn't set in the DMA condition 199 199 * register. 200 200 */ 201 - extern __inline__ void sparc_dma_pause(struct sparc_dma_registers *regs, 201 + static inline void sparc_dma_pause(struct sparc_dma_registers *regs, 202 202 unsigned long bit) 203 203 { 204 204 int ctr = 50000; /* Let's find some bugs ;) */
+2 -2
include/asm-sparc/iommu.h
··· 108 108 struct bit_map usemap; 109 109 }; 110 110 111 - extern __inline__ void iommu_invalidate(struct iommu_regs *regs) 111 + static inline void iommu_invalidate(struct iommu_regs *regs) 112 112 { 113 113 regs->tlbflush = 0; 114 114 } 115 115 116 - extern __inline__ void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba) 116 + static inline void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba) 117 117 { 118 118 regs->pageflush = (ba & PAGE_MASK); 119 119 }
+1 -1
include/asm-sparc/kdebug.h
··· 46 46 extern struct kernel_debug *linux_dbvec; 47 47 48 48 /* Use this macro in C-code to enter the debugger. */ 49 - extern __inline__ void sp_enter_debugger(void) 49 + static inline void sp_enter_debugger(void) 50 50 { 51 51 __asm__ __volatile__("jmpl %0, %%o7\n\t" 52 52 "nop\n\t" : :
+2 -2
include/asm-sparc/mbus.h
··· 83 83 */ 84 84 #define TBR_ID_SHIFT 20 85 85 86 - extern __inline__ int get_cpuid(void) 86 + static inline int get_cpuid(void) 87 87 { 88 88 register int retval; 89 89 __asm__ __volatile__("rd %%tbr, %0\n\t" ··· 93 93 return (retval & 3); 94 94 } 95 95 96 - extern __inline__ int get_modid(void) 96 + static inline int get_modid(void) 97 97 { 98 98 return (get_cpuid() | 0x8); 99 99 }
+1 -1
include/asm-sparc/msi.h
··· 19 19 #define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */ 20 20 21 21 22 - extern __inline__ void msi_set_sync(void) 22 + static inline void msi_set_sync(void) 23 23 { 24 24 __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t" 25 25 "andn %%g3, %2, %%g3\n\t"
+4 -4
include/asm-sparc/mxcc.h
··· 85 85 86 86 #ifndef __ASSEMBLY__ 87 87 88 - extern __inline__ void mxcc_set_stream_src(unsigned long *paddr) 88 + static inline void mxcc_set_stream_src(unsigned long *paddr) 89 89 { 90 90 unsigned long data0 = paddr[0]; 91 91 unsigned long data1 = paddr[1]; ··· 98 98 "i" (ASI_M_MXCC) : "g2", "g3"); 99 99 } 100 100 101 - extern __inline__ void mxcc_set_stream_dst(unsigned long *paddr) 101 + static inline void mxcc_set_stream_dst(unsigned long *paddr) 102 102 { 103 103 unsigned long data0 = paddr[0]; 104 104 unsigned long data1 = paddr[1]; ··· 111 111 "i" (ASI_M_MXCC) : "g2", "g3"); 112 112 } 113 113 114 - extern __inline__ unsigned long mxcc_get_creg(void) 114 + static inline unsigned long mxcc_get_creg(void) 115 115 { 116 116 unsigned long mxcc_control; 117 117 ··· 125 125 return mxcc_control; 126 126 } 127 127 128 - extern __inline__ void mxcc_set_creg(unsigned long mxcc_control) 128 + static inline void mxcc_set_creg(unsigned long mxcc_control) 129 129 { 130 130 __asm__ __volatile__("sta %0, [%1] %2\n\t" : : 131 131 "r" (mxcc_control), "r" (MXCC_CREG),
+15 -15
include/asm-sparc/obio.h
··· 98 98 99 99 #ifndef __ASSEMBLY__ 100 100 101 - extern __inline__ int bw_get_intr_mask(int sbus_level) 101 + static inline int bw_get_intr_mask(int sbus_level) 102 102 { 103 103 int mask; 104 104 ··· 109 109 return mask; 110 110 } 111 111 112 - extern __inline__ void bw_clear_intr_mask(int sbus_level, int mask) 112 + static inline void bw_clear_intr_mask(int sbus_level, int mask) 113 113 { 114 114 __asm__ __volatile__ ("stha %0, [%1] %2" : : 115 115 "r" (mask), ··· 117 117 "i" (ASI_M_CTL)); 118 118 } 119 119 120 - extern __inline__ unsigned bw_get_prof_limit(int cpu) 120 + static inline unsigned bw_get_prof_limit(int cpu) 121 121 { 122 122 unsigned limit; 123 123 ··· 128 128 return limit; 129 129 } 130 130 131 - extern __inline__ void bw_set_prof_limit(int cpu, unsigned limit) 131 + static inline void bw_set_prof_limit(int cpu, unsigned limit) 132 132 { 133 133 __asm__ __volatile__ ("sta %0, [%1] %2" : : 134 134 "r" (limit), ··· 136 136 "i" (ASI_M_CTL)); 137 137 } 138 138 139 - extern __inline__ unsigned bw_get_ctrl(int cpu) 139 + static inline unsigned bw_get_ctrl(int cpu) 140 140 { 141 141 unsigned ctrl; 142 142 ··· 147 147 return ctrl; 148 148 } 149 149 150 - extern __inline__ void bw_set_ctrl(int cpu, unsigned ctrl) 150 + static inline void bw_set_ctrl(int cpu, unsigned ctrl) 151 151 { 152 152 __asm__ __volatile__ ("sta %0, [%1] %2" : : 153 153 "r" (ctrl), ··· 157 157 158 158 extern unsigned char cpu_leds[32]; 159 159 160 - extern __inline__ void show_leds(int cpuid) 160 + static inline void show_leds(int cpuid) 161 161 { 162 162 cpuid &= 0x1e; 163 163 __asm__ __volatile__ ("stba %0, [%1] %2" : : ··· 166 166 "i" (ASI_M_CTL)); 167 167 } 168 168 169 - extern __inline__ unsigned cc_get_ipen(void) 169 + static inline unsigned cc_get_ipen(void) 170 170 { 171 171 unsigned pending; 172 172 ··· 177 177 return pending; 178 178 } 179 179 180 - extern __inline__ void cc_set_iclr(unsigned clear) 180 + static inline void cc_set_iclr(unsigned clear) 181 181 { 182 182 
__asm__ __volatile__ ("stha %0, [%1] %2" : : 183 183 "r" (clear), ··· 185 185 "i" (ASI_M_MXCC)); 186 186 } 187 187 188 - extern __inline__ unsigned cc_get_imsk(void) 188 + static inline unsigned cc_get_imsk(void) 189 189 { 190 190 unsigned mask; 191 191 ··· 196 196 return mask; 197 197 } 198 198 199 - extern __inline__ void cc_set_imsk(unsigned mask) 199 + static inline void cc_set_imsk(unsigned mask) 200 200 { 201 201 __asm__ __volatile__ ("stha %0, [%1] %2" : : 202 202 "r" (mask), ··· 204 204 "i" (ASI_M_MXCC)); 205 205 } 206 206 207 - extern __inline__ unsigned cc_get_imsk_other(int cpuid) 207 + static inline unsigned cc_get_imsk_other(int cpuid) 208 208 { 209 209 unsigned mask; 210 210 ··· 215 215 return mask; 216 216 } 217 217 218 - extern __inline__ void cc_set_imsk_other(int cpuid, unsigned mask) 218 + static inline void cc_set_imsk_other(int cpuid, unsigned mask) 219 219 { 220 220 __asm__ __volatile__ ("stha %0, [%1] %2" : : 221 221 "r" (mask), ··· 223 223 "i" (ASI_M_CTL)); 224 224 } 225 225 226 - extern __inline__ void cc_set_igen(unsigned gen) 226 + static inline void cc_set_igen(unsigned gen) 227 227 { 228 228 __asm__ __volatile__ ("sta %0, [%1] %2" : : 229 229 "r" (gen), ··· 239 239 #define IGEN_MESSAGE(bcast, devid, sid, levels) \ 240 240 (((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels)) 241 241 242 - extern __inline__ void sun4d_send_ipi(int cpu, int level) 242 + static inline void sun4d_send_ipi(int cpu, int level) 243 243 { 244 244 cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1))); 245 245 }
+3 -3
include/asm-sparc/pci.h
··· 15 15 16 16 #define PCI_IRQ_NONE 0xffffffff 17 17 18 - extern inline void pcibios_set_master(struct pci_dev *dev) 18 + static inline void pcibios_set_master(struct pci_dev *dev) 19 19 { 20 20 /* No special bus mastering setup handling */ 21 21 } 22 22 23 - extern inline void pcibios_penalize_isa_irq(int irq, int active) 23 + static inline void pcibios_penalize_isa_irq(int irq, int active) 24 24 { 25 25 /* We don't do dynamic PCI IRQ allocation */ 26 26 } ··· 137 137 * only drive the low 24-bits during PCI bus mastering, then 138 138 * you would pass 0x00ffffff as the mask to this function. 139 139 */ 140 - extern inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask) 140 + static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask) 141 141 { 142 142 return 1; 143 143 }
+14 -14
include/asm-sparc/pgtable.h
··· 154 154 BTFIXUPDEF_CALL(void, pte_clear, pte_t *) 155 155 BTFIXUPDEF_CALL(int, pte_read, pte_t) 156 156 157 - extern __inline__ int pte_none(pte_t pte) 157 + static inline int pte_none(pte_t pte) 158 158 { 159 159 return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask)); 160 160 } ··· 167 167 BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t) 168 168 BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *) 169 169 170 - extern __inline__ int pmd_none(pmd_t pmd) 170 + static inline int pmd_none(pmd_t pmd) 171 171 { 172 172 return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask)); 173 173 } ··· 195 195 BTFIXUPDEF_HALF(pte_youngi) 196 196 197 197 extern int pte_write(pte_t pte) __attribute_const__; 198 - extern __inline__ int pte_write(pte_t pte) 198 + static inline int pte_write(pte_t pte) 199 199 { 200 200 return pte_val(pte) & BTFIXUP_HALF(pte_writei); 201 201 } 202 202 203 203 extern int pte_dirty(pte_t pte) __attribute_const__; 204 - extern __inline__ int pte_dirty(pte_t pte) 204 + static inline int pte_dirty(pte_t pte) 205 205 { 206 206 return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi); 207 207 } 208 208 209 209 extern int pte_young(pte_t pte) __attribute_const__; 210 - extern __inline__ int pte_young(pte_t pte) 210 + static inline int pte_young(pte_t pte) 211 211 { 212 212 return pte_val(pte) & BTFIXUP_HALF(pte_youngi); 213 213 } ··· 218 218 BTFIXUPDEF_HALF(pte_filei) 219 219 220 220 extern int pte_file(pte_t pte) __attribute_const__; 221 - extern __inline__ int pte_file(pte_t pte) 221 + static inline int pte_file(pte_t pte) 222 222 { 223 223 return pte_val(pte) & BTFIXUP_HALF(pte_filei); 224 224 } ··· 230 230 BTFIXUPDEF_HALF(pte_mkoldi) 231 231 232 232 extern pte_t pte_wrprotect(pte_t pte) __attribute_const__; 233 - extern __inline__ pte_t pte_wrprotect(pte_t pte) 233 + static inline pte_t pte_wrprotect(pte_t pte) 234 234 { 235 235 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti)); 236 236 } 237 237 238 238 extern pte_t pte_mkclean(pte_t pte) __attribute_const__; 239 - extern 
__inline__ pte_t pte_mkclean(pte_t pte) 239 + static inline pte_t pte_mkclean(pte_t pte) 240 240 { 241 241 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani)); 242 242 } 243 243 244 244 extern pte_t pte_mkold(pte_t pte) __attribute_const__; 245 - extern __inline__ pte_t pte_mkold(pte_t pte) 245 + static inline pte_t pte_mkold(pte_t pte) 246 246 { 247 247 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi)); 248 248 } ··· 279 279 BTFIXUPDEF_INT(pte_modify_mask) 280 280 281 281 extern pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__; 282 - extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot) 282 + static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 283 283 { 284 284 return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) | 285 285 pgprot_val(newprot)); ··· 386 386 387 387 #define NO_CONTEXT -1 388 388 389 - extern __inline__ void remove_from_ctx_list(struct ctx_list *entry) 389 + static inline void remove_from_ctx_list(struct ctx_list *entry) 390 390 { 391 391 entry->next->prev = entry->prev; 392 392 entry->prev->next = entry->next; 393 393 } 394 394 395 - extern __inline__ void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry) 395 + static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry) 396 396 { 397 397 entry->next = head; 398 398 (entry->prev = head->prev)->next = entry; ··· 401 401 #define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry) 402 402 #define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry) 403 403 404 - extern __inline__ unsigned long 404 + static inline unsigned long 405 405 __get_phys (unsigned long addr) 406 406 { 407 407 switch (sparc_cpu_model){ ··· 416 416 } 417 417 } 418 418 419 - extern __inline__ int 419 + static inline int 420 420 __get_iospace (unsigned long addr) 421 421 { 422 422 switch (sparc_cpu_model){
+15 -15
include/asm-sparc/pgtsrmmu.h
··· 148 148 #define __nocache_fix(VADDR) __va(__nocache_pa(VADDR)) 149 149 150 150 /* Accessing the MMU control register. */ 151 - extern __inline__ unsigned int srmmu_get_mmureg(void) 151 + static inline unsigned int srmmu_get_mmureg(void) 152 152 { 153 153 unsigned int retval; 154 154 __asm__ __volatile__("lda [%%g0] %1, %0\n\t" : ··· 157 157 return retval; 158 158 } 159 159 160 - extern __inline__ void srmmu_set_mmureg(unsigned long regval) 160 + static inline void srmmu_set_mmureg(unsigned long regval) 161 161 { 162 162 __asm__ __volatile__("sta %0, [%%g0] %1\n\t" : : 163 163 "r" (regval), "i" (ASI_M_MMUREGS) : "memory"); 164 164 165 165 } 166 166 167 - extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr) 167 + static inline void srmmu_set_ctable_ptr(unsigned long paddr) 168 168 { 169 169 paddr = ((paddr >> 4) & SRMMU_CTX_PMASK); 170 170 __asm__ __volatile__("sta %0, [%1] %2\n\t" : : ··· 173 173 "memory"); 174 174 } 175 175 176 - extern __inline__ unsigned long srmmu_get_ctable_ptr(void) 176 + static inline unsigned long srmmu_get_ctable_ptr(void) 177 177 { 178 178 unsigned int retval; 179 179 ··· 184 184 return (retval & SRMMU_CTX_PMASK) << 4; 185 185 } 186 186 187 - extern __inline__ void srmmu_set_context(int context) 187 + static inline void srmmu_set_context(int context) 188 188 { 189 189 __asm__ __volatile__("sta %0, [%1] %2\n\t" : : 190 190 "r" (context), "r" (SRMMU_CTX_REG), 191 191 "i" (ASI_M_MMUREGS) : "memory"); 192 192 } 193 193 194 - extern __inline__ int srmmu_get_context(void) 194 + static inline int srmmu_get_context(void) 195 195 { 196 196 register int retval; 197 197 __asm__ __volatile__("lda [%1] %2, %0\n\t" : ··· 201 201 return retval; 202 202 } 203 203 204 - extern __inline__ unsigned int srmmu_get_fstatus(void) 204 + static inline unsigned int srmmu_get_fstatus(void) 205 205 { 206 206 unsigned int retval; 207 207 ··· 211 211 return retval; 212 212 } 213 213 214 - extern __inline__ unsigned int srmmu_get_faddr(void) 214 + static 
inline unsigned int srmmu_get_faddr(void) 215 215 { 216 216 unsigned int retval; 217 217 ··· 222 222 } 223 223 224 224 /* This is guaranteed on all SRMMU's. */ 225 - extern __inline__ void srmmu_flush_whole_tlb(void) 225 + static inline void srmmu_flush_whole_tlb(void) 226 226 { 227 227 __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : 228 228 "r" (0x400), /* Flush entire TLB!! */ ··· 231 231 } 232 232 233 233 /* These flush types are not available on all chips... */ 234 - extern __inline__ void srmmu_flush_tlb_ctx(void) 234 + static inline void srmmu_flush_tlb_ctx(void) 235 235 { 236 236 __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : 237 237 "r" (0x300), /* Flush TLB ctx.. */ ··· 239 239 240 240 } 241 241 242 - extern __inline__ void srmmu_flush_tlb_region(unsigned long addr) 242 + static inline void srmmu_flush_tlb_region(unsigned long addr) 243 243 { 244 244 addr &= SRMMU_PGDIR_MASK; 245 245 __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : ··· 249 249 } 250 250 251 251 252 - extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr) 252 + static inline void srmmu_flush_tlb_segment(unsigned long addr) 253 253 { 254 254 addr &= SRMMU_REAL_PMD_MASK; 255 255 __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : ··· 258 258 259 259 } 260 260 261 - extern __inline__ void srmmu_flush_tlb_page(unsigned long page) 261 + static inline void srmmu_flush_tlb_page(unsigned long page) 262 262 { 263 263 page &= PAGE_MASK; 264 264 __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : ··· 267 267 268 268 } 269 269 270 - extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr) 270 + static inline unsigned long srmmu_hwprobe(unsigned long vaddr) 271 271 { 272 272 unsigned long retval; 273 273 ··· 279 279 return retval; 280 280 } 281 281 282 - extern __inline__ int 282 + static inline int 283 283 srmmu_get_pte (unsigned long addr) 284 284 { 285 285 register unsigned long entry;
+1 -1
include/asm-sparc/processor.h
··· 79 79 extern unsigned long thread_saved_pc(struct task_struct *t); 80 80 81 81 /* Do necessary setup to start up a newly executed thread. */ 82 - extern __inline__ void start_thread(struct pt_regs * regs, unsigned long pc, 82 + static inline void start_thread(struct pt_regs * regs, unsigned long pc, 83 83 unsigned long sp) 84 84 { 85 85 register unsigned long zero asm("g1");
+3 -3
include/asm-sparc/psr.h
··· 38 38 39 39 #ifndef __ASSEMBLY__ 40 40 /* Get the %psr register. */ 41 - extern __inline__ unsigned int get_psr(void) 41 + static inline unsigned int get_psr(void) 42 42 { 43 43 unsigned int psr; 44 44 __asm__ __volatile__( ··· 53 53 return psr; 54 54 } 55 55 56 - extern __inline__ void put_psr(unsigned int new_psr) 56 + static inline void put_psr(unsigned int new_psr) 57 57 { 58 58 __asm__ __volatile__( 59 59 "wr %0, 0x0, %%psr\n\t" ··· 72 72 73 73 extern unsigned int fsr_storage; 74 74 75 - extern __inline__ unsigned int get_fsr(void) 75 + static inline unsigned int get_fsr(void) 76 76 { 77 77 unsigned int fsr = 0; 78 78
+5 -5
include/asm-sparc/sbi.h
··· 65 65 66 66 #ifndef __ASSEMBLY__ 67 67 68 - extern __inline__ int acquire_sbi(int devid, int mask) 68 + static inline int acquire_sbi(int devid, int mask) 69 69 { 70 70 __asm__ __volatile__ ("swapa [%2] %3, %0" : 71 71 "=r" (mask) : ··· 75 75 return mask; 76 76 } 77 77 78 - extern __inline__ void release_sbi(int devid, int mask) 78 + static inline void release_sbi(int devid, int mask) 79 79 { 80 80 __asm__ __volatile__ ("sta %0, [%1] %2" : : 81 81 "r" (mask), ··· 83 83 "i" (ASI_M_CTL)); 84 84 } 85 85 86 - extern __inline__ void set_sbi_tid(int devid, int targetid) 86 + static inline void set_sbi_tid(int devid, int targetid) 87 87 { 88 88 __asm__ __volatile__ ("sta %0, [%1] %2" : : 89 89 "r" (targetid), ··· 91 91 "i" (ASI_M_CTL)); 92 92 } 93 93 94 - extern __inline__ int get_sbi_ctl(int devid, int cfgno) 94 + static inline int get_sbi_ctl(int devid, int cfgno) 95 95 { 96 96 int cfg; 97 97 ··· 102 102 return cfg; 103 103 } 104 104 105 - extern __inline__ void set_sbi_ctl(int devid, int cfgno, int cfg) 105 + static inline void set_sbi_ctl(int devid, int cfgno, int cfg) 106 106 { 107 107 __asm__ __volatile__ ("sta %0, [%1] %2" : : 108 108 "r" (cfg),
+3 -3
include/asm-sparc/sbus.h
··· 28 28 * numbers + offsets, and vice versa. 29 29 */ 30 30 31 - extern __inline__ unsigned long sbus_devaddr(int slotnum, unsigned long offset) 31 + static inline unsigned long sbus_devaddr(int slotnum, unsigned long offset) 32 32 { 33 33 return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<25)+(offset)); 34 34 } 35 35 36 - extern __inline__ int sbus_dev_slot(unsigned long dev_addr) 36 + static inline int sbus_dev_slot(unsigned long dev_addr) 37 37 { 38 38 return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>25); 39 39 } ··· 80 80 81 81 extern struct sbus_bus *sbus_root; 82 82 83 - extern __inline__ int 83 + static inline int 84 84 sbus_is_slave(struct sbus_dev *dev) 85 85 { 86 86 /* XXX Have to write this for sun4c's */
+13 -13
include/asm-sparc/smp.h
··· 60 60 #define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5) 61 61 #define smp_message_pass(target,msg,data,wait) BTFIXUP_CALL(smp_message_pass)(target,msg,data,wait) 62 62 63 - extern __inline__ void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); } 64 - extern __inline__ void xc1(smpfunc_t func, unsigned long arg1) 63 + static inline void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); } 64 + static inline void xc1(smpfunc_t func, unsigned long arg1) 65 65 { smp_cross_call(func, arg1, 0, 0, 0, 0); } 66 - extern __inline__ void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2) 66 + static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2) 67 67 { smp_cross_call(func, arg1, arg2, 0, 0, 0); } 68 - extern __inline__ void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2, 68 + static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2, 69 69 unsigned long arg3) 70 70 { smp_cross_call(func, arg1, arg2, arg3, 0, 0); } 71 - extern __inline__ void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2, 71 + static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2, 72 72 unsigned long arg3, unsigned long arg4) 73 73 { smp_cross_call(func, arg1, arg2, arg3, arg4, 0); } 74 - extern __inline__ void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2, 74 + static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2, 75 75 unsigned long arg3, unsigned long arg4, unsigned long arg5) 76 76 { smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); } 77 77 78 - extern __inline__ int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait) 78 + static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait) 79 79 { 80 80 xc1((smpfunc_t)func, (unsigned long)info); 81 81 return 0; ··· 84 84 extern __volatile__ int __cpu_number_map[NR_CPUS]; 
85 85 extern __volatile__ int __cpu_logical_map[NR_CPUS]; 86 86 87 - extern __inline__ int cpu_logical_map(int cpu) 87 + static inline int cpu_logical_map(int cpu) 88 88 { 89 89 return __cpu_logical_map[cpu]; 90 90 } 91 - extern __inline__ int cpu_number_map(int cpu) 91 + static inline int cpu_number_map(int cpu) 92 92 { 93 93 return __cpu_number_map[cpu]; 94 94 } 95 95 96 - extern __inline__ int hard_smp4m_processor_id(void) 96 + static inline int hard_smp4m_processor_id(void) 97 97 { 98 98 int cpuid; 99 99 ··· 104 104 return cpuid; 105 105 } 106 106 107 - extern __inline__ int hard_smp4d_processor_id(void) 107 + static inline int hard_smp4d_processor_id(void) 108 108 { 109 109 int cpuid; 110 110 ··· 114 114 } 115 115 116 116 #ifndef MODULE 117 - extern __inline__ int hard_smp_processor_id(void) 117 + static inline int hard_smp_processor_id(void) 118 118 { 119 119 int cpuid; 120 120 ··· 136 136 return cpuid; 137 137 } 138 138 #else 139 - extern __inline__ int hard_smp_processor_id(void) 139 + static inline int hard_smp_processor_id(void) 140 140 { 141 141 int cpuid; 142 142
+4 -4
include/asm-sparc/smpprim.h
··· 15 15 * atomic. 16 16 */ 17 17 18 - extern __inline__ __volatile__ char test_and_set(void *addr) 18 + static inline __volatile__ char test_and_set(void *addr) 19 19 { 20 20 char state = 0; 21 21 ··· 27 27 } 28 28 29 29 /* Initialize a spin-lock. */ 30 - extern __inline__ __volatile__ smp_initlock(void *spinlock) 30 + static inline __volatile__ smp_initlock(void *spinlock) 31 31 { 32 32 /* Unset the lock. */ 33 33 *((unsigned char *) spinlock) = 0; ··· 36 36 } 37 37 38 38 /* This routine spins until it acquires the lock at ADDR. */ 39 - extern __inline__ __volatile__ smp_lock(void *addr) 39 + static inline __volatile__ smp_lock(void *addr) 40 40 { 41 41 while(test_and_set(addr) == 0xff) 42 42 ; ··· 46 46 } 47 47 48 48 /* This routine releases the lock at ADDR. */ 49 - extern __inline__ __volatile__ smp_unlock(void *addr) 49 + static inline __volatile__ smp_unlock(void *addr) 50 50 { 51 51 *((unsigned char *) addr) = 0; 52 52 }
+5 -5
include/asm-sparc/spinlock.h
··· 17 17 #define __raw_spin_unlock_wait(lock) \ 18 18 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 19 19 20 - extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock) 20 + static inline void __raw_spin_lock(raw_spinlock_t *lock) 21 21 { 22 22 __asm__ __volatile__( 23 23 "\n1:\n\t" ··· 37 37 : "g2", "memory", "cc"); 38 38 } 39 39 40 - extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock) 40 + static inline int __raw_spin_trylock(raw_spinlock_t *lock) 41 41 { 42 42 unsigned int result; 43 43 __asm__ __volatile__("ldstub [%1], %0" ··· 47 47 return (result == 0); 48 48 } 49 49 50 - extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) 50 + static inline void __raw_spin_unlock(raw_spinlock_t *lock) 51 51 { 52 52 __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); 53 53 } ··· 78 78 * 79 79 * Unfortunately this scheme limits us to ~16,000,000 cpus. 80 80 */ 81 - extern __inline__ void __read_lock(raw_rwlock_t *rw) 81 + static inline void __read_lock(raw_rwlock_t *rw) 82 82 { 83 83 register raw_rwlock_t *lp asm("g1"); 84 84 lp = rw; ··· 98 98 local_irq_restore(flags); \ 99 99 } while(0) 100 100 101 - extern __inline__ void __read_unlock(raw_rwlock_t *rw) 101 + static inline void __read_unlock(raw_rwlock_t *rw) 102 102 { 103 103 register raw_rwlock_t *lp asm("g1"); 104 104 lp = rw;
+1 -1
include/asm-sparc/system.h
··· 204 204 BTFIXUPDEF_CALL(void, ___xchg32, void) 205 205 #endif 206 206 207 - extern __inline__ unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val) 207 + static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val) 208 208 { 209 209 #ifdef CONFIG_SMP 210 210 __asm__ __volatile__("swap [%2], %0"
+1 -1
include/asm-sparc/traps.h
··· 22 22 /* We set this to _start in system setup. */ 23 23 extern struct tt_entry *sparc_ttable; 24 24 25 - extern __inline__ unsigned long get_tbr(void) 25 + static inline unsigned long get_tbr(void) 26 26 { 27 27 unsigned long tbr; 28 28