Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] s390: "extern inline" -> "static inline"

"extern inline" -> "static inline"

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Adrian Bunk and committed by Linus Torvalds.
4448aaf0 0fbeb5a4

+71 -71
+1 -1
Documentation/s390/Debugging390.txt
··· 871 871 872 872 873 873 874 - extern inline void spin_lock(spinlock_t *lp) 874 + static inline void spin_lock(spinlock_t *lp) 875 875 { 876 876 a0: 18 34 lr %r3,%r4 877 877 a2: a7 3a 03 bc ahi %r3,956
+6 -6
arch/s390/kernel/debug.c
··· 486 486 * - goto next entry in p_info 487 487 */ 488 488 489 - extern inline int 489 + static inline int 490 490 debug_next_entry(file_private_info_t *p_info) 491 491 { 492 492 debug_info_t *id; ··· 800 800 * - set active entry to next in the ring buffer 801 801 */ 802 802 803 - extern inline void 803 + static inline void 804 804 proceed_active_entry(debug_info_t * id) 805 805 { 806 806 if ((id->active_entries[id->active_area] += id->entry_size) ··· 817 817 * - set active area to next in the ring buffer 818 818 */ 819 819 820 - extern inline void 820 + static inline void 821 821 proceed_active_area(debug_info_t * id) 822 822 { 823 823 id->active_area++; ··· 828 828 * get_active_entry: 829 829 */ 830 830 831 - extern inline debug_entry_t* 831 + static inline debug_entry_t* 832 832 get_active_entry(debug_info_t * id) 833 833 { 834 834 return (debug_entry_t *) (((char *) id->areas[id->active_area] ··· 841 841 * - set timestamp, caller address, cpu number etc. 842 842 */ 843 843 844 - extern inline void 844 + static inline void 845 845 debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level, 846 846 int exception) 847 847 { ··· 971 971 * counts arguments in format string for sprintf view 972 972 */ 973 973 974 - extern inline int 974 + static inline int 975 975 debug_count_numargs(char *string) 976 976 { 977 977 int numargs=0;
+1 -1
arch/s390/mm/fault.c
··· 160 160 * 11 Page translation -> Not present (nullification) 161 161 * 3b Region third trans. -> Not present (nullification) 162 162 */ 163 - extern inline void 163 + static inline void 164 164 do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) 165 165 { 166 166 struct task_struct *tsk;
+2 -2
drivers/s390/char/keyboard.h
··· 41 41 /* 42 42 * Helper Functions. 43 43 */ 44 - extern inline void 44 + static inline void 45 45 kbd_put_queue(struct tty_struct *tty, int ch) 46 46 { 47 47 tty_insert_flip_char(tty, ch, 0); 48 48 tty_schedule_flip(tty); 49 49 } 50 50 51 - extern inline void 51 + static inline void 52 52 kbd_puts_queue(struct tty_struct *tty, char *cp) 53 53 { 54 54 while (*cp)
+4 -4
drivers/s390/cio/qdio.h
··· 265 265 /* 266 266 * Some instructions as assembly 267 267 */ 268 - extern __inline__ int 268 + static inline int 269 269 do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2) 270 270 { 271 271 int cc; ··· 300 300 return cc; 301 301 } 302 302 303 - extern __inline__ int 303 + static inline int 304 304 do_siga_input(unsigned int irq, unsigned int mask) 305 305 { 306 306 int cc; ··· 334 334 return cc; 335 335 } 336 336 337 - extern __inline__ int 337 + static inline int 338 338 do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb) 339 339 { 340 340 int cc; ··· 401 401 return cc; 402 402 } 403 403 404 - extern __inline__ unsigned long 404 + static inline unsigned long 405 405 do_clear_global_summary(void) 406 406 { 407 407
+3 -3
drivers/s390/net/fsm.h
··· 140 140 * 1 if current state or event is out of range 141 141 * !0 if state and event in range, but no action defined. 142 142 */ 143 - extern __inline__ int 143 + static inline int 144 144 fsm_event(fsm_instance *fi, int event, void *arg) 145 145 { 146 146 fsm_function_t r; ··· 188 188 * @param fi Pointer to FSM 189 189 * @param state The new state for this FSM. 190 190 */ 191 - extern __inline__ void 191 + static inline void 192 192 fsm_newstate(fsm_instance *fi, int newstate) 193 193 { 194 194 atomic_set(&fi->state,newstate); ··· 208 208 * 209 209 * @return The current state of the FSM. 210 210 */ 211 - extern __inline__ int 211 + static inline int 212 212 fsm_getstate(fsm_instance *fi) 213 213 { 214 214 return atomic_read(&fi->state);
+1 -1
drivers/s390/s390mach.h
··· 88 88 #define CRW_ERC_PERRI 0x07 /* perm. error, facility init */ 89 89 #define CRW_ERC_PMOD 0x08 /* installed parameters modified */ 90 90 91 - extern __inline__ int stcrw(struct crw *pcrw ) 91 + static inline int stcrw(struct crw *pcrw ) 92 92 { 93 93 int ccode; 94 94
+8 -8
include/asm-s390/debug.h
··· 129 129 130 130 void debug_stop_all(void); 131 131 132 - extern inline debug_entry_t* 132 + static inline debug_entry_t* 133 133 debug_event(debug_info_t* id, int level, void* data, int length) 134 134 { 135 135 if ((!id) || (level > id->level) || (id->pages_per_area == 0)) ··· 137 137 return debug_event_common(id,level,data,length); 138 138 } 139 139 140 - extern inline debug_entry_t* 140 + static inline debug_entry_t* 141 141 debug_int_event(debug_info_t* id, int level, unsigned int tag) 142 142 { 143 143 unsigned int t=tag; ··· 146 146 return debug_event_common(id,level,&t,sizeof(unsigned int)); 147 147 } 148 148 149 - extern inline debug_entry_t * 149 + static inline debug_entry_t * 150 150 debug_long_event (debug_info_t* id, int level, unsigned long tag) 151 151 { 152 152 unsigned long t=tag; ··· 155 155 return debug_event_common(id,level,&t,sizeof(unsigned long)); 156 156 } 157 157 158 - extern inline debug_entry_t* 158 + static inline debug_entry_t* 159 159 debug_text_event(debug_info_t* id, int level, const char* txt) 160 160 { 161 161 if ((!id) || (level > id->level) || (id->pages_per_area == 0)) ··· 168 168 __attribute__ ((format(printf, 3, 4))); 169 169 170 170 171 - extern inline debug_entry_t* 171 + static inline debug_entry_t* 172 172 debug_exception(debug_info_t* id, int level, void* data, int length) 173 173 { 174 174 if ((!id) || (level > id->level) || (id->pages_per_area == 0)) ··· 176 176 return debug_exception_common(id,level,data,length); 177 177 } 178 178 179 - extern inline debug_entry_t* 179 + static inline debug_entry_t* 180 180 debug_int_exception(debug_info_t* id, int level, unsigned int tag) 181 181 { 182 182 unsigned int t=tag; ··· 185 185 return debug_exception_common(id,level,&t,sizeof(unsigned int)); 186 186 } 187 187 188 - extern inline debug_entry_t * 188 + static inline debug_entry_t * 189 189 debug_long_exception (debug_info_t* id, int level, unsigned long tag) 190 190 { 191 191 unsigned long t=tag; ··· 194 194 return 
debug_exception_common(id,level,&t,sizeof(unsigned long)); 195 195 } 196 196 197 - extern inline debug_entry_t* 197 + static inline debug_entry_t* 198 198 debug_text_exception(debug_info_t* id, int level, const char* txt) 199 199 { 200 200 if ((!id) || (level > id->level) || (id->pages_per_area == 0))
+1 -1
include/asm-s390/ebcdic.h
··· 21 21 extern __u8 _ebc_tolower[]; /* EBCDIC -> lowercase */ 22 22 extern __u8 _ebc_toupper[]; /* EBCDIC -> uppercase */ 23 23 24 - extern __inline__ void 24 + static inline void 25 25 codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr) 26 26 { 27 27 if (nr-- <= 0)
+4 -4
include/asm-s390/io.h
··· 24 24 * Change virtual addresses to physical addresses and vv. 25 25 * These are pretty trivial 26 26 */ 27 - extern inline unsigned long virt_to_phys(volatile void * address) 27 + static inline unsigned long virt_to_phys(volatile void * address) 28 28 { 29 29 unsigned long real_address; 30 30 __asm__ ( ··· 42 42 return real_address; 43 43 } 44 44 45 - extern inline void * phys_to_virt(unsigned long address) 45 + static inline void * phys_to_virt(unsigned long address) 46 46 { 47 47 return __io_virt(address); 48 48 } ··· 54 54 55 55 extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags); 56 56 57 - extern inline void * ioremap (unsigned long offset, unsigned long size) 57 + static inline void * ioremap (unsigned long offset, unsigned long size) 58 58 { 59 59 return __ioremap(offset, size, 0); 60 60 } ··· 64 64 * it's useful if some control registers are in such an area and write combining 65 65 * or read caching is not desirable: 66 66 */ 67 - extern inline void * ioremap_nocache (unsigned long offset, unsigned long size) 67 + static inline void * ioremap_nocache (unsigned long offset, unsigned long size) 68 68 { 69 69 return __ioremap(offset, size, 0); 70 70 }
+1 -1
include/asm-s390/lowcore.h
··· 346 346 #define S390_lowcore (*((struct _lowcore *) 0)) 347 347 extern struct _lowcore *lowcore_ptr[]; 348 348 349 - extern __inline__ void set_prefix(__u32 address) 349 + static inline void set_prefix(__u32 address) 350 350 { 351 351 __asm__ __volatile__ ("spx %0" : : "m" (address) : "memory" ); 352 352 }
+1 -1
include/asm-s390/mmu_context.h
··· 44 44 45 45 #define deactivate_mm(tsk,mm) do { } while (0) 46 46 47 - extern inline void activate_mm(struct mm_struct *prev, 47 + static inline void activate_mm(struct mm_struct *prev, 48 48 struct mm_struct *next) 49 49 { 50 50 switch_mm(prev, next, current);
+34 -34
include/asm-s390/pgtable.h
··· 319 319 * within a page table are directly modified. Thus, the following 320 320 * hook is made available. 321 321 */ 322 - extern inline void set_pte(pte_t *pteptr, pte_t pteval) 322 + static inline void set_pte(pte_t *pteptr, pte_t pteval) 323 323 { 324 324 *pteptr = pteval; 325 325 } ··· 330 330 */ 331 331 #ifndef __s390x__ 332 332 333 - extern inline int pgd_present(pgd_t pgd) { return 1; } 334 - extern inline int pgd_none(pgd_t pgd) { return 0; } 335 - extern inline int pgd_bad(pgd_t pgd) { return 0; } 333 + static inline int pgd_present(pgd_t pgd) { return 1; } 334 + static inline int pgd_none(pgd_t pgd) { return 0; } 335 + static inline int pgd_bad(pgd_t pgd) { return 0; } 336 336 337 - extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _SEG_PRESENT; } 338 - extern inline int pmd_none(pmd_t pmd) { return pmd_val(pmd) & _PAGE_TABLE_INV; } 339 - extern inline int pmd_bad(pmd_t pmd) 337 + static inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _SEG_PRESENT; } 338 + static inline int pmd_none(pmd_t pmd) { return pmd_val(pmd) & _PAGE_TABLE_INV; } 339 + static inline int pmd_bad(pmd_t pmd) 340 340 { 341 341 return (pmd_val(pmd) & (~PAGE_MASK & ~_PAGE_TABLE_INV)) != _PAGE_TABLE; 342 342 } 343 343 344 344 #else /* __s390x__ */ 345 345 346 - extern inline int pgd_present(pgd_t pgd) 346 + static inline int pgd_present(pgd_t pgd) 347 347 { 348 348 return (pgd_val(pgd) & ~PAGE_MASK) == _PGD_ENTRY; 349 349 } 350 350 351 - extern inline int pgd_none(pgd_t pgd) 351 + static inline int pgd_none(pgd_t pgd) 352 352 { 353 353 return pgd_val(pgd) & _PGD_ENTRY_INV; 354 354 } 355 355 356 - extern inline int pgd_bad(pgd_t pgd) 356 + static inline int pgd_bad(pgd_t pgd) 357 357 { 358 358 return (pgd_val(pgd) & (~PAGE_MASK & ~_PGD_ENTRY_INV)) != _PGD_ENTRY; 359 359 } 360 360 361 - extern inline int pmd_present(pmd_t pmd) 361 + static inline int pmd_present(pmd_t pmd) 362 362 { 363 363 return (pmd_val(pmd) & ~PAGE_MASK) == _PMD_ENTRY; 364 364 } 365 365 366 
- extern inline int pmd_none(pmd_t pmd) 366 + static inline int pmd_none(pmd_t pmd) 367 367 { 368 368 return pmd_val(pmd) & _PMD_ENTRY_INV; 369 369 } 370 370 371 - extern inline int pmd_bad(pmd_t pmd) 371 + static inline int pmd_bad(pmd_t pmd) 372 372 { 373 373 return (pmd_val(pmd) & (~PAGE_MASK & ~_PMD_ENTRY_INV)) != _PMD_ENTRY; 374 374 } 375 375 376 376 #endif /* __s390x__ */ 377 377 378 - extern inline int pte_none(pte_t pte) 378 + static inline int pte_none(pte_t pte) 379 379 { 380 380 return (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_EMPTY; 381 381 } 382 382 383 - extern inline int pte_present(pte_t pte) 383 + static inline int pte_present(pte_t pte) 384 384 { 385 385 return !(pte_val(pte) & _PAGE_INVALID) || 386 386 (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_NONE; 387 387 } 388 388 389 - extern inline int pte_file(pte_t pte) 389 + static inline int pte_file(pte_t pte) 390 390 { 391 391 return (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_FILE; 392 392 } ··· 397 397 * query functions pte_write/pte_dirty/pte_young only work if 398 398 * pte_present() is true. Undefined behaviour if not.. 399 399 */ 400 - extern inline int pte_write(pte_t pte) 400 + static inline int pte_write(pte_t pte) 401 401 { 402 402 return (pte_val(pte) & _PAGE_RO) == 0; 403 403 } 404 404 405 - extern inline int pte_dirty(pte_t pte) 405 + static inline int pte_dirty(pte_t pte) 406 406 { 407 407 /* A pte is neither clean nor dirty on s/390. The dirty bit 408 408 * is in the storage key. See page_test_and_clear_dirty for ··· 411 411 return 0; 412 412 } 413 413 414 - extern inline int pte_young(pte_t pte) 414 + static inline int pte_young(pte_t pte) 415 415 { 416 416 /* A pte is neither young nor old on s/390. The young bit 417 417 * is in the storage key. 
See page_test_and_clear_young for ··· 420 420 return 0; 421 421 } 422 422 423 - extern inline int pte_read(pte_t pte) 423 + static inline int pte_read(pte_t pte) 424 424 { 425 425 /* All pages are readable since we don't use the fetch 426 426 * protection bit in the storage key. ··· 434 434 435 435 #ifndef __s390x__ 436 436 437 - extern inline void pgd_clear(pgd_t * pgdp) { } 437 + static inline void pgd_clear(pgd_t * pgdp) { } 438 438 439 - extern inline void pmd_clear(pmd_t * pmdp) 439 + static inline void pmd_clear(pmd_t * pmdp) 440 440 { 441 441 pmd_val(pmdp[0]) = _PAGE_TABLE_INV; 442 442 pmd_val(pmdp[1]) = _PAGE_TABLE_INV; ··· 446 446 447 447 #else /* __s390x__ */ 448 448 449 - extern inline void pgd_clear(pgd_t * pgdp) 449 + static inline void pgd_clear(pgd_t * pgdp) 450 450 { 451 451 pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY; 452 452 } 453 453 454 - extern inline void pmd_clear(pmd_t * pmdp) 454 + static inline void pmd_clear(pmd_t * pmdp) 455 455 { 456 456 pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; 457 457 pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; ··· 459 459 460 460 #endif /* __s390x__ */ 461 461 462 - extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 462 + static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 463 463 { 464 464 pte_val(*ptep) = _PAGE_INVALID_EMPTY; 465 465 } ··· 468 468 * The following pte modification functions only work if 469 469 * pte_present() is true. Undefined behaviour if not.. 470 470 */ 471 - extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 471 + static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 472 472 { 473 473 pte_val(pte) &= PAGE_MASK; 474 474 pte_val(pte) |= pgprot_val(newprot); 475 475 return pte; 476 476 } 477 477 478 - extern inline pte_t pte_wrprotect(pte_t pte) 478 + static inline pte_t pte_wrprotect(pte_t pte) 479 479 { 480 480 /* Do not clobber _PAGE_INVALID_NONE pages! 
*/ 481 481 if (!(pte_val(pte) & _PAGE_INVALID)) ··· 483 483 return pte; 484 484 } 485 485 486 - extern inline pte_t pte_mkwrite(pte_t pte) 486 + static inline pte_t pte_mkwrite(pte_t pte) 487 487 { 488 488 pte_val(pte) &= ~_PAGE_RO; 489 489 return pte; 490 490 } 491 491 492 - extern inline pte_t pte_mkclean(pte_t pte) 492 + static inline pte_t pte_mkclean(pte_t pte) 493 493 { 494 494 /* The only user of pte_mkclean is the fork() code. 495 495 We must *not* clear the *physical* page dirty bit ··· 498 498 return pte; 499 499 } 500 500 501 - extern inline pte_t pte_mkdirty(pte_t pte) 501 + static inline pte_t pte_mkdirty(pte_t pte) 502 502 { 503 503 /* We do not explicitly set the dirty bit because the 504 504 * sske instruction is slow. It is faster to let the ··· 507 507 return pte; 508 508 } 509 509 510 - extern inline pte_t pte_mkold(pte_t pte) 510 + static inline pte_t pte_mkold(pte_t pte) 511 511 { 512 512 /* S/390 doesn't keep its dirty/referenced bit in the pte. 513 513 * There is no point in clearing the real referenced bit. ··· 515 515 return pte; 516 516 } 517 517 518 - extern inline pte_t pte_mkyoung(pte_t pte) 518 + static inline pte_t pte_mkyoung(pte_t pte) 519 519 { 520 520 /* S/390 doesn't keep its dirty/referenced bit in the pte. 521 521 * There is no point in setting the real referenced bit. ··· 695 695 #ifndef __s390x__ 696 696 697 697 /* Find an entry in the second-level page table.. */ 698 - extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) 698 + static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) 699 699 { 700 700 return (pmd_t *) dir; 701 701 } ··· 758 758 #else 759 759 #define __SWP_OFFSET_MASK (~0UL >> 11) 760 760 #endif 761 - extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) 761 + static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) 762 762 { 763 763 pte_t pte; 764 764 offset &= __SWP_OFFSET_MASK;
+3 -3
include/asm-s390/sigp.h
··· 67 67 /* 68 68 * Signal processor 69 69 */ 70 - extern __inline__ sigp_ccode 70 + static inline sigp_ccode 71 71 signal_processor(__u16 cpu_addr, sigp_order_code order_code) 72 72 { 73 73 sigp_ccode ccode; ··· 86 86 /* 87 87 * Signal processor with parameter 88 88 */ 89 - extern __inline__ sigp_ccode 89 + static inline sigp_ccode 90 90 signal_processor_p(__u32 parameter, __u16 cpu_addr, 91 91 sigp_order_code order_code) 92 92 { ··· 107 107 /* 108 108 * Signal processor with parameter and return status 109 109 */ 110 - extern __inline__ sigp_ccode 110 + static inline sigp_ccode 111 111 signal_processor_ps(__u32 *statusptr, __u32 parameter, 112 112 __u16 cpu_addr, sigp_order_code order_code) 113 113 {
+1 -1
include/asm-s390/smp.h
··· 52 52 extern int smp_get_cpu(cpumask_t cpu_map); 53 53 extern void smp_put_cpu(int cpu); 54 54 55 - extern __inline__ __u16 hard_smp_processor_id(void) 55 + static inline __u16 hard_smp_processor_id(void) 56 56 { 57 57 __u16 cpu_address; 58 58