Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: 9388/2: mm: Type-annotate all per-processor assembly routines

Type tag the remaining per-processor assembly using the CFI
symbol macros, in addition to those that were previously tagged
for cache maintenance calls.

This will be used to finally provide proper C prototypes for
all these calls as well so that CFI can be made to work.

Tested-by: Kees Cook <keescook@chromium.org>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>

Authored by Linus Walleij and committed by Russell King (Oracle)
51db13aa b4d20eff

+434 -274
+15 -9
arch/arm/mm/proc-arm1020.S
··· 57 57 /* 58 58 * cpu_arm1020_proc_init() 59 59 */ 60 - ENTRY(cpu_arm1020_proc_init) 60 + SYM_TYPED_FUNC_START(cpu_arm1020_proc_init) 61 61 ret lr 62 + SYM_FUNC_END(cpu_arm1020_proc_init) 62 63 63 64 /* 64 65 * cpu_arm1020_proc_fin() 65 66 */ 66 - ENTRY(cpu_arm1020_proc_fin) 67 + SYM_TYPED_FUNC_START(cpu_arm1020_proc_fin) 67 68 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 68 69 bic r0, r0, #0x1000 @ ...i............ 69 70 bic r0, r0, #0x000e @ ............wca. 70 71 mcr p15, 0, r0, c1, c0, 0 @ disable caches 71 72 ret lr 73 + SYM_FUNC_END(cpu_arm1020_proc_fin) 72 74 73 75 /* 74 76 * cpu_arm1020_reset(loc) ··· 83 81 */ 84 82 .align 5 85 83 .pushsection .idmap.text, "ax" 86 - ENTRY(cpu_arm1020_reset) 84 + SYM_TYPED_FUNC_START(cpu_arm1020_reset) 87 85 mov ip, #0 88 86 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 89 87 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 95 93 bic ip, ip, #0x1100 @ ...i...s........ 96 94 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 97 95 ret r0 98 - ENDPROC(cpu_arm1020_reset) 96 + SYM_FUNC_END(cpu_arm1020_reset) 99 97 .popsection 100 98 101 99 /* 102 100 * cpu_arm1020_do_idle() 103 101 */ 104 102 .align 5 105 - ENTRY(cpu_arm1020_do_idle) 103 + SYM_TYPED_FUNC_START(cpu_arm1020_do_idle) 106 104 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 107 105 ret lr 106 + SYM_FUNC_END(cpu_arm1020_do_idle) 108 107 109 108 /* ================================= CACHE ================================ */ 110 109 ··· 363 360 SYM_FUNC_END(arm1020_dma_unmap_area) 364 361 365 362 .align 5 366 - ENTRY(cpu_arm1020_dcache_clean_area) 363 + SYM_TYPED_FUNC_START(cpu_arm1020_dcache_clean_area) 367 364 #ifndef CONFIG_CPU_DCACHE_DISABLE 368 365 mov ip, #0 369 366 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry ··· 373 370 bhi 1b 374 371 #endif 375 372 ret lr 373 + SYM_FUNC_END(cpu_arm1020_dcache_clean_area) 376 374 377 375 /* =============================== PageTable ============================== */ 378 376 ··· 385 381 * pgd: new page tables 386 382 */ 387 383 .align 5 388 - 
ENTRY(cpu_arm1020_switch_mm) 384 + SYM_TYPED_FUNC_START(cpu_arm1020_switch_mm) 389 385 #ifdef CONFIG_MMU 390 386 #ifndef CONFIG_CPU_DCACHE_DISABLE 391 387 mcr p15, 0, r3, c7, c10, 4 ··· 413 409 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 414 410 #endif /* CONFIG_MMU */ 415 411 ret lr 416 - 412 + SYM_FUNC_END(cpu_arm1020_switch_mm) 413 + 417 414 /* 418 415 * cpu_arm1020_set_pte(ptep, pte) 419 416 * 420 417 * Set a PTE and flush it out 421 418 */ 422 419 .align 5 423 - ENTRY(cpu_arm1020_set_pte_ext) 420 + SYM_TYPED_FUNC_START(cpu_arm1020_set_pte_ext) 424 421 #ifdef CONFIG_MMU 425 422 armv3_set_pte_ext 426 423 mov r0, r0 ··· 432 427 mcr p15, 0, r0, c7, c10, 4 @ drain WB 433 428 #endif /* CONFIG_MMU */ 434 429 ret lr 430 + SYM_FUNC_END(cpu_arm1020_set_pte_ext) 435 431 436 432 .type __arm1020_setup, #function 437 433 __arm1020_setup:
+15 -9
arch/arm/mm/proc-arm1020e.S
··· 57 57 /* 58 58 * cpu_arm1020e_proc_init() 59 59 */ 60 - ENTRY(cpu_arm1020e_proc_init) 60 + SYM_TYPED_FUNC_START(cpu_arm1020e_proc_init) 61 61 ret lr 62 + SYM_FUNC_END(cpu_arm1020e_proc_init) 62 63 63 64 /* 64 65 * cpu_arm1020e_proc_fin() 65 66 */ 66 - ENTRY(cpu_arm1020e_proc_fin) 67 + SYM_TYPED_FUNC_START(cpu_arm1020e_proc_fin) 67 68 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 68 69 bic r0, r0, #0x1000 @ ...i............ 69 70 bic r0, r0, #0x000e @ ............wca. 70 71 mcr p15, 0, r0, c1, c0, 0 @ disable caches 71 72 ret lr 73 + SYM_FUNC_END(cpu_arm1020e_proc_fin) 72 74 73 75 /* 74 76 * cpu_arm1020e_reset(loc) ··· 83 81 */ 84 82 .align 5 85 83 .pushsection .idmap.text, "ax" 86 - ENTRY(cpu_arm1020e_reset) 84 + SYM_TYPED_FUNC_START(cpu_arm1020e_reset) 87 85 mov ip, #0 88 86 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 89 87 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 95 93 bic ip, ip, #0x1100 @ ...i...s........ 96 94 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 97 95 ret r0 98 - ENDPROC(cpu_arm1020e_reset) 96 + SYM_FUNC_END(cpu_arm1020e_reset) 99 97 .popsection 100 98 101 99 /* 102 100 * cpu_arm1020e_do_idle() 103 101 */ 104 102 .align 5 105 - ENTRY(cpu_arm1020e_do_idle) 103 + SYM_TYPED_FUNC_START(cpu_arm1020e_do_idle) 106 104 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 107 105 ret lr 106 + SYM_FUNC_END(cpu_arm1020e_do_idle) 108 107 109 108 /* ================================= CACHE ================================ */ 110 109 ··· 350 347 SYM_FUNC_END(arm1020e_dma_unmap_area) 351 348 352 349 .align 5 353 - ENTRY(cpu_arm1020e_dcache_clean_area) 350 + SYM_TYPED_FUNC_START(cpu_arm1020e_dcache_clean_area) 354 351 #ifndef CONFIG_CPU_DCACHE_DISABLE 355 352 mov ip, #0 356 353 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry ··· 359 356 bhi 1b 360 357 #endif 361 358 ret lr 359 + SYM_FUNC_END(cpu_arm1020e_dcache_clean_area) 362 360 363 361 /* =============================== PageTable ============================== */ 364 362 ··· 371 367 * pgd: new page tables 372 368 */ 
373 369 .align 5 374 - ENTRY(cpu_arm1020e_switch_mm) 370 + SYM_TYPED_FUNC_START(cpu_arm1020e_switch_mm) 375 371 #ifdef CONFIG_MMU 376 372 #ifndef CONFIG_CPU_DCACHE_DISABLE 377 373 mcr p15, 0, r3, c7, c10, 4 ··· 398 394 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 399 395 #endif 400 396 ret lr 401 - 397 + SYM_FUNC_END(cpu_arm1020e_switch_mm) 398 + 402 399 /* 403 400 * cpu_arm1020e_set_pte(ptep, pte) 404 401 * 405 402 * Set a PTE and flush it out 406 403 */ 407 404 .align 5 408 - ENTRY(cpu_arm1020e_set_pte_ext) 405 + SYM_TYPED_FUNC_START(cpu_arm1020e_set_pte_ext) 409 406 #ifdef CONFIG_MMU 410 407 armv3_set_pte_ext 411 408 mov r0, r0 ··· 415 410 #endif 416 411 #endif /* CONFIG_MMU */ 417 412 ret lr 413 + SYM_FUNC_END(cpu_arm1020e_set_pte_ext) 418 414 419 415 .type __arm1020e_setup, #function 420 416 __arm1020e_setup:
+15 -9
arch/arm/mm/proc-arm1022.S
··· 57 57 /* 58 58 * cpu_arm1022_proc_init() 59 59 */ 60 - ENTRY(cpu_arm1022_proc_init) 60 + SYM_TYPED_FUNC_START(cpu_arm1022_proc_init) 61 61 ret lr 62 + SYM_FUNC_END(cpu_arm1022_proc_init) 62 63 63 64 /* 64 65 * cpu_arm1022_proc_fin() 65 66 */ 66 - ENTRY(cpu_arm1022_proc_fin) 67 + SYM_TYPED_FUNC_START(cpu_arm1022_proc_fin) 67 68 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 68 69 bic r0, r0, #0x1000 @ ...i............ 69 70 bic r0, r0, #0x000e @ ............wca. 70 71 mcr p15, 0, r0, c1, c0, 0 @ disable caches 71 72 ret lr 73 + SYM_FUNC_END(cpu_arm1022_proc_fin) 72 74 73 75 /* 74 76 * cpu_arm1022_reset(loc) ··· 83 81 */ 84 82 .align 5 85 83 .pushsection .idmap.text, "ax" 86 - ENTRY(cpu_arm1022_reset) 84 + SYM_TYPED_FUNC_START(cpu_arm1022_reset) 87 85 mov ip, #0 88 86 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 89 87 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 95 93 bic ip, ip, #0x1100 @ ...i...s........ 96 94 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 97 95 ret r0 98 - ENDPROC(cpu_arm1022_reset) 96 + SYM_FUNC_END(cpu_arm1022_reset) 99 97 .popsection 100 98 101 99 /* 102 100 * cpu_arm1022_do_idle() 103 101 */ 104 102 .align 5 105 - ENTRY(cpu_arm1022_do_idle) 103 + SYM_TYPED_FUNC_START(cpu_arm1022_do_idle) 106 104 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 107 105 ret lr 106 + SYM_FUNC_END(cpu_arm1022_do_idle) 108 107 109 108 /* ================================= CACHE ================================ */ 110 109 ··· 349 346 SYM_FUNC_END(arm1022_dma_unmap_area) 350 347 351 348 .align 5 352 - ENTRY(cpu_arm1022_dcache_clean_area) 349 + SYM_TYPED_FUNC_START(cpu_arm1022_dcache_clean_area) 353 350 #ifndef CONFIG_CPU_DCACHE_DISABLE 354 351 mov ip, #0 355 352 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry ··· 358 355 bhi 1b 359 356 #endif 360 357 ret lr 358 + SYM_FUNC_END(cpu_arm1022_dcache_clean_area) 361 359 362 360 /* =============================== PageTable ============================== */ 363 361 ··· 370 366 * pgd: new page tables 371 367 */ 372 368 .align 5 373 - 
ENTRY(cpu_arm1022_switch_mm) 369 + SYM_TYPED_FUNC_START(cpu_arm1022_switch_mm) 374 370 #ifdef CONFIG_MMU 375 371 #ifndef CONFIG_CPU_DCACHE_DISABLE 376 372 mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments ··· 390 386 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 391 387 #endif 392 388 ret lr 393 - 389 + SYM_FUNC_END(cpu_arm1022_switch_mm) 390 + 394 391 /* 395 392 * cpu_arm1022_set_pte_ext(ptep, pte, ext) 396 393 * 397 394 * Set a PTE and flush it out 398 395 */ 399 396 .align 5 400 - ENTRY(cpu_arm1022_set_pte_ext) 397 + SYM_TYPED_FUNC_START(cpu_arm1022_set_pte_ext) 401 398 #ifdef CONFIG_MMU 402 399 armv3_set_pte_ext 403 400 mov r0, r0 ··· 407 402 #endif 408 403 #endif /* CONFIG_MMU */ 409 404 ret lr 405 + SYM_FUNC_END(cpu_arm1022_set_pte_ext) 410 406 411 407 .type __arm1022_setup, #function 412 408 __arm1022_setup:
+15 -9
arch/arm/mm/proc-arm1026.S
··· 57 57 /* 58 58 * cpu_arm1026_proc_init() 59 59 */ 60 - ENTRY(cpu_arm1026_proc_init) 60 + SYM_TYPED_FUNC_START(cpu_arm1026_proc_init) 61 61 ret lr 62 + SYM_FUNC_END(cpu_arm1026_proc_init) 62 63 63 64 /* 64 65 * cpu_arm1026_proc_fin() 65 66 */ 66 - ENTRY(cpu_arm1026_proc_fin) 67 + SYM_TYPED_FUNC_START(cpu_arm1026_proc_fin) 67 68 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 68 69 bic r0, r0, #0x1000 @ ...i............ 69 70 bic r0, r0, #0x000e @ ............wca. 70 71 mcr p15, 0, r0, c1, c0, 0 @ disable caches 71 72 ret lr 73 + SYM_FUNC_END(cpu_arm1026_proc_fin) 72 74 73 75 /* 74 76 * cpu_arm1026_reset(loc) ··· 83 81 */ 84 82 .align 5 85 83 .pushsection .idmap.text, "ax" 86 - ENTRY(cpu_arm1026_reset) 84 + SYM_TYPED_FUNC_START(cpu_arm1026_reset) 87 85 mov ip, #0 88 86 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 89 87 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 95 93 bic ip, ip, #0x1100 @ ...i...s........ 96 94 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 97 95 ret r0 98 - ENDPROC(cpu_arm1026_reset) 96 + SYM_FUNC_END(cpu_arm1026_reset) 99 97 .popsection 100 98 101 99 /* 102 100 * cpu_arm1026_do_idle() 103 101 */ 104 102 .align 5 105 - ENTRY(cpu_arm1026_do_idle) 103 + SYM_TYPED_FUNC_START(cpu_arm1026_do_idle) 106 104 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 107 105 ret lr 106 + SYM_FUNC_END(cpu_arm1026_do_idle) 108 107 109 108 /* ================================= CACHE ================================ */ 110 109 ··· 344 341 SYM_FUNC_END(arm1026_dma_unmap_area) 345 342 346 343 .align 5 347 - ENTRY(cpu_arm1026_dcache_clean_area) 344 + SYM_TYPED_FUNC_START(cpu_arm1026_dcache_clean_area) 348 345 #ifndef CONFIG_CPU_DCACHE_DISABLE 349 346 mov ip, #0 350 347 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry ··· 353 350 bhi 1b 354 351 #endif 355 352 ret lr 353 + SYM_FUNC_END(cpu_arm1026_dcache_clean_area) 356 354 357 355 /* =============================== PageTable ============================== */ 358 356 ··· 365 361 * pgd: new page tables 366 362 */ 367 363 .align 5 368 - 
ENTRY(cpu_arm1026_switch_mm) 364 + SYM_TYPED_FUNC_START(cpu_arm1026_switch_mm) 369 365 #ifdef CONFIG_MMU 370 366 mov r1, #0 371 367 #ifndef CONFIG_CPU_DCACHE_DISABLE ··· 380 376 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 381 377 #endif 382 378 ret lr 383 - 379 + SYM_FUNC_END(cpu_arm1026_switch_mm) 380 + 384 381 /* 385 382 * cpu_arm1026_set_pte_ext(ptep, pte, ext) 386 383 * 387 384 * Set a PTE and flush it out 388 385 */ 389 386 .align 5 390 - ENTRY(cpu_arm1026_set_pte_ext) 387 + SYM_TYPED_FUNC_START(cpu_arm1026_set_pte_ext) 391 388 #ifdef CONFIG_MMU 392 389 armv3_set_pte_ext 393 390 mov r0, r0 ··· 397 392 #endif 398 393 #endif /* CONFIG_MMU */ 399 394 ret lr 395 + SYM_FUNC_END(cpu_arm1026_set_pte_ext) 400 396 401 397 .type __arm1026_setup, #function 402 398 __arm1026_setup:
+17 -8
arch/arm/mm/proc-arm720.S
··· 20 20 */ 21 21 #include <linux/linkage.h> 22 22 #include <linux/init.h> 23 + #include <linux/cfi_types.h> 23 24 #include <linux/pgtable.h> 24 25 #include <asm/assembler.h> 25 26 #include <asm/asm-offsets.h> ··· 36 35 * 37 36 * Notes : This processor does not require these 38 37 */ 39 - ENTRY(cpu_arm720_dcache_clean_area) 40 - ENTRY(cpu_arm720_proc_init) 38 + SYM_TYPED_FUNC_START(cpu_arm720_dcache_clean_area) 41 39 ret lr 40 + SYM_FUNC_END(cpu_arm720_dcache_clean_area) 42 41 43 - ENTRY(cpu_arm720_proc_fin) 42 + SYM_TYPED_FUNC_START(cpu_arm720_proc_init) 43 + ret lr 44 + SYM_FUNC_END(cpu_arm720_proc_init) 45 + 46 + SYM_TYPED_FUNC_START(cpu_arm720_proc_fin) 44 47 mrc p15, 0, r0, c1, c0, 0 45 48 bic r0, r0, #0x1000 @ ...i............ 46 49 bic r0, r0, #0x000e @ ............wca. 47 50 mcr p15, 0, r0, c1, c0, 0 @ disable caches 48 51 ret lr 52 + SYM_FUNC_END(cpu_arm720_proc_fin) 49 53 50 54 /* 51 55 * Function: arm720_proc_do_idle(void) 52 56 * Params : r0 = unused 53 57 * Purpose : put the processor in proper idle mode 54 58 */ 55 - ENTRY(cpu_arm720_do_idle) 59 + SYM_TYPED_FUNC_START(cpu_arm720_do_idle) 56 60 ret lr 61 + SYM_FUNC_END(cpu_arm720_do_idle) 57 62 58 63 /* 59 64 * Function: arm720_switch_mm(unsigned long pgd_phys) ··· 67 60 * Purpose : Perform a task switch, saving the old process' state and restoring 68 61 * the new. 
69 62 */ 70 - ENTRY(cpu_arm720_switch_mm) 63 + SYM_TYPED_FUNC_START(cpu_arm720_switch_mm) 71 64 #ifdef CONFIG_MMU 72 65 mov r1, #0 73 66 mcr p15, 0, r1, c7, c7, 0 @ invalidate cache ··· 75 68 mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4) 76 69 #endif 77 70 ret lr 71 + SYM_FUNC_END(cpu_arm720_switch_mm) 78 72 79 73 /* 80 74 * Function: arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext) ··· 84 76 * Purpose : Set a PTE and flush it out of any WB cache 85 77 */ 86 78 .align 5 87 - ENTRY(cpu_arm720_set_pte_ext) 79 + SYM_TYPED_FUNC_START(cpu_arm720_set_pte_ext) 88 80 #ifdef CONFIG_MMU 89 81 armv3_set_pte_ext wc_disable=0 90 82 #endif 91 83 ret lr 84 + SYM_FUNC_END(cpu_arm720_set_pte_ext) 92 85 93 86 /* 94 87 * Function: arm720_reset ··· 97 88 * Notes : This sets up everything for a reset 98 89 */ 99 90 .pushsection .idmap.text, "ax" 100 - ENTRY(cpu_arm720_reset) 91 + SYM_TYPED_FUNC_START(cpu_arm720_reset) 101 92 mov ip, #0 102 93 mcr p15, 0, ip, c7, c7, 0 @ invalidate cache 103 94 #ifdef CONFIG_MMU ··· 108 99 bic ip, ip, #0x2100 @ ..v....s........ 109 100 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 110 101 ret r0 111 - ENDPROC(cpu_arm720_reset) 102 + SYM_FUNC_END(cpu_arm720_reset) 112 103 .popsection 113 104 114 105 .type __arm710_setup, #function
+19 -7
arch/arm/mm/proc-arm740.S
··· 6 6 */ 7 7 #include <linux/linkage.h> 8 8 #include <linux/init.h> 9 + #include <linux/cfi_types.h> 9 10 #include <linux/pgtable.h> 10 11 #include <asm/assembler.h> 11 12 #include <asm/asm-offsets.h> ··· 25 24 * 26 25 * These are not required. 27 26 */ 28 - ENTRY(cpu_arm740_proc_init) 29 - ENTRY(cpu_arm740_do_idle) 30 - ENTRY(cpu_arm740_dcache_clean_area) 31 - ENTRY(cpu_arm740_switch_mm) 27 + SYM_TYPED_FUNC_START(cpu_arm740_proc_init) 32 28 ret lr 29 + SYM_FUNC_END(cpu_arm740_proc_init) 30 + 31 + SYM_TYPED_FUNC_START(cpu_arm740_do_idle) 32 + ret lr 33 + SYM_FUNC_END(cpu_arm740_do_idle) 34 + 35 + SYM_TYPED_FUNC_START(cpu_arm740_dcache_clean_area) 36 + ret lr 37 + SYM_FUNC_END(cpu_arm740_dcache_clean_area) 38 + 39 + SYM_TYPED_FUNC_START(cpu_arm740_switch_mm) 40 + ret lr 41 + SYM_FUNC_END(cpu_arm740_switch_mm) 33 42 34 43 /* 35 44 * cpu_arm740_proc_fin() 36 45 */ 37 - ENTRY(cpu_arm740_proc_fin) 46 + SYM_TYPED_FUNC_START(cpu_arm740_proc_fin) 38 47 mrc p15, 0, r0, c1, c0, 0 39 48 bic r0, r0, #0x3f000000 @ bank/f/lock/s 40 49 bic r0, r0, #0x0000000c @ w-buffer/cache 41 50 mcr p15, 0, r0, c1, c0, 0 @ disable caches 42 51 ret lr 52 + SYM_FUNC_END(cpu_arm740_proc_fin) 43 53 44 54 /* 45 55 * cpu_arm740_reset(loc) ··· 58 46 * Notes : This sets up everything for a reset 59 47 */ 60 48 .pushsection .idmap.text, "ax" 61 - ENTRY(cpu_arm740_reset) 49 + SYM_TYPED_FUNC_START(cpu_arm740_reset) 62 50 mov ip, #0 63 51 mcr p15, 0, ip, c7, c0, 0 @ invalidate cache 64 52 mrc p15, 0, ip, c1, c0, 0 @ get ctrl register 65 53 bic ip, ip, #0x0000000c @ ............wc.. 66 54 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 67 55 ret r0 68 - ENDPROC(cpu_arm740_reset) 56 + SYM_FUNC_END(cpu_arm740_reset) 69 57 .popsection 70 58 71 59 .type __arm740_setup, #function
+23 -11
arch/arm/mm/proc-arm7tdmi.S
··· 6 6 */ 7 7 #include <linux/linkage.h> 8 8 #include <linux/init.h> 9 + #include <linux/cfi_types.h> 9 10 #include <linux/pgtable.h> 10 11 #include <asm/assembler.h> 11 12 #include <asm/asm-offsets.h> ··· 24 23 * cpu_arm7tdmi_switch_mm() 25 24 * 26 25 * These are not required. 27 - */ 28 - ENTRY(cpu_arm7tdmi_proc_init) 29 - ENTRY(cpu_arm7tdmi_do_idle) 30 - ENTRY(cpu_arm7tdmi_dcache_clean_area) 31 - ENTRY(cpu_arm7tdmi_switch_mm) 32 - ret lr 26 + */ 27 + SYM_TYPED_FUNC_START(cpu_arm7tdmi_proc_init) 28 + ret lr 29 + SYM_FUNC_END(cpu_arm7tdmi_proc_init) 30 + 31 + SYM_TYPED_FUNC_START(cpu_arm7tdmi_do_idle) 32 + ret lr 33 + SYM_FUNC_END(cpu_arm7tdmi_do_idle) 34 + 35 + SYM_TYPED_FUNC_START(cpu_arm7tdmi_dcache_clean_area) 36 + ret lr 37 + SYM_FUNC_END(cpu_arm7tdmi_dcache_clean_area) 38 + 39 + SYM_TYPED_FUNC_START(cpu_arm7tdmi_switch_mm) 40 + ret lr 41 + SYM_FUNC_END(cpu_arm7tdmi_switch_mm) 33 42 34 43 /* 35 44 * cpu_arm7tdmi_proc_fin() 36 - */ 37 - ENTRY(cpu_arm7tdmi_proc_fin) 38 - ret lr 45 + */ 46 + SYM_TYPED_FUNC_START(cpu_arm7tdmi_proc_fin) 47 + ret lr 48 + SYM_FUNC_END(cpu_arm7tdmi_proc_fin) 39 49 40 50 /* 41 51 * Function: cpu_arm7tdmi_reset(loc) ··· 54 42 * Purpose : Sets up everything for a reset and jump to the location for soft reset. 55 43 */ 56 44 .pushsection .idmap.text, "ax" 57 - ENTRY(cpu_arm7tdmi_reset) 45 + SYM_TYPED_FUNC_START(cpu_arm7tdmi_reset) 58 46 ret r0 59 - ENDPROC(cpu_arm7tdmi_reset) 47 + SYM_FUNC_END(cpu_arm7tdmi_reset) 60 48 .popsection 61 49 62 50 .type __arm7tdmi_setup, #function
+18 -13
arch/arm/mm/proc-arm920.S
··· 49 49 /* 50 50 * cpu_arm920_proc_init() 51 51 */ 52 - ENTRY(cpu_arm920_proc_init) 52 + SYM_TYPED_FUNC_START(cpu_arm920_proc_init) 53 53 ret lr 54 + SYM_FUNC_END(cpu_arm920_proc_init) 54 55 55 56 /* 56 57 * cpu_arm920_proc_fin() 57 58 */ 58 - ENTRY(cpu_arm920_proc_fin) 59 + SYM_TYPED_FUNC_START(cpu_arm920_proc_fin) 59 60 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 60 61 bic r0, r0, #0x1000 @ ...i............ 61 62 bic r0, r0, #0x000e @ ............wca. 62 63 mcr p15, 0, r0, c1, c0, 0 @ disable caches 63 64 ret lr 65 + SYM_FUNC_END(cpu_arm920_proc_fin) 64 66 65 67 /* 66 68 * cpu_arm920_reset(loc) ··· 75 73 */ 76 74 .align 5 77 75 .pushsection .idmap.text, "ax" 78 - ENTRY(cpu_arm920_reset) 76 + SYM_TYPED_FUNC_START(cpu_arm920_reset) 79 77 mov ip, #0 80 78 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 81 79 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 87 85 bic ip, ip, #0x1100 @ ...i...s........ 88 86 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 89 87 ret r0 90 - ENDPROC(cpu_arm920_reset) 88 + SYM_FUNC_END(cpu_arm920_reset) 91 89 .popsection 92 90 93 91 /* 94 92 * cpu_arm920_do_idle() 95 93 */ 96 94 .align 5 97 - ENTRY(cpu_arm920_do_idle) 95 + SYM_TYPED_FUNC_START(cpu_arm920_do_idle) 98 96 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 99 97 ret lr 100 - 98 + SYM_FUNC_END(cpu_arm920_do_idle) 101 99 102 100 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 103 101 ··· 316 314 #endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */ 317 315 318 316 319 - ENTRY(cpu_arm920_dcache_clean_area) 317 + SYM_TYPED_FUNC_START(cpu_arm920_dcache_clean_area) 320 318 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 321 319 add r0, r0, #CACHE_DLINESIZE 322 320 subs r1, r1, #CACHE_DLINESIZE 323 321 bhi 1b 324 322 ret lr 323 + SYM_FUNC_END(cpu_arm920_dcache_clean_area) 325 324 326 325 /* =============================== PageTable ============================== */ 327 326 ··· 334 331 * pgd: new page tables 335 332 */ 336 333 .align 5 337 - ENTRY(cpu_arm920_switch_mm) 334 + SYM_TYPED_FUNC_START(cpu_arm920_switch_mm) 
338 335 #ifdef CONFIG_MMU 339 336 mov ip, #0 340 337 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH ··· 358 355 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 359 356 #endif 360 357 ret lr 358 + SYM_FUNC_END(cpu_arm920_switch_mm) 361 359 362 360 /* 363 361 * cpu_arm920_set_pte(ptep, pte, ext) ··· 366 362 * Set a PTE and flush it out 367 363 */ 368 364 .align 5 369 - ENTRY(cpu_arm920_set_pte_ext) 365 + SYM_TYPED_FUNC_START(cpu_arm920_set_pte_ext) 370 366 #ifdef CONFIG_MMU 371 367 armv3_set_pte_ext 372 368 mov r0, r0 ··· 374 370 mcr p15, 0, r0, c7, c10, 4 @ drain WB 375 371 #endif 376 372 ret lr 373 + SYM_FUNC_END(cpu_arm920_set_pte_ext) 377 374 378 375 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 379 376 .globl cpu_arm920_suspend_size 380 377 .equ cpu_arm920_suspend_size, 4 * 3 381 378 #ifdef CONFIG_ARM_CPU_SUSPEND 382 - ENTRY(cpu_arm920_do_suspend) 379 + SYM_TYPED_FUNC_START(cpu_arm920_do_suspend) 383 380 stmfd sp!, {r4 - r6, lr} 384 381 mrc p15, 0, r4, c13, c0, 0 @ PID 385 382 mrc p15, 0, r5, c3, c0, 0 @ Domain ID 386 383 mrc p15, 0, r6, c1, c0, 0 @ Control register 387 384 stmia r0, {r4 - r6} 388 385 ldmfd sp!, {r4 - r6, pc} 389 - ENDPROC(cpu_arm920_do_suspend) 386 + SYM_FUNC_END(cpu_arm920_do_suspend) 390 387 391 - ENTRY(cpu_arm920_do_resume) 388 + SYM_TYPED_FUNC_START(cpu_arm920_do_resume) 392 389 mov ip, #0 393 390 mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs 394 391 mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches ··· 399 394 mcr p15, 0, r1, c2, c0, 0 @ TTB address 400 395 mov r0, r6 @ control register 401 396 b cpu_resume_mmu 402 - ENDPROC(cpu_arm920_do_resume) 397 + SYM_FUNC_END(cpu_arm920_do_resume) 403 398 #endif 404 399 405 400 .type __arm920_setup, #function
+14 -9
arch/arm/mm/proc-arm922.S
··· 51 51 /* 52 52 * cpu_arm922_proc_init() 53 53 */ 54 - ENTRY(cpu_arm922_proc_init) 54 + SYM_TYPED_FUNC_START(cpu_arm922_proc_init) 55 55 ret lr 56 + SYM_FUNC_END(cpu_arm922_proc_init) 56 57 57 58 /* 58 59 * cpu_arm922_proc_fin() 59 60 */ 60 - ENTRY(cpu_arm922_proc_fin) 61 + SYM_TYPED_FUNC_START(cpu_arm922_proc_fin) 61 62 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 62 63 bic r0, r0, #0x1000 @ ...i............ 63 64 bic r0, r0, #0x000e @ ............wca. 64 65 mcr p15, 0, r0, c1, c0, 0 @ disable caches 65 66 ret lr 67 + SYM_FUNC_END(cpu_arm922_proc_fin) 66 68 67 69 /* 68 70 * cpu_arm922_reset(loc) ··· 77 75 */ 78 76 .align 5 79 77 .pushsection .idmap.text, "ax" 80 - ENTRY(cpu_arm922_reset) 78 + SYM_TYPED_FUNC_START(cpu_arm922_reset) 81 79 mov ip, #0 82 80 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 83 81 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 89 87 bic ip, ip, #0x1100 @ ...i...s........ 90 88 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 91 89 ret r0 92 - ENDPROC(cpu_arm922_reset) 90 + SYM_FUNC_END(cpu_arm922_reset) 93 91 .popsection 94 92 95 93 /* 96 94 * cpu_arm922_do_idle() 97 95 */ 98 96 .align 5 99 - ENTRY(cpu_arm922_do_idle) 97 + SYM_TYPED_FUNC_START(cpu_arm922_do_idle) 100 98 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 101 99 ret lr 102 - 100 + SYM_FUNC_END(cpu_arm922_do_idle) 103 101 104 102 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 105 103 ··· 317 315 318 316 #endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */ 319 317 320 - ENTRY(cpu_arm922_dcache_clean_area) 318 + SYM_TYPED_FUNC_START(cpu_arm922_dcache_clean_area) 321 319 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 322 320 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 323 321 add r0, r0, #CACHE_DLINESIZE ··· 325 323 bhi 1b 326 324 #endif 327 325 ret lr 326 + SYM_FUNC_END(cpu_arm922_dcache_clean_area) 328 327 329 328 /* =============================== PageTable ============================== */ 330 329 ··· 337 334 * pgd: new page tables 338 335 */ 339 336 .align 5 340 - ENTRY(cpu_arm922_switch_mm) 337 + 
SYM_TYPED_FUNC_START(cpu_arm922_switch_mm) 341 338 #ifdef CONFIG_MMU 342 339 mov ip, #0 343 340 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH ··· 361 358 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 362 359 #endif 363 360 ret lr 361 + SYM_FUNC_END(cpu_arm922_switch_mm) 364 362 365 363 /* 366 364 * cpu_arm922_set_pte_ext(ptep, pte, ext) ··· 369 365 * Set a PTE and flush it out 370 366 */ 371 367 .align 5 372 - ENTRY(cpu_arm922_set_pte_ext) 368 + SYM_TYPED_FUNC_START(cpu_arm922_set_pte_ext) 373 369 #ifdef CONFIG_MMU 374 370 armv3_set_pte_ext 375 371 mov r0, r0 ··· 377 373 mcr p15, 0, r0, c7, c10, 4 @ drain WB 378 374 #endif /* CONFIG_MMU */ 379 375 ret lr 376 + SYM_FUNC_END(cpu_arm922_set_pte_ext) 380 377 381 378 .type __arm922_setup, #function 382 379 __arm922_setup:
+14 -8
arch/arm/mm/proc-arm925.S
··· 72 72 /* 73 73 * cpu_arm925_proc_init() 74 74 */ 75 - ENTRY(cpu_arm925_proc_init) 75 + SYM_TYPED_FUNC_START(cpu_arm925_proc_init) 76 76 ret lr 77 + SYM_FUNC_END(cpu_arm925_proc_init) 77 78 78 79 /* 79 80 * cpu_arm925_proc_fin() 80 81 */ 81 - ENTRY(cpu_arm925_proc_fin) 82 + SYM_TYPED_FUNC_START(cpu_arm925_proc_fin) 82 83 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 83 84 bic r0, r0, #0x1000 @ ...i............ 84 85 bic r0, r0, #0x000e @ ............wca. 85 86 mcr p15, 0, r0, c1, c0, 0 @ disable caches 86 87 ret lr 88 + SYM_FUNC_END(cpu_arm925_proc_fin) 87 89 88 90 /* 89 91 * cpu_arm925_reset(loc) ··· 98 96 */ 99 97 .align 5 100 98 .pushsection .idmap.text, "ax" 101 - ENTRY(cpu_arm925_reset) 99 + SYM_TYPED_FUNC_START(cpu_arm925_reset) 102 100 /* Send software reset to MPU and DSP */ 103 101 mov ip, #0xff000000 104 102 orr ip, ip, #0x00fe0000 105 103 orr ip, ip, #0x0000ce00 106 104 mov r4, #1 107 105 strh r4, [ip, #0x10] 108 - ENDPROC(cpu_arm925_reset) 106 + SYM_FUNC_END(cpu_arm925_reset) 109 107 .popsection 110 108 111 109 mov ip, #0 ··· 126 124 * Called with IRQs disabled 127 125 */ 128 126 .align 10 129 - ENTRY(cpu_arm925_do_idle) 127 + SYM_TYPED_FUNC_START(cpu_arm925_do_idle) 130 128 mov r0, #0 131 129 mrc p15, 0, r1, c1, c0, 0 @ Read control register 132 130 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer ··· 135 133 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 136 134 mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable 137 135 ret lr 136 + SYM_FUNC_END(cpu_arm925_do_idle) 138 137 139 138 /* 140 139 * flush_icache_all() ··· 371 368 ret lr 372 369 SYM_FUNC_END(arm925_dma_unmap_area) 373 370 374 - ENTRY(cpu_arm925_dcache_clean_area) 371 + SYM_TYPED_FUNC_START(cpu_arm925_dcache_clean_area) 375 372 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 376 373 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 377 374 add r0, r0, #CACHE_DLINESIZE ··· 380 377 #endif 381 378 mcr p15, 0, r0, c7, c10, 4 @ drain WB 382 379 ret lr 380 + SYM_FUNC_END(cpu_arm925_dcache_clean_area) 383 381 384 
382 /* =============================== PageTable ============================== */ 385 383 ··· 392 388 * pgd: new page tables 393 389 */ 394 390 .align 5 395 - ENTRY(cpu_arm925_switch_mm) 391 + SYM_TYPED_FUNC_START(cpu_arm925_switch_mm) 396 392 #ifdef CONFIG_MMU 397 393 mov ip, #0 398 394 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH ··· 410 406 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 411 407 #endif 412 408 ret lr 409 + SYM_FUNC_END(cpu_arm925_switch_mm) 413 410 414 411 /* 415 412 * cpu_arm925_set_pte_ext(ptep, pte, ext) ··· 418 413 * Set a PTE and flush it out 419 414 */ 420 415 .align 5 421 - ENTRY(cpu_arm925_set_pte_ext) 416 + SYM_TYPED_FUNC_START(cpu_arm925_set_pte_ext) 422 417 #ifdef CONFIG_MMU 423 418 armv3_set_pte_ext 424 419 mov r0, r0 ··· 428 423 mcr p15, 0, r0, c7, c10, 4 @ drain WB 429 424 #endif /* CONFIG_MMU */ 430 425 ret lr 426 + SYM_FUNC_END(cpu_arm925_set_pte_ext) 431 427 432 428 .type __arm925_setup, #function 433 429 __arm925_setup:
+19 -12
arch/arm/mm/proc-arm926.S
··· 41 41 /* 42 42 * cpu_arm926_proc_init() 43 43 */ 44 - ENTRY(cpu_arm926_proc_init) 44 + SYM_TYPED_FUNC_START(cpu_arm926_proc_init) 45 45 ret lr 46 + SYM_FUNC_END(cpu_arm926_proc_init) 46 47 47 48 /* 48 49 * cpu_arm926_proc_fin() 49 50 */ 50 - ENTRY(cpu_arm926_proc_fin) 51 + SYM_TYPED_FUNC_START(cpu_arm926_proc_fin) 51 52 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 52 53 bic r0, r0, #0x1000 @ ...i............ 53 54 bic r0, r0, #0x000e @ ............wca. 54 55 mcr p15, 0, r0, c1, c0, 0 @ disable caches 55 56 ret lr 57 + SYM_FUNC_END(cpu_arm926_proc_fin) 56 58 57 59 /* 58 60 * cpu_arm926_reset(loc) ··· 67 65 */ 68 66 .align 5 69 67 .pushsection .idmap.text, "ax" 70 - ENTRY(cpu_arm926_reset) 68 + SYM_TYPED_FUNC_START(cpu_arm926_reset) 71 69 mov ip, #0 72 70 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 73 71 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 79 77 bic ip, ip, #0x1100 @ ...i...s........ 80 78 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 81 79 ret r0 82 - ENDPROC(cpu_arm926_reset) 80 + SYM_FUNC_END(cpu_arm926_reset) 83 81 .popsection 84 82 85 83 /* ··· 88 86 * Called with IRQs disabled 89 87 */ 90 88 .align 10 91 - ENTRY(cpu_arm926_do_idle) 89 + SYM_TYPED_FUNC_START(cpu_arm926_do_idle) 92 90 mov r0, #0 93 91 mrc p15, 0, r1, c1, c0, 0 @ Read control register 94 92 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer ··· 101 99 mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable 102 100 msr cpsr_c, r3 @ Restore FIQ state 103 101 ret lr 102 + SYM_FUNC_END(cpu_arm926_do_idle) 104 103 105 104 /* 106 105 * flush_icache_all() ··· 334 331 ret lr 335 332 SYM_FUNC_END(arm926_dma_unmap_area) 336 333 337 - ENTRY(cpu_arm926_dcache_clean_area) 334 + SYM_TYPED_FUNC_START(cpu_arm926_dcache_clean_area) 338 335 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 339 336 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 340 337 add r0, r0, #CACHE_DLINESIZE ··· 343 340 #endif 344 341 mcr p15, 0, r0, c7, c10, 4 @ drain WB 345 342 ret lr 343 + SYM_FUNC_END(cpu_arm926_dcache_clean_area) 346 344 347 345 /* 
=============================== PageTable ============================== */ 348 346 ··· 355 351 * pgd: new page tables 356 352 */ 357 353 .align 5 358 - ENTRY(cpu_arm926_switch_mm) 354 + 355 + SYM_TYPED_FUNC_START(cpu_arm926_switch_mm) 359 356 #ifdef CONFIG_MMU 360 357 mov ip, #0 361 358 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH ··· 372 367 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 373 368 #endif 374 369 ret lr 370 + SYM_FUNC_END(cpu_arm926_switch_mm) 375 371 376 372 /* 377 373 * cpu_arm926_set_pte_ext(ptep, pte, ext) ··· 380 374 * Set a PTE and flush it out 381 375 */ 382 376 .align 5 383 - ENTRY(cpu_arm926_set_pte_ext) 377 + SYM_TYPED_FUNC_START(cpu_arm926_set_pte_ext) 384 378 #ifdef CONFIG_MMU 385 379 armv3_set_pte_ext 386 380 mov r0, r0 ··· 390 384 mcr p15, 0, r0, c7, c10, 4 @ drain WB 391 385 #endif 392 386 ret lr 387 + SYM_FUNC_END(cpu_arm926_set_pte_ext) 393 388 394 389 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 395 390 .globl cpu_arm926_suspend_size 396 391 .equ cpu_arm926_suspend_size, 4 * 3 397 392 #ifdef CONFIG_ARM_CPU_SUSPEND 398 - ENTRY(cpu_arm926_do_suspend) 393 + SYM_TYPED_FUNC_START(cpu_arm926_do_suspend) 399 394 stmfd sp!, {r4 - r6, lr} 400 395 mrc p15, 0, r4, c13, c0, 0 @ PID 401 396 mrc p15, 0, r5, c3, c0, 0 @ Domain ID 402 397 mrc p15, 0, r6, c1, c0, 0 @ Control register 403 398 stmia r0, {r4 - r6} 404 399 ldmfd sp!, {r4 - r6, pc} 405 - ENDPROC(cpu_arm926_do_suspend) 400 + SYM_FUNC_END(cpu_arm926_do_suspend) 406 401 407 - ENTRY(cpu_arm926_do_resume) 402 + SYM_TYPED_FUNC_START(cpu_arm926_do_resume) 408 403 mov ip, #0 409 404 mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs 410 405 mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches ··· 415 408 mcr p15, 0, r1, c2, c0, 0 @ TTB address 416 409 mov r0, r6 @ control register 417 410 b cpu_resume_mmu 418 - ENDPROC(cpu_arm926_do_resume) 411 + SYM_FUNC_END(cpu_arm926_do_resume) 419 412 #endif 420 413 421 414 .type __arm926_setup, #function
+14 -7
arch/arm/mm/proc-arm940.S
··· 26 26 * 27 27 * These are not required. 28 28 */ 29 - ENTRY(cpu_arm940_proc_init) 30 - ENTRY(cpu_arm940_switch_mm) 29 + SYM_TYPED_FUNC_START(cpu_arm940_proc_init) 31 30 ret lr 31 + SYM_FUNC_END(cpu_arm940_proc_init) 32 + 33 + SYM_TYPED_FUNC_START(cpu_arm940_switch_mm) 34 + ret lr 35 + SYM_FUNC_END(cpu_arm940_switch_mm) 32 36 33 37 /* 34 38 * cpu_arm940_proc_fin() 35 39 */ 36 - ENTRY(cpu_arm940_proc_fin) 40 + SYM_TYPED_FUNC_START(cpu_arm940_proc_fin) 37 41 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 38 42 bic r0, r0, #0x00001000 @ i-cache 39 43 bic r0, r0, #0x00000004 @ d-cache 40 44 mcr p15, 0, r0, c1, c0, 0 @ disable caches 41 45 ret lr 46 + SYM_FUNC_END(cpu_arm940_proc_fin) 42 47 43 48 /* 44 49 * cpu_arm940_reset(loc) ··· 51 46 * Notes : This sets up everything for a reset 52 47 */ 53 48 .pushsection .idmap.text, "ax" 54 - ENTRY(cpu_arm940_reset) 49 + SYM_TYPED_FUNC_START(cpu_arm940_reset) 55 50 mov ip, #0 56 51 mcr p15, 0, ip, c7, c5, 0 @ flush I cache 57 52 mcr p15, 0, ip, c7, c6, 0 @ flush D cache ··· 61 56 bic ip, ip, #0x00001000 @ i-cache 62 57 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 63 58 ret r0 64 - ENDPROC(cpu_arm940_reset) 59 + SYM_FUNC_END(cpu_arm940_reset) 65 60 .popsection 66 61 67 62 /* 68 63 * cpu_arm940_do_idle() 69 64 */ 70 65 .align 5 71 - ENTRY(cpu_arm940_do_idle) 66 + SYM_TYPED_FUNC_START(cpu_arm940_do_idle) 72 67 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 73 68 ret lr 69 + SYM_FUNC_END(cpu_arm940_do_idle) 74 70 75 71 /* 76 72 * flush_icache_all() ··· 210 204 * - end - virtual end address 211 205 */ 212 206 arm940_dma_clean_range: 213 - ENTRY(cpu_arm940_dcache_clean_area) 207 + SYM_TYPED_FUNC_START(cpu_arm940_dcache_clean_area) 214 208 mov ip, #0 215 209 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 216 210 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments ··· 223 217 #endif 224 218 mcr p15, 0, ip, c7, c10, 4 @ drain WB 225 219 ret lr 220 + SYM_FUNC_END(cpu_arm940_dcache_clean_area) 226 221 227 222 /* 228 223 * dma_flush_range(start, end)
+14 -7
arch/arm/mm/proc-arm946.S
··· 33 33 * 34 34 * These are not required. 35 35 */ 36 - ENTRY(cpu_arm946_proc_init) 37 - ENTRY(cpu_arm946_switch_mm) 36 + SYM_TYPED_FUNC_START(cpu_arm946_proc_init) 38 37 ret lr 38 + SYM_FUNC_END(cpu_arm946_proc_init) 39 + 40 + SYM_TYPED_FUNC_START(cpu_arm946_switch_mm) 41 + ret lr 42 + SYM_FUNC_END(cpu_arm946_switch_mm) 39 43 40 44 /* 41 45 * cpu_arm946_proc_fin() 42 46 */ 43 - ENTRY(cpu_arm946_proc_fin) 47 + SYM_TYPED_FUNC_START(cpu_arm946_proc_fin) 44 48 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 45 49 bic r0, r0, #0x00001000 @ i-cache 46 50 bic r0, r0, #0x00000004 @ d-cache 47 51 mcr p15, 0, r0, c1, c0, 0 @ disable caches 48 52 ret lr 53 + SYM_FUNC_END(cpu_arm946_proc_fin) 49 54 50 55 /* 51 56 * cpu_arm946_reset(loc) ··· 58 53 * Notes : This sets up everything for a reset 59 54 */ 60 55 .pushsection .idmap.text, "ax" 61 - ENTRY(cpu_arm946_reset) 56 + SYM_TYPED_FUNC_START(cpu_arm946_reset) 62 57 mov ip, #0 63 58 mcr p15, 0, ip, c7, c5, 0 @ flush I cache 64 59 mcr p15, 0, ip, c7, c6, 0 @ flush D cache ··· 68 63 bic ip, ip, #0x00001000 @ i-cache 69 64 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 70 65 ret r0 71 - ENDPROC(cpu_arm946_reset) 66 + SYM_FUNC_END(cpu_arm946_reset) 72 67 .popsection 73 68 74 69 /* 75 70 * cpu_arm946_do_idle() 76 71 */ 77 72 .align 5 78 - ENTRY(cpu_arm946_do_idle) 73 + SYM_TYPED_FUNC_START(cpu_arm946_do_idle) 79 74 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 80 75 ret lr 76 + SYM_FUNC_END(cpu_arm946_do_idle) 81 77 82 78 /* 83 79 * flush_icache_all() ··· 318 312 ret lr 319 313 SYM_FUNC_END(arm946_dma_unmap_area) 320 314 321 - ENTRY(cpu_arm946_dcache_clean_area) 315 + SYM_TYPED_FUNC_START(cpu_arm946_dcache_clean_area) 322 316 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 323 317 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 324 318 add r0, r0, #CACHE_DLINESIZE ··· 327 321 #endif 328 322 mcr p15, 0, r0, c7, c10, 4 @ drain WB 329 323 ret lr 324 + SYM_FUNC_END(cpu_arm946_dcache_clean_area) 330 325 331 326 .type __arm946_setup, #function 332 327 
__arm946_setup:
+19 -7
arch/arm/mm/proc-arm9tdmi.S
··· 6 6 */ 7 7 #include <linux/linkage.h> 8 8 #include <linux/init.h> 9 + #include <linux/cfi_types.h> 9 10 #include <linux/pgtable.h> 10 11 #include <asm/assembler.h> 11 12 #include <asm/asm-offsets.h> ··· 25 24 * 26 25 * These are not required. 27 26 */ 28 - ENTRY(cpu_arm9tdmi_proc_init) 29 - ENTRY(cpu_arm9tdmi_do_idle) 30 - ENTRY(cpu_arm9tdmi_dcache_clean_area) 31 - ENTRY(cpu_arm9tdmi_switch_mm) 27 + SYM_TYPED_FUNC_START(cpu_arm9tdmi_proc_init) 32 28 ret lr 29 + SYM_FUNC_END(cpu_arm9tdmi_proc_init) 30 + 31 + SYM_TYPED_FUNC_START(cpu_arm9tdmi_do_idle) 32 + ret lr 33 + SYM_FUNC_END(cpu_arm9tdmi_do_idle) 34 + 35 + SYM_TYPED_FUNC_START(cpu_arm9tdmi_dcache_clean_area) 36 + ret lr 37 + SYM_FUNC_END(cpu_arm9tdmi_dcache_clean_area) 38 + 39 + SYM_TYPED_FUNC_START(cpu_arm9tdmi_switch_mm) 40 + ret lr 41 + SYM_FUNC_END(cpu_arm9tdmi_switch_mm) 33 42 34 43 /* 35 44 * cpu_arm9tdmi_proc_fin() 36 45 */ 37 - ENTRY(cpu_arm9tdmi_proc_fin) 46 + SYM_TYPED_FUNC_START(cpu_arm9tdmi_proc_fin) 38 47 ret lr 48 + SYM_FUNC_END(cpu_arm9tdmi_proc_fin) 39 49 40 50 /* 41 51 * Function: cpu_arm9tdmi_reset(loc) ··· 54 42 * Purpose : Sets up everything for a reset and jump to the location for soft reset. 55 43 */ 56 44 .pushsection .idmap.text, "ax" 57 - ENTRY(cpu_arm9tdmi_reset) 45 + SYM_TYPED_FUNC_START(cpu_arm9tdmi_reset) 58 46 ret r0 59 - ENDPROC(cpu_arm9tdmi_reset) 47 + SYM_FUNC_END(cpu_arm9tdmi_reset) 60 48 .popsection 61 49 62 50 .type __arm9tdmi_setup, #function
+15 -9
arch/arm/mm/proc-fa526.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <linux/init.h> 14 + #include <linux/cfi_types.h> 14 15 #include <linux/pgtable.h> 15 16 #include <asm/assembler.h> 16 17 #include <asm/hwcap.h> ··· 27 26 /* 28 27 * cpu_fa526_proc_init() 29 28 */ 30 - ENTRY(cpu_fa526_proc_init) 29 + SYM_TYPED_FUNC_START(cpu_fa526_proc_init) 31 30 ret lr 31 + SYM_FUNC_END(cpu_fa526_proc_init) 32 32 33 33 /* 34 34 * cpu_fa526_proc_fin() 35 35 */ 36 - ENTRY(cpu_fa526_proc_fin) 36 + SYM_TYPED_FUNC_START(cpu_fa526_proc_fin) 37 37 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 38 38 bic r0, r0, #0x1000 @ ...i............ 39 39 bic r0, r0, #0x000e @ ............wca. ··· 42 40 nop 43 41 nop 44 42 ret lr 43 + SYM_FUNC_END(cpu_fa526_proc_fin) 45 44 46 45 /* 47 46 * cpu_fa526_reset(loc) ··· 55 52 */ 56 53 .align 4 57 54 .pushsection .idmap.text, "ax" 58 - ENTRY(cpu_fa526_reset) 55 + SYM_TYPED_FUNC_START(cpu_fa526_reset) 59 56 /* TODO: Use CP8 if possible... */ 60 57 mov ip, #0 61 58 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches ··· 71 68 nop 72 69 nop 73 70 ret r0 74 - ENDPROC(cpu_fa526_reset) 71 + SYM_FUNC_END(cpu_fa526_reset) 75 72 .popsection 76 73 77 74 /* 78 75 * cpu_fa526_do_idle() 79 76 */ 80 77 .align 4 81 - ENTRY(cpu_fa526_do_idle) 78 + SYM_TYPED_FUNC_START(cpu_fa526_do_idle) 82 79 ret lr 80 + SYM_FUNC_END(cpu_fa526_do_idle) 83 81 84 - 85 - ENTRY(cpu_fa526_dcache_clean_area) 82 + SYM_TYPED_FUNC_START(cpu_fa526_dcache_clean_area) 86 83 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 87 84 add r0, r0, #CACHE_DLINESIZE 88 85 subs r1, r1, #CACHE_DLINESIZE 89 86 bhi 1b 90 87 mcr p15, 0, r0, c7, c10, 4 @ drain WB 91 88 ret lr 89 + SYM_FUNC_END(cpu_fa526_dcache_clean_area) 92 90 93 91 /* =============================== PageTable ============================== */ 94 92 ··· 101 97 * pgd: new page tables 102 98 */ 103 99 .align 4 104 - ENTRY(cpu_fa526_switch_mm) 100 + SYM_TYPED_FUNC_START(cpu_fa526_switch_mm) 105 101 #ifdef CONFIG_MMU 106 102 mov ip, #0 107 103 #ifdef 
CONFIG_CPU_DCACHE_WRITETHROUGH ··· 117 113 mcr p15, 0, ip, c8, c7, 0 @ invalidate UTLB 118 114 #endif 119 115 ret lr 116 + SYM_FUNC_END(cpu_fa526_switch_mm) 120 117 121 118 /* 122 119 * cpu_fa526_set_pte_ext(ptep, pte, ext) ··· 125 120 * Set a PTE and flush it out 126 121 */ 127 122 .align 4 128 - ENTRY(cpu_fa526_set_pte_ext) 123 + SYM_TYPED_FUNC_START(cpu_fa526_set_pte_ext) 129 124 #ifdef CONFIG_MMU 130 125 armv3_set_pte_ext 131 126 mov r0, r0 ··· 134 129 mcr p15, 0, r0, c7, c10, 4 @ drain WB 135 130 #endif 136 131 ret lr 132 + SYM_FUNC_END(cpu_fa526_set_pte_ext) 137 133 138 134 .type __fa526_setup, #function 139 135 __fa526_setup:
+18 -12
arch/arm/mm/proc-feroceon.S
··· 44 44 /* 45 45 * cpu_feroceon_proc_init() 46 46 */ 47 - ENTRY(cpu_feroceon_proc_init) 47 + SYM_TYPED_FUNC_START(cpu_feroceon_proc_init) 48 48 mrc p15, 0, r0, c0, c0, 1 @ read cache type register 49 49 ldr r1, __cache_params 50 50 mov r2, #(16 << 5) ··· 62 62 str_l r1, VFP_arch_feroceon, r2 63 63 #endif 64 64 ret lr 65 + SYM_FUNC_END(cpu_feroceon_proc_init) 65 66 66 67 /* 67 68 * cpu_feroceon_proc_fin() 68 69 */ 69 - ENTRY(cpu_feroceon_proc_fin) 70 + SYM_TYPED_FUNC_START(cpu_feroceon_proc_fin) 70 71 #if defined(CONFIG_CACHE_FEROCEON_L2) && \ 71 72 !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH) 72 73 mov r0, #0 ··· 80 79 bic r0, r0, #0x000e @ ............wca. 81 80 mcr p15, 0, r0, c1, c0, 0 @ disable caches 82 81 ret lr 82 + SYM_FUNC_END(cpu_feroceon_proc_fin) 83 83 84 84 /* 85 85 * cpu_feroceon_reset(loc) ··· 93 91 */ 94 92 .align 5 95 93 .pushsection .idmap.text, "ax" 96 - ENTRY(cpu_feroceon_reset) 94 + SYM_TYPED_FUNC_START(cpu_feroceon_reset) 97 95 mov ip, #0 98 96 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 99 97 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 105 103 bic ip, ip, #0x1100 @ ...i...s........ 
106 104 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 107 105 ret r0 108 - ENDPROC(cpu_feroceon_reset) 106 + SYM_FUNC_END(cpu_feroceon_reset) 109 107 .popsection 110 108 111 109 /* ··· 114 112 * Called with IRQs disabled 115 113 */ 116 114 .align 5 117 - ENTRY(cpu_feroceon_do_idle) 115 + SYM_TYPED_FUNC_START(cpu_feroceon_do_idle) 118 116 mov r0, #0 119 117 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer 120 118 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 121 119 ret lr 120 + SYM_FUNC_END(cpu_feroceon_do_idle) 122 121 123 122 /* 124 123 * flush_icache_all() ··· 418 415 SYM_FUNC_END(feroceon_dma_unmap_area) 419 416 420 417 .align 5 421 - ENTRY(cpu_feroceon_dcache_clean_area) 418 + SYM_TYPED_FUNC_START(cpu_feroceon_dcache_clean_area) 422 419 #if defined(CONFIG_CACHE_FEROCEON_L2) && \ 423 420 !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH) 424 421 mov r2, r0 ··· 437 434 #endif 438 435 mcr p15, 0, r0, c7, c10, 4 @ drain WB 439 436 ret lr 437 + SYM_FUNC_END(cpu_feroceon_dcache_clean_area) 440 438 441 439 /* =============================== PageTable ============================== */ 442 440 ··· 449 445 * pgd: new page tables 450 446 */ 451 447 .align 5 452 - ENTRY(cpu_feroceon_switch_mm) 448 + SYM_TYPED_FUNC_START(cpu_feroceon_switch_mm) 453 449 #ifdef CONFIG_MMU 454 450 /* 455 451 * Note: we wish to call __flush_whole_cache but we need to preserve ··· 470 466 #else 471 467 ret lr 472 468 #endif 469 + SYM_FUNC_END(cpu_feroceon_switch_mm) 473 470 474 471 /* 475 472 * cpu_feroceon_set_pte_ext(ptep, pte, ext) ··· 478 473 * Set a PTE and flush it out 479 474 */ 480 475 .align 5 481 - ENTRY(cpu_feroceon_set_pte_ext) 476 + SYM_TYPED_FUNC_START(cpu_feroceon_set_pte_ext) 482 477 #ifdef CONFIG_MMU 483 478 armv3_set_pte_ext wc_disable=0 484 479 mov r0, r0 ··· 490 485 mcr p15, 0, r0, c7, c10, 4 @ drain WB 491 486 #endif 492 487 ret lr 488 + SYM_FUNC_END(cpu_feroceon_set_pte_ext) 493 489 494 490 /* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */ 495 491 .globl 
cpu_feroceon_suspend_size 496 492 .equ cpu_feroceon_suspend_size, 4 * 3 497 493 #ifdef CONFIG_ARM_CPU_SUSPEND 498 - ENTRY(cpu_feroceon_do_suspend) 494 + SYM_TYPED_FUNC_START(cpu_feroceon_do_suspend) 499 495 stmfd sp!, {r4 - r6, lr} 500 496 mrc p15, 0, r4, c13, c0, 0 @ PID 501 497 mrc p15, 0, r5, c3, c0, 0 @ Domain ID 502 498 mrc p15, 0, r6, c1, c0, 0 @ Control register 503 499 stmia r0, {r4 - r6} 504 500 ldmfd sp!, {r4 - r6, pc} 505 - ENDPROC(cpu_feroceon_do_suspend) 501 + SYM_FUNC_END(cpu_feroceon_do_suspend) 506 502 507 - ENTRY(cpu_feroceon_do_resume) 503 + SYM_TYPED_FUNC_START(cpu_feroceon_do_resume) 508 504 mov ip, #0 509 505 mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs 510 506 mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches ··· 515 509 mcr p15, 0, r1, c2, c0, 0 @ TTB address 516 510 mov r0, r6 @ control register 517 511 b cpu_resume_mmu 518 - ENDPROC(cpu_feroceon_do_resume) 512 + SYM_FUNC_END(cpu_feroceon_do_resume) 519 513 #endif 520 514 521 515 .type __feroceon_setup, #function
+18 -12
arch/arm/mm/proc-mohawk.S
··· 32 32 /* 33 33 * cpu_mohawk_proc_init() 34 34 */ 35 - ENTRY(cpu_mohawk_proc_init) 35 + SYM_TYPED_FUNC_START(cpu_mohawk_proc_init) 36 36 ret lr 37 + SYM_FUNC_END(cpu_mohawk_proc_init) 37 38 38 39 /* 39 40 * cpu_mohawk_proc_fin() 40 41 */ 41 - ENTRY(cpu_mohawk_proc_fin) 42 + SYM_TYPED_FUNC_START(cpu_mohawk_proc_fin) 42 43 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 43 44 bic r0, r0, #0x1800 @ ...iz........... 44 45 bic r0, r0, #0x0006 @ .............ca. 45 46 mcr p15, 0, r0, c1, c0, 0 @ disable caches 46 47 ret lr 48 + SYM_FUNC_END(cpu_mohawk_proc_fin) 47 49 48 50 /* 49 51 * cpu_mohawk_reset(loc) ··· 60 58 */ 61 59 .align 5 62 60 .pushsection .idmap.text, "ax" 63 - ENTRY(cpu_mohawk_reset) 61 + SYM_TYPED_FUNC_START(cpu_mohawk_reset) 64 62 mov ip, #0 65 63 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 66 64 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 70 68 bic ip, ip, #0x1100 @ ...i...s........ 71 69 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 72 70 ret r0 73 - ENDPROC(cpu_mohawk_reset) 71 + SYM_FUNC_END(cpu_mohawk_reset) 74 72 .popsection 75 73 76 74 /* ··· 79 77 * Called with IRQs disabled 80 78 */ 81 79 .align 5 82 - ENTRY(cpu_mohawk_do_idle) 80 + SYM_TYPED_FUNC_START(cpu_mohawk_do_idle) 83 81 mov r0, #0 84 82 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 85 83 mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt 86 84 ret lr 85 + SYM_FUNC_END(cpu_mohawk_do_idle) 87 86 88 87 /* 89 88 * flush_icache_all() ··· 299 296 ret lr 300 297 SYM_FUNC_END(mohawk_dma_unmap_area) 301 298 302 - ENTRY(cpu_mohawk_dcache_clean_area) 299 + SYM_TYPED_FUNC_START(cpu_mohawk_dcache_clean_area) 303 300 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 304 301 add r0, r0, #CACHE_DLINESIZE 305 302 subs r1, r1, #CACHE_DLINESIZE 306 303 bhi 1b 307 304 mcr p15, 0, r0, c7, c10, 4 @ drain WB 308 305 ret lr 306 + SYM_FUNC_END(cpu_mohawk_dcache_clean_area) 309 307 310 308 /* 311 309 * cpu_mohawk_switch_mm(pgd) ··· 316 312 * pgd: new page tables 317 313 */ 318 314 .align 5 319 - 
ENTRY(cpu_mohawk_switch_mm) 315 + SYM_TYPED_FUNC_START(cpu_mohawk_switch_mm) 320 316 mov ip, #0 321 317 mcr p15, 0, ip, c7, c14, 0 @ clean & invalidate all D cache 322 318 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache ··· 325 321 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 326 322 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 327 323 ret lr 324 + SYM_FUNC_END(cpu_mohawk_switch_mm) 328 325 329 326 /* 330 327 * cpu_mohawk_set_pte_ext(ptep, pte, ext) ··· 333 328 * Set a PTE and flush it out 334 329 */ 335 330 .align 5 336 - ENTRY(cpu_mohawk_set_pte_ext) 331 + SYM_TYPED_FUNC_START(cpu_mohawk_set_pte_ext) 337 332 #ifdef CONFIG_MMU 338 333 armv3_set_pte_ext 339 334 mov r0, r0 ··· 341 336 mcr p15, 0, r0, c7, c10, 4 @ drain WB 342 337 ret lr 343 338 #endif 339 + SYM_FUNC_END(cpu_mohawk_set_pte_ext) 344 340 345 341 .globl cpu_mohawk_suspend_size 346 342 .equ cpu_mohawk_suspend_size, 4 * 6 347 343 #ifdef CONFIG_ARM_CPU_SUSPEND 348 - ENTRY(cpu_mohawk_do_suspend) 344 + SYM_TYPED_FUNC_START(cpu_mohawk_do_suspend) 349 345 stmfd sp!, {r4 - r9, lr} 350 346 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode 351 347 mrc p15, 0, r5, c15, c1, 0 @ CP access reg ··· 357 351 bic r4, r4, #2 @ clear frequency change bit 358 352 stmia r0, {r4 - r9} @ store cp regs 359 353 ldmia sp!, {r4 - r9, pc} 360 - ENDPROC(cpu_mohawk_do_suspend) 354 + SYM_FUNC_END(cpu_mohawk_do_suspend) 361 355 362 - ENTRY(cpu_mohawk_do_resume) 356 + SYM_TYPED_FUNC_START(cpu_mohawk_do_resume) 363 357 ldmia r0, {r4 - r9} @ load cp regs 364 358 mov ip, #0 365 359 mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB ··· 375 369 mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg 376 370 mov r0, r9 @ control register 377 371 b cpu_resume_mmu 378 - ENDPROC(cpu_mohawk_do_resume) 372 + SYM_FUNC_END(cpu_mohawk_do_resume) 379 373 #endif 380 374 381 375 .type __mohawk_setup, #function
+15 -8
arch/arm/mm/proc-sa110.S
··· 12 12 */ 13 13 #include <linux/linkage.h> 14 14 #include <linux/init.h> 15 + #include <linux/cfi_types.h> 15 16 #include <linux/pgtable.h> 16 17 #include <asm/assembler.h> 17 18 #include <asm/asm-offsets.h> ··· 33 32 /* 34 33 * cpu_sa110_proc_init() 35 34 */ 36 - ENTRY(cpu_sa110_proc_init) 35 + SYM_TYPED_FUNC_START(cpu_sa110_proc_init) 37 36 mov r0, #0 38 37 mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching 39 38 ret lr 39 + SYM_FUNC_END(cpu_sa110_proc_init) 40 40 41 41 /* 42 42 * cpu_sa110_proc_fin() 43 43 */ 44 - ENTRY(cpu_sa110_proc_fin) 44 + SYM_TYPED_FUNC_START(cpu_sa110_proc_fin) 45 45 mov r0, #0 46 46 mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching 47 47 mrc p15, 0, r0, c1, c0, 0 @ ctrl register ··· 50 48 bic r0, r0, #0x000e @ ............wca. 51 49 mcr p15, 0, r0, c1, c0, 0 @ disable caches 52 50 ret lr 51 + SYM_FUNC_END(cpu_sa110_proc_fin) 53 52 54 53 /* 55 54 * cpu_sa110_reset(loc) ··· 63 60 */ 64 61 .align 5 65 62 .pushsection .idmap.text, "ax" 66 - ENTRY(cpu_sa110_reset) 63 + SYM_TYPED_FUNC_START(cpu_sa110_reset) 67 64 mov ip, #0 68 65 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 69 66 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 75 72 bic ip, ip, #0x1100 @ ...i...s........ 
76 73 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 77 74 ret r0 78 - ENDPROC(cpu_sa110_reset) 75 + SYM_FUNC_END(cpu_sa110_reset) 79 76 .popsection 80 77 81 78 /* ··· 91 88 */ 92 89 .align 5 93 90 94 - ENTRY(cpu_sa110_do_idle) 91 + SYM_TYPED_FUNC_START(cpu_sa110_do_idle) 95 92 mcr p15, 0, ip, c15, c2, 2 @ disable clock switching 96 93 ldr r1, =UNCACHEABLE_ADDR @ load from uncacheable loc 97 94 ldr r1, [r1, #0] @ force switch to MCLK ··· 104 101 mov r0, r0 @ safety 105 102 mcr p15, 0, r0, c15, c1, 2 @ enable clock switching 106 103 ret lr 104 + SYM_FUNC_END(cpu_sa110_do_idle) 107 105 108 106 /* ================================= CACHE ================================ */ 109 107 ··· 117 113 * addr: cache-unaligned virtual address 118 114 */ 119 115 .align 5 120 - ENTRY(cpu_sa110_dcache_clean_area) 116 + SYM_TYPED_FUNC_START(cpu_sa110_dcache_clean_area) 121 117 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 122 118 add r0, r0, #DCACHELINESIZE 123 119 subs r1, r1, #DCACHELINESIZE 124 120 bhi 1b 125 121 ret lr 122 + SYM_FUNC_END(cpu_sa110_dcache_clean_area) 126 123 127 124 /* =============================== PageTable ============================== */ 128 125 ··· 135 130 * pgd: new page tables 136 131 */ 137 132 .align 5 138 - ENTRY(cpu_sa110_switch_mm) 133 + SYM_TYPED_FUNC_START(cpu_sa110_switch_mm) 139 134 #ifdef CONFIG_MMU 140 135 str lr, [sp, #-4]! 141 136 bl v4wb_flush_kern_cache_all @ clears IP ··· 145 140 #else 146 141 ret lr 147 142 #endif 143 + SYM_FUNC_END(cpu_sa110_switch_mm) 148 144 149 145 /* 150 146 * cpu_sa110_set_pte_ext(ptep, pte, ext) ··· 153 147 * Set a PTE and flush it out 154 148 */ 155 149 .align 5 156 - ENTRY(cpu_sa110_set_pte_ext) 150 + SYM_TYPED_FUNC_START(cpu_sa110_set_pte_ext) 157 151 #ifdef CONFIG_MMU 158 152 armv3_set_pte_ext wc_disable=0 159 153 mov r0, r0 ··· 161 155 mcr p15, 0, r0, c7, c10, 4 @ drain WB 162 156 #endif 163 157 ret lr 158 + SYM_FUNC_END(cpu_sa110_set_pte_ext) 164 159 165 160 .type __sa110_setup, #function 166 161 __sa110_setup:
+19 -12
arch/arm/mm/proc-sa1100.S
··· 17 17 */ 18 18 #include <linux/linkage.h> 19 19 #include <linux/init.h> 20 + #include <linux/cfi_types.h> 20 21 #include <linux/pgtable.h> 21 22 #include <asm/assembler.h> 22 23 #include <asm/asm-offsets.h> ··· 37 36 /* 38 37 * cpu_sa1100_proc_init() 39 38 */ 40 - ENTRY(cpu_sa1100_proc_init) 39 + SYM_TYPED_FUNC_START(cpu_sa1100_proc_init) 41 40 mov r0, #0 42 41 mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching 43 42 mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland 44 43 ret lr 44 + SYM_FUNC_END(cpu_sa1100_proc_init) 45 45 46 46 /* 47 47 * cpu_sa1100_proc_fin() ··· 51 49 * - Disable interrupts 52 50 * - Clean and turn off caches. 53 51 */ 54 - ENTRY(cpu_sa1100_proc_fin) 52 + SYM_TYPED_FUNC_START(cpu_sa1100_proc_fin) 55 53 mcr p15, 0, ip, c15, c2, 2 @ Disable clock switching 56 54 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 57 55 bic r0, r0, #0x1000 @ ...i............ 58 56 bic r0, r0, #0x000e @ ............wca. 59 57 mcr p15, 0, r0, c1, c0, 0 @ disable caches 60 58 ret lr 59 + SYM_FUNC_END(cpu_sa1100_proc_fin) 61 60 62 61 /* 63 62 * cpu_sa1100_reset(loc) ··· 71 68 */ 72 69 .align 5 73 70 .pushsection .idmap.text, "ax" 74 - ENTRY(cpu_sa1100_reset) 71 + SYM_TYPED_FUNC_START(cpu_sa1100_reset) 75 72 mov ip, #0 76 73 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 77 74 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 83 80 bic ip, ip, #0x1100 @ ...i...s........ 
84 81 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 85 82 ret r0 86 - ENDPROC(cpu_sa1100_reset) 83 + SYM_FUNC_END(cpu_sa1100_reset) 87 84 .popsection 88 85 89 86 /* ··· 98 95 * 3 = switch to fast processor clock 99 96 */ 100 97 .align 5 101 - ENTRY(cpu_sa1100_do_idle) 98 + SYM_TYPED_FUNC_START(cpu_sa1100_do_idle) 102 99 mov r0, r0 @ 4 nop padding 103 100 mov r0, r0 104 101 mov r0, r0 ··· 114 111 mov r0, r0 @ safety 115 112 mcr p15, 0, r0, c15, c1, 2 @ enable clock switching 116 113 ret lr 114 + SYM_FUNC_END(cpu_sa1100_do_idle) 117 115 118 116 /* ================================= CACHE ================================ */ 119 117 ··· 127 123 * addr: cache-unaligned virtual address 128 124 */ 129 125 .align 5 130 - ENTRY(cpu_sa1100_dcache_clean_area) 126 + SYM_TYPED_FUNC_START(cpu_sa1100_dcache_clean_area) 131 127 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 132 128 add r0, r0, #DCACHELINESIZE 133 129 subs r1, r1, #DCACHELINESIZE 134 130 bhi 1b 135 131 ret lr 132 + SYM_FUNC_END(cpu_sa1100_dcache_clean_area) 136 133 137 134 /* =============================== PageTable ============================== */ 138 135 ··· 145 140 * pgd: new page tables 146 141 */ 147 142 .align 5 148 - ENTRY(cpu_sa1100_switch_mm) 143 + SYM_TYPED_FUNC_START(cpu_sa1100_switch_mm) 149 144 #ifdef CONFIG_MMU 150 145 str lr, [sp, #-4]! 
151 146 bl v4wb_flush_kern_cache_all @ clears IP ··· 156 151 #else 157 152 ret lr 158 153 #endif 154 + SYM_FUNC_END(cpu_sa1100_switch_mm) 159 155 160 156 /* 161 157 * cpu_sa1100_set_pte_ext(ptep, pte, ext) ··· 164 158 * Set a PTE and flush it out 165 159 */ 166 160 .align 5 167 - ENTRY(cpu_sa1100_set_pte_ext) 161 + SYM_TYPED_FUNC_START(cpu_sa1100_set_pte_ext) 168 162 #ifdef CONFIG_MMU 169 163 armv3_set_pte_ext wc_disable=0 170 164 mov r0, r0 ··· 172 166 mcr p15, 0, r0, c7, c10, 4 @ drain WB 173 167 #endif 174 168 ret lr 169 + SYM_FUNC_END(cpu_sa1100_set_pte_ext) 175 170 176 171 .globl cpu_sa1100_suspend_size 177 172 .equ cpu_sa1100_suspend_size, 4 * 3 178 173 #ifdef CONFIG_ARM_CPU_SUSPEND 179 - ENTRY(cpu_sa1100_do_suspend) 174 + SYM_TYPED_FUNC_START(cpu_sa1100_do_suspend) 180 175 stmfd sp!, {r4 - r6, lr} 181 176 mrc p15, 0, r4, c3, c0, 0 @ domain ID 182 177 mrc p15, 0, r5, c13, c0, 0 @ PID 183 178 mrc p15, 0, r6, c1, c0, 0 @ control reg 184 179 stmia r0, {r4 - r6} @ store cp regs 185 180 ldmfd sp!, {r4 - r6, pc} 186 - ENDPROC(cpu_sa1100_do_suspend) 181 + SYM_FUNC_END(cpu_sa1100_do_suspend) 187 182 188 - ENTRY(cpu_sa1100_do_resume) 183 + SYM_TYPED_FUNC_START(cpu_sa1100_do_resume) 189 184 ldmia r0, {r4 - r6} @ load cp regs 190 185 mov ip, #0 191 186 mcr p15, 0, ip, c8, c7, 0 @ flush I+D TLBs ··· 199 192 mcr p15, 0, r5, c13, c0, 0 @ PID 200 193 mov r0, r6 @ control register 201 194 b cpu_resume_mmu 202 - ENDPROC(cpu_sa1100_do_resume) 195 + SYM_FUNC_END(cpu_sa1100_do_resume) 203 196 #endif 204 197 205 198 .type __sa1100_setup, #function
+19 -12
arch/arm/mm/proc-v6.S
··· 8 8 * This is the "shell" of the ARMv6 processor support. 9 9 */ 10 10 #include <linux/init.h> 11 + #include <linux/cfi_types.h> 11 12 #include <linux/linkage.h> 12 13 #include <linux/pgtable.h> 13 14 #include <asm/assembler.h> ··· 35 34 36 35 .arch armv6 37 36 38 - ENTRY(cpu_v6_proc_init) 37 + SYM_TYPED_FUNC_START(cpu_v6_proc_init) 39 38 ret lr 39 + SYM_FUNC_END(cpu_v6_proc_init) 40 40 41 - ENTRY(cpu_v6_proc_fin) 41 + SYM_TYPED_FUNC_START(cpu_v6_proc_fin) 42 42 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 43 43 bic r0, r0, #0x1000 @ ...i............ 44 44 bic r0, r0, #0x0006 @ .............ca. 45 45 mcr p15, 0, r0, c1, c0, 0 @ disable caches 46 46 ret lr 47 + SYM_FUNC_END(cpu_v6_proc_fin) 47 48 48 49 /* 49 50 * cpu_v6_reset(loc) ··· 58 55 */ 59 56 .align 5 60 57 .pushsection .idmap.text, "ax" 61 - ENTRY(cpu_v6_reset) 58 + SYM_TYPED_FUNC_START(cpu_v6_reset) 62 59 mrc p15, 0, r1, c1, c0, 0 @ ctrl register 63 60 bic r1, r1, #0x1 @ ...............m 64 61 mcr p15, 0, r1, c1, c0, 0 @ disable MMU 65 62 mov r1, #0 66 63 mcr p15, 0, r1, c7, c5, 4 @ ISB 67 64 ret r0 68 - ENDPROC(cpu_v6_reset) 65 + SYM_FUNC_END(cpu_v6_reset) 69 66 .popsection 70 67 71 68 /* ··· 75 72 * 76 73 * IRQs are already disabled. 
77 74 */ 78 - ENTRY(cpu_v6_do_idle) 75 + SYM_TYPED_FUNC_START(cpu_v6_do_idle) 79 76 mov r1, #0 80 77 mcr p15, 0, r1, c7, c10, 4 @ DWB - WFI may enter a low-power mode 81 78 mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt 82 79 ret lr 80 + SYM_FUNC_END(cpu_v6_do_idle) 83 81 84 - ENTRY(cpu_v6_dcache_clean_area) 82 + SYM_TYPED_FUNC_START(cpu_v6_dcache_clean_area) 85 83 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 86 84 add r0, r0, #D_CACHE_LINE_SIZE 87 85 subs r1, r1, #D_CACHE_LINE_SIZE 88 86 bhi 1b 89 87 ret lr 88 + SYM_FUNC_END(cpu_v6_dcache_clean_area) 90 89 91 90 /* 92 91 * cpu_v6_switch_mm(pgd_phys, tsk) ··· 100 95 * It is assumed that: 101 96 * - we are not using split page tables 102 97 */ 103 - ENTRY(cpu_v6_switch_mm) 98 + SYM_TYPED_FUNC_START(cpu_v6_switch_mm) 104 99 #ifdef CONFIG_MMU 105 100 mov r2, #0 106 101 mmid r1, r1 @ get mm->context.id ··· 118 113 mcr p15, 0, r1, c13, c0, 1 @ set context ID 119 114 #endif 120 115 ret lr 116 + SYM_FUNC_END(cpu_v6_switch_mm) 121 117 122 118 /* 123 119 * cpu_v6_set_pte_ext(ptep, pte, ext) ··· 132 126 */ 133 127 armv6_mt_table cpu_v6 134 128 135 - ENTRY(cpu_v6_set_pte_ext) 129 + SYM_TYPED_FUNC_START(cpu_v6_set_pte_ext) 136 130 #ifdef CONFIG_MMU 137 131 armv6_set_pte_ext cpu_v6 138 132 #endif 139 133 ret lr 134 + SYM_FUNC_END(cpu_v6_set_pte_ext) 140 135 141 136 /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ 142 137 .globl cpu_v6_suspend_size 143 138 .equ cpu_v6_suspend_size, 4 * 6 144 139 #ifdef CONFIG_ARM_CPU_SUSPEND 145 - ENTRY(cpu_v6_do_suspend) 140 + SYM_TYPED_FUNC_START(cpu_v6_do_suspend) 146 141 stmfd sp!, {r4 - r9, lr} 147 142 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID 148 143 #ifdef CONFIG_MMU ··· 155 148 mrc p15, 0, r9, c1, c0, 0 @ control register 156 149 stmia r0, {r4 - r9} 157 150 ldmfd sp!, {r4- r9, pc} 158 - ENDPROC(cpu_v6_do_suspend) 151 + SYM_FUNC_END(cpu_v6_do_suspend) 159 152 160 - ENTRY(cpu_v6_do_resume) 153 + SYM_TYPED_FUNC_START(cpu_v6_do_resume) 161 154 mov ip, #0 162 155 mcr p15, 
0, ip, c7, c14, 0 @ clean+invalidate D cache 163 156 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache ··· 179 172 mcr p15, 0, ip, c7, c5, 4 @ ISB 180 173 mov r0, r9 @ control register 181 174 b cpu_resume_mmu 182 - ENDPROC(cpu_v6_do_resume) 175 + SYM_FUNC_END(cpu_v6_do_resume) 183 176 #endif 184 177 185 178 string cpu_v6_name, "ARMv6-compatible processor"
+4 -4
arch/arm/mm/proc-v7-2level.S
··· 40 40 * even on Cortex-A8 revisions not affected by 430973. 41 41 * If IBE is not set, the flush BTAC/BTB won't do anything. 42 42 */ 43 - ENTRY(cpu_v7_switch_mm) 43 + SYM_TYPED_FUNC_START(cpu_v7_switch_mm) 44 44 #ifdef CONFIG_MMU 45 45 mmid r1, r1 @ get mm->context.id 46 46 ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) ··· 59 59 isb 60 60 #endif 61 61 bx lr 62 - ENDPROC(cpu_v7_switch_mm) 62 + SYM_FUNC_END(cpu_v7_switch_mm) 63 63 64 64 /* 65 65 * cpu_v7_set_pte_ext(ptep, pte) ··· 71 71 * - pte - PTE value to store 72 72 * - ext - value for extended PTE bits 73 73 */ 74 - ENTRY(cpu_v7_set_pte_ext) 74 + SYM_TYPED_FUNC_START(cpu_v7_set_pte_ext) 75 75 #ifdef CONFIG_MMU 76 76 str r1, [r0] @ linux version 77 77 ··· 106 106 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte 107 107 #endif 108 108 bx lr 109 - ENDPROC(cpu_v7_set_pte_ext) 109 + SYM_FUNC_END(cpu_v7_set_pte_ext) 110 110 111 111 /* 112 112 * Memory region attributes with SCTLR.TRE=1
+4 -4
arch/arm/mm/proc-v7-3level.S
··· 42 42 * Set the translation table base pointer to be pgd_phys (physical address of 43 43 * the new TTB). 44 44 */ 45 - ENTRY(cpu_v7_switch_mm) 45 + SYM_TYPED_FUNC_START(cpu_v7_switch_mm) 46 46 #ifdef CONFIG_MMU 47 47 mmid r2, r2 48 48 asid r2, r2 ··· 51 51 isb 52 52 #endif 53 53 ret lr 54 - ENDPROC(cpu_v7_switch_mm) 54 + SYM_FUNC_END(cpu_v7_switch_mm) 55 55 56 56 #ifdef __ARMEB__ 57 57 #define rl r3 ··· 68 68 * - ptep - pointer to level 3 translation table entry 69 69 * - pte - PTE value to store (64-bit in r2 and r3) 70 70 */ 71 - ENTRY(cpu_v7_set_pte_ext) 71 + SYM_TYPED_FUNC_START(cpu_v7_set_pte_ext) 72 72 #ifdef CONFIG_MMU 73 73 tst rl, #L_PTE_VALID 74 74 beq 1f ··· 87 87 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte 88 88 #endif 89 89 ret lr 90 - ENDPROC(cpu_v7_set_pte_ext) 90 + SYM_FUNC_END(cpu_v7_set_pte_ext) 91 91 92 92 /* 93 93 * Memory region attributes for LPAE (defined in pgtable-3level.h):
+34 -32
arch/arm/mm/proc-v7.S
··· 7 7 * This is the "shell" of the ARMv7 processor support. 8 8 */ 9 9 #include <linux/arm-smccc.h> 10 + #include <linux/cfi_types.h> 10 11 #include <linux/init.h> 11 12 #include <linux/linkage.h> 12 13 #include <linux/pgtable.h> ··· 27 26 28 27 .arch armv7-a 29 28 30 - ENTRY(cpu_v7_proc_init) 29 + SYM_TYPED_FUNC_START(cpu_v7_proc_init) 31 30 ret lr 32 - ENDPROC(cpu_v7_proc_init) 31 + SYM_FUNC_END(cpu_v7_proc_init) 33 32 34 - ENTRY(cpu_v7_proc_fin) 33 + SYM_TYPED_FUNC_START(cpu_v7_proc_fin) 35 34 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 36 35 bic r0, r0, #0x1000 @ ...i............ 37 36 bic r0, r0, #0x0006 @ .............ca. 38 37 mcr p15, 0, r0, c1, c0, 0 @ disable caches 39 38 ret lr 40 - ENDPROC(cpu_v7_proc_fin) 39 + SYM_FUNC_END(cpu_v7_proc_fin) 41 40 42 41 /* 43 42 * cpu_v7_reset(loc, hyp) ··· 54 53 */ 55 54 .align 5 56 55 .pushsection .idmap.text, "ax" 57 - ENTRY(cpu_v7_reset) 56 + SYM_TYPED_FUNC_START(cpu_v7_reset) 58 57 mrc p15, 0, r2, c1, c0, 0 @ ctrl register 59 58 bic r2, r2, #0x1 @ ...............m 60 59 THUMB( bic r2, r2, #1 << 30 ) @ SCTLR.TE (Thumb exceptions) ··· 65 64 bne __hyp_soft_restart 66 65 #endif 67 66 bx r0 68 - ENDPROC(cpu_v7_reset) 67 + SYM_FUNC_END(cpu_v7_reset) 69 68 .popsection 70 69 71 70 /* ··· 75 74 * 76 75 * IRQs are already disabled. 
77 76 */ 78 - ENTRY(cpu_v7_do_idle) 77 + SYM_TYPED_FUNC_START(cpu_v7_do_idle) 79 78 dsb @ WFI may enter a low-power mode 80 79 wfi 81 80 ret lr 82 - ENDPROC(cpu_v7_do_idle) 81 + SYM_FUNC_END(cpu_v7_do_idle) 83 82 84 - ENTRY(cpu_v7_dcache_clean_area) 83 + SYM_TYPED_FUNC_START(cpu_v7_dcache_clean_area) 85 84 ALT_SMP(W(nop)) @ MP extensions imply L1 PTW 86 85 ALT_UP_B(1f) 87 86 ret lr ··· 92 91 bhi 2b 93 92 dsb ishst 94 93 ret lr 95 - ENDPROC(cpu_v7_dcache_clean_area) 94 + SYM_FUNC_END(cpu_v7_dcache_clean_area) 96 95 97 96 #ifdef CONFIG_ARM_PSCI 98 97 .arch_extension sec 99 - ENTRY(cpu_v7_smc_switch_mm) 98 + SYM_TYPED_FUNC_START(cpu_v7_smc_switch_mm) 100 99 stmfd sp!, {r0 - r3} 101 100 movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1 102 101 movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1 103 102 smc #0 104 103 ldmfd sp!, {r0 - r3} 105 104 b cpu_v7_switch_mm 106 - ENDPROC(cpu_v7_smc_switch_mm) 105 + SYM_FUNC_END(cpu_v7_smc_switch_mm) 107 106 .arch_extension virt 108 - ENTRY(cpu_v7_hvc_switch_mm) 107 + SYM_TYPED_FUNC_START(cpu_v7_hvc_switch_mm) 109 108 stmfd sp!, {r0 - r3} 110 109 movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1 111 110 movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1 112 111 hvc #0 113 112 ldmfd sp!, {r0 - r3} 114 113 b cpu_v7_switch_mm 115 - ENDPROC(cpu_v7_hvc_switch_mm) 114 + SYM_FUNC_END(cpu_v7_hvc_switch_mm) 116 115 #endif 117 - ENTRY(cpu_v7_iciallu_switch_mm) 116 + 117 + SYM_TYPED_FUNC_START(cpu_v7_iciallu_switch_mm) 118 118 mov r3, #0 119 119 mcr p15, 0, r3, c7, c5, 0 @ ICIALLU 120 120 b cpu_v7_switch_mm 121 - ENDPROC(cpu_v7_iciallu_switch_mm) 122 - ENTRY(cpu_v7_bpiall_switch_mm) 121 + SYM_FUNC_END(cpu_v7_iciallu_switch_mm) 122 + SYM_TYPED_FUNC_START(cpu_v7_bpiall_switch_mm) 123 123 mov r3, #0 124 124 mcr p15, 0, r3, c7, c5, 6 @ flush BTAC/BTB 125 125 b cpu_v7_switch_mm 126 - ENDPROC(cpu_v7_bpiall_switch_mm) 126 + SYM_FUNC_END(cpu_v7_bpiall_switch_mm) 127 127 128 128 string cpu_v7_name, "ARMv7 Processor" 129 129 .align ··· 133 131 .globl 
cpu_v7_suspend_size 134 132 .equ cpu_v7_suspend_size, 4 * 9 135 133 #ifdef CONFIG_ARM_CPU_SUSPEND 136 - ENTRY(cpu_v7_do_suspend) 134 + SYM_TYPED_FUNC_START(cpu_v7_do_suspend) 137 135 stmfd sp!, {r4 - r11, lr} 138 136 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID 139 137 mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID ··· 152 150 mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control 153 151 stmia r0, {r5 - r11} 154 152 ldmfd sp!, {r4 - r11, pc} 155 - ENDPROC(cpu_v7_do_suspend) 153 + SYM_FUNC_END(cpu_v7_do_suspend) 156 154 157 - ENTRY(cpu_v7_do_resume) 155 + SYM_TYPED_FUNC_START(cpu_v7_do_resume) 158 156 mov ip, #0 159 157 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache 160 158 mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID ··· 188 186 dsb 189 187 mov r0, r8 @ control register 190 188 b cpu_resume_mmu 191 - ENDPROC(cpu_v7_do_resume) 189 + SYM_FUNC_END(cpu_v7_do_resume) 192 190 #endif 193 191 194 192 .globl cpu_ca9mp_suspend_size 195 193 .equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2 196 194 #ifdef CONFIG_ARM_CPU_SUSPEND 197 - ENTRY(cpu_ca9mp_do_suspend) 195 + SYM_TYPED_FUNC_START(cpu_ca9mp_do_suspend) 198 196 stmfd sp!, {r4 - r5} 199 197 mrc p15, 0, r4, c15, c0, 1 @ Diagnostic register 200 198 mrc p15, 0, r5, c15, c0, 0 @ Power register 201 199 stmia r0!, {r4 - r5} 202 200 ldmfd sp!, {r4 - r5} 203 201 b cpu_v7_do_suspend 204 - ENDPROC(cpu_ca9mp_do_suspend) 202 + SYM_FUNC_END(cpu_ca9mp_do_suspend) 205 203 206 - ENTRY(cpu_ca9mp_do_resume) 204 + SYM_TYPED_FUNC_START(cpu_ca9mp_do_resume) 207 205 ldmia r0!, {r4 - r5} 208 206 mrc p15, 0, r10, c15, c0, 1 @ Read Diagnostic register 209 207 teq r4, r10 @ Already restored? ··· 212 210 teq r5, r10 @ Already restored? 
213 211 mcrne p15, 0, r5, c15, c0, 0 @ No, so restore it 214 212 b cpu_v7_do_resume 215 - ENDPROC(cpu_ca9mp_do_resume) 213 + SYM_FUNC_END(cpu_ca9mp_do_resume) 216 214 #endif 217 215 218 216 #ifdef CONFIG_CPU_PJ4B ··· 222 220 globl_equ cpu_pj4b_proc_fin, cpu_v7_proc_fin 223 221 globl_equ cpu_pj4b_reset, cpu_v7_reset 224 222 #ifdef CONFIG_PJ4B_ERRATA_4742 225 - ENTRY(cpu_pj4b_do_idle) 223 + SYM_TYPED_FUNC_START(cpu_pj4b_do_idle) 226 224 dsb @ WFI may enter a low-power mode 227 225 wfi 228 226 dsb @barrier 229 227 ret lr 230 - ENDPROC(cpu_pj4b_do_idle) 228 + SYM_FUNC_END(cpu_pj4b_do_idle) 231 229 #else 232 230 globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle 233 231 #endif 234 232 globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area 235 233 #ifdef CONFIG_ARM_CPU_SUSPEND 236 - ENTRY(cpu_pj4b_do_suspend) 234 + SYM_TYPED_FUNC_START(cpu_pj4b_do_suspend) 237 235 stmfd sp!, {r6 - r10} 238 236 mrc p15, 1, r6, c15, c1, 0 @ save CP15 - extra features 239 237 mrc p15, 1, r7, c15, c2, 0 @ save CP15 - Aux Func Modes Ctrl 0 ··· 243 241 stmia r0!, {r6 - r10} 244 242 ldmfd sp!, {r6 - r10} 245 243 b cpu_v7_do_suspend 246 - ENDPROC(cpu_pj4b_do_suspend) 244 + SYM_FUNC_END(cpu_pj4b_do_suspend) 247 245 248 - ENTRY(cpu_pj4b_do_resume) 246 + SYM_TYPED_FUNC_START(cpu_pj4b_do_resume) 249 247 ldmia r0!, {r6 - r10} 250 248 mcr p15, 1, r6, c15, c1, 0 @ restore CP15 - extra features 251 249 mcr p15, 1, r7, c15, c2, 0 @ restore CP15 - Aux Func Modes Ctrl 0 ··· 253 251 mcr p15, 1, r9, c15, c1, 1 @ restore CP15 - Aux Debug Modes Ctrl 1 254 252 mcr p15, 0, r10, c9, c14, 0 @ restore CP15 - PMC 255 253 b cpu_v7_do_resume 256 - ENDPROC(cpu_pj4b_do_resume) 254 + SYM_FUNC_END(cpu_pj4b_do_resume) 257 255 #endif 258 256 .globl cpu_pj4b_suspend_size 259 257 .equ cpu_pj4b_suspend_size, cpu_v7_suspend_size + 4 * 5
+21 -20
arch/arm/mm/proc-v7m.S
··· 8 8 * This is the "shell" of the ARMv7-M processor support. 9 9 */ 10 10 #include <linux/linkage.h> 11 + #include <linux/cfi_types.h> 11 12 #include <asm/assembler.h> 12 13 #include <asm/page.h> 13 14 #include <asm/v7m.h> 14 15 #include "proc-macros.S" 15 16 16 - ENTRY(cpu_v7m_proc_init) 17 + SYM_TYPED_FUNC_START(cpu_v7m_proc_init) 17 18 ret lr 18 - ENDPROC(cpu_v7m_proc_init) 19 + SYM_FUNC_END(cpu_v7m_proc_init) 19 20 20 - ENTRY(cpu_v7m_proc_fin) 21 + SYM_TYPED_FUNC_START(cpu_v7m_proc_fin) 21 22 ret lr 22 - ENDPROC(cpu_v7m_proc_fin) 23 + SYM_FUNC_END(cpu_v7m_proc_fin) 23 24 24 25 /* 25 26 * cpu_v7m_reset(loc) ··· 32 31 * - loc - location to jump to for soft reset 33 32 */ 34 33 .align 5 35 - ENTRY(cpu_v7m_reset) 34 + SYM_TYPED_FUNC_START(cpu_v7m_reset) 36 35 ret r0 37 - ENDPROC(cpu_v7m_reset) 36 + SYM_FUNC_END(cpu_v7m_reset) 38 37 39 38 /* 40 39 * cpu_v7m_do_idle() ··· 43 42 * 44 43 * IRQs are already disabled. 45 44 */ 46 - ENTRY(cpu_v7m_do_idle) 45 + SYM_TYPED_FUNC_START(cpu_v7m_do_idle) 47 46 wfi 48 47 ret lr 49 - ENDPROC(cpu_v7m_do_idle) 48 + SYM_FUNC_END(cpu_v7m_do_idle) 50 49 51 - ENTRY(cpu_v7m_dcache_clean_area) 50 + SYM_TYPED_FUNC_START(cpu_v7m_dcache_clean_area) 52 51 ret lr 53 - ENDPROC(cpu_v7m_dcache_clean_area) 52 + SYM_FUNC_END(cpu_v7m_dcache_clean_area) 54 53 55 54 /* 56 55 * There is no MMU, so here is nothing to do. 
57 56 */ 58 - ENTRY(cpu_v7m_switch_mm) 57 + SYM_TYPED_FUNC_START(cpu_v7m_switch_mm) 59 58 ret lr 60 - ENDPROC(cpu_v7m_switch_mm) 59 + SYM_FUNC_END(cpu_v7m_switch_mm) 61 60 62 61 .globl cpu_v7m_suspend_size 63 62 .equ cpu_v7m_suspend_size, 0 64 63 65 64 #ifdef CONFIG_ARM_CPU_SUSPEND 66 - ENTRY(cpu_v7m_do_suspend) 65 + SYM_TYPED_FUNC_START(cpu_v7m_do_suspend) 67 66 ret lr 68 - ENDPROC(cpu_v7m_do_suspend) 67 + SYM_FUNC_END(cpu_v7m_do_suspend) 69 68 70 - ENTRY(cpu_v7m_do_resume) 69 + SYM_TYPED_FUNC_START(cpu_v7m_do_resume) 71 70 ret lr 72 - ENDPROC(cpu_v7m_do_resume) 71 + SYM_FUNC_END(cpu_v7m_do_resume) 73 72 #endif 74 73 75 - ENTRY(cpu_cm7_dcache_clean_area) 74 + SYM_TYPED_FUNC_START(cpu_cm7_dcache_clean_area) 76 75 dcache_line_size r2, r3 77 76 movw r3, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC 78 77 movt r3, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC ··· 83 82 bhi 1b 84 83 dsb 85 84 ret lr 86 - ENDPROC(cpu_cm7_dcache_clean_area) 85 + SYM_FUNC_END(cpu_cm7_dcache_clean_area) 87 86 88 - ENTRY(cpu_cm7_proc_fin) 87 + SYM_TYPED_FUNC_START(cpu_cm7_proc_fin) 89 88 movw r2, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR) 90 89 movt r2, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR) 91 90 ldr r0, [r2] 92 91 bic r0, r0, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC) 93 92 str r0, [r2] 94 93 ret lr 95 - ENDPROC(cpu_cm7_proc_fin) 94 + SYM_FUNC_END(cpu_cm7_proc_fin) 96 95 97 96 .section ".init.text", "ax" 98 97
+18 -12
arch/arm/mm/proc-xsc3.S
··· 80 80 * 81 81 * Nothing too exciting at the moment 82 82 */ 83 - ENTRY(cpu_xsc3_proc_init) 83 + SYM_TYPED_FUNC_START(cpu_xsc3_proc_init) 84 84 ret lr 85 + SYM_FUNC_END(cpu_xsc3_proc_init) 85 86 86 87 /* 87 88 * cpu_xsc3_proc_fin() 88 89 */ 89 - ENTRY(cpu_xsc3_proc_fin) 90 + SYM_TYPED_FUNC_START(cpu_xsc3_proc_fin) 90 91 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 91 92 bic r0, r0, #0x1800 @ ...IZ........... 92 93 bic r0, r0, #0x0006 @ .............CA. 93 94 mcr p15, 0, r0, c1, c0, 0 @ disable caches 94 95 ret lr 96 + SYM_FUNC_END(cpu_xsc3_proc_fin) 95 97 96 98 /* 97 99 * cpu_xsc3_reset(loc) ··· 106 104 */ 107 105 .align 5 108 106 .pushsection .idmap.text, "ax" 109 - ENTRY(cpu_xsc3_reset) 107 + SYM_TYPED_FUNC_START(cpu_xsc3_reset) 110 108 mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE 111 109 msr cpsr_c, r1 @ reset CPSR 112 110 mrc p15, 0, r1, c1, c0, 0 @ ctrl register ··· 120 118 @ already containing those two last instructions to survive. 121 119 mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs 122 120 ret r0 123 - ENDPROC(cpu_xsc3_reset) 121 + SYM_FUNC_END(cpu_xsc3_reset) 124 122 .popsection 125 123 126 124 /* ··· 135 133 */ 136 134 .align 5 137 135 138 - ENTRY(cpu_xsc3_do_idle) 136 + SYM_TYPED_FUNC_START(cpu_xsc3_do_idle) 139 137 mov r0, #1 140 138 mcr p14, 0, r0, c7, c0, 0 @ go to idle 141 139 ret lr 140 + SYM_FUNC_END(cpu_xsc3_do_idle) 142 141 143 142 /* ================================= CACHE ================================ */ 144 143 ··· 344 341 ret lr 345 342 SYM_FUNC_END(xsc3_dma_unmap_area) 346 343 347 - ENTRY(cpu_xsc3_dcache_clean_area) 344 + SYM_TYPED_FUNC_START(cpu_xsc3_dcache_clean_area) 348 345 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line 349 346 add r0, r0, #CACHELINESIZE 350 347 subs r1, r1, #CACHELINESIZE 351 348 bhi 1b 352 349 ret lr 350 + SYM_FUNC_END(cpu_xsc3_dcache_clean_area) 353 351 354 352 /* =============================== PageTable ============================== */ 355 353 ··· 362 358 * pgd: new page tables 363 359 */ 364 360 .align 5 365 
- ENTRY(cpu_xsc3_switch_mm) 361 + SYM_TYPED_FUNC_START(cpu_xsc3_switch_mm) 366 362 clean_d_cache r1, r2 367 363 mcr p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB 368 364 mcr p15, 0, ip, c7, c10, 4 @ data write barrier ··· 371 367 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 372 368 mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs 373 369 cpwait_ret lr, ip 370 + SYM_FUNC_END(cpu_xsc3_switch_mm) 374 371 375 372 /* 376 373 * cpu_xsc3_set_pte_ext(ptep, pte, ext) ··· 397 392 .long 0x00 @ unused 398 393 399 394 .align 5 400 - ENTRY(cpu_xsc3_set_pte_ext) 395 + SYM_TYPED_FUNC_START(cpu_xsc3_set_pte_ext) 401 396 xscale_set_pte_ext_prologue 402 397 403 398 tst r1, #L_PTE_SHARED @ shared? ··· 410 405 411 406 xscale_set_pte_ext_epilogue 412 407 ret lr 408 + SYM_FUNC_END(cpu_xsc3_set_pte_ext) 413 409 414 410 .ltorg 415 411 .align ··· 418 412 .globl cpu_xsc3_suspend_size 419 413 .equ cpu_xsc3_suspend_size, 4 * 6 420 414 #ifdef CONFIG_ARM_CPU_SUSPEND 421 - ENTRY(cpu_xsc3_do_suspend) 415 + SYM_TYPED_FUNC_START(cpu_xsc3_do_suspend) 422 416 stmfd sp!, {r4 - r9, lr} 423 417 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode 424 418 mrc p15, 0, r5, c15, c1, 0 @ CP access reg ··· 429 423 bic r4, r4, #2 @ clear frequency change bit 430 424 stmia r0, {r4 - r9} @ store cp regs 431 425 ldmia sp!, {r4 - r9, pc} 432 - ENDPROC(cpu_xsc3_do_suspend) 426 + SYM_FUNC_END(cpu_xsc3_do_suspend) 433 427 434 - ENTRY(cpu_xsc3_do_resume) 428 + SYM_TYPED_FUNC_START(cpu_xsc3_do_resume) 435 429 ldmia r0, {r4 - r9} @ load cp regs 436 430 mov ip, #0 437 431 mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB ··· 447 441 mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg 448 442 mov r0, r9 @ control register 449 443 b cpu_resume_mmu 450 - ENDPROC(cpu_xsc3_do_resume) 444 + SYM_FUNC_END(cpu_xsc3_do_resume) 451 445 #endif 452 446 453 447 .type __xsc3_setup, #function
+18 -12
arch/arm/mm/proc-xscale.S
··· 112 112 * 113 113 * Nothing too exciting at the moment 114 114 */ 115 - ENTRY(cpu_xscale_proc_init) 115 + SYM_TYPED_FUNC_START(cpu_xscale_proc_init) 116 116 @ enable write buffer coalescing. Some bootloader disable it 117 117 mrc p15, 0, r1, c1, c0, 1 118 118 bic r1, r1, #1 119 119 mcr p15, 0, r1, c1, c0, 1 120 120 ret lr 121 + SYM_FUNC_END(cpu_xscale_proc_init) 121 122 122 123 /* 123 124 * cpu_xscale_proc_fin() 124 125 */ 125 - ENTRY(cpu_xscale_proc_fin) 126 + SYM_TYPED_FUNC_START(cpu_xscale_proc_fin) 126 127 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 127 128 bic r0, r0, #0x1800 @ ...IZ........... 128 129 bic r0, r0, #0x0006 @ .............CA. 129 130 mcr p15, 0, r0, c1, c0, 0 @ disable caches 130 131 ret lr 132 + SYM_FUNC_END(cpu_xscale_proc_fin) 131 133 132 134 /* 133 135 * cpu_xscale_reset(loc) ··· 144 142 */ 145 143 .align 5 146 144 .pushsection .idmap.text, "ax" 147 - ENTRY(cpu_xscale_reset) 145 + SYM_TYPED_FUNC_START(cpu_xscale_reset) 148 146 mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE 149 147 msr cpsr_c, r1 @ reset CPSR 150 148 mcr p15, 0, r1, c10, c4, 1 @ unlock I-TLB ··· 162 160 @ already containing those two last instructions to survive. 
163 161 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 164 162 ret r0 165 - ENDPROC(cpu_xscale_reset) 163 + SYM_FUNC_END(cpu_xscale_reset) 166 164 .popsection 167 165 168 166 /* ··· 177 175 */ 178 176 .align 5 179 177 180 - ENTRY(cpu_xscale_do_idle) 178 + SYM_TYPED_FUNC_START(cpu_xscale_do_idle) 181 179 mov r0, #1 182 180 mcr p14, 0, r0, c7, c0, 0 @ Go to IDLE 183 181 ret lr 182 + SYM_FUNC_END(cpu_xscale_do_idle) 184 183 185 184 /* ================================= CACHE ================================ */ 186 185 ··· 431 428 ret lr 432 429 SYM_FUNC_END(xscale_dma_unmap_area) 433 430 434 - ENTRY(cpu_xscale_dcache_clean_area) 431 + SYM_TYPED_FUNC_START(cpu_xscale_dcache_clean_area) 435 432 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 436 433 add r0, r0, #CACHELINESIZE 437 434 subs r1, r1, #CACHELINESIZE 438 435 bhi 1b 439 436 ret lr 437 + SYM_FUNC_END(cpu_xscale_dcache_clean_area) 440 438 441 439 /* =============================== PageTable ============================== */ 442 440 ··· 449 445 * pgd: new page tables 450 446 */ 451 447 .align 5 452 - ENTRY(cpu_xscale_switch_mm) 448 + SYM_TYPED_FUNC_START(cpu_xscale_switch_mm) 453 449 clean_d_cache r1, r2 454 450 mcr p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB 455 451 mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer 456 452 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 457 453 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 458 454 cpwait_ret lr, ip 455 + SYM_FUNC_END(cpu_xscale_switch_mm) 459 456 460 457 /* 461 458 * cpu_xscale_set_pte_ext(ptep, pte, ext) ··· 484 479 .long 0x00 @ unused 485 480 486 481 .align 5 487 - ENTRY(cpu_xscale_set_pte_ext) 482 + SYM_TYPED_FUNC_START(cpu_xscale_set_pte_ext) 488 483 xscale_set_pte_ext_prologue 489 484 490 485 @ ··· 502 497 503 498 xscale_set_pte_ext_epilogue 504 499 ret lr 500 + SYM_FUNC_END(cpu_xscale_set_pte_ext) 505 501 506 502 .ltorg 507 503 .align ··· 510 504 .globl cpu_xscale_suspend_size 511 505 .equ cpu_xscale_suspend_size, 4 * 6 512 506 #ifdef 
CONFIG_ARM_CPU_SUSPEND 513 - ENTRY(cpu_xscale_do_suspend) 507 + SYM_TYPED_FUNC_START(cpu_xscale_do_suspend) 514 508 stmfd sp!, {r4 - r9, lr} 515 509 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode 516 510 mrc p15, 0, r5, c15, c1, 0 @ CP access reg ··· 521 515 bic r4, r4, #2 @ clear frequency change bit 522 516 stmia r0, {r4 - r9} @ store cp regs 523 517 ldmfd sp!, {r4 - r9, pc} 524 - ENDPROC(cpu_xscale_do_suspend) 518 + SYM_FUNC_END(cpu_xscale_do_suspend) 525 519 526 - ENTRY(cpu_xscale_do_resume) 520 + SYM_TYPED_FUNC_START(cpu_xscale_do_resume) 527 521 ldmia r0, {r4 - r9} @ load cp regs 528 522 mov ip, #0 529 523 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs ··· 536 530 mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg 537 531 mov r0, r9 @ control register 538 532 b cpu_resume_mmu 539 - ENDPROC(cpu_xscale_do_resume) 533 + SYM_FUNC_END(cpu_xscale_do_resume) 540 534 #endif 541 535 542 536 .type __xscale_setup, #function