Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Drop SYNC_601() ISYNC_601() and SYNC()

Those macros are now empty at all times. Drop them.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/7990bb63fc53e460bfa94f8040184881d9e6fbc3.1601362098.git.christophe.leroy@csgroup.eu

authored by

Christophe Leroy and committed by
Michael Ellerman
d2a5cd83 e42a6400

+2 -45
-4
arch/powerpc/include/asm/ppc_asm.h
··· 382 382 #endif 383 383 384 384 /* various errata or part fixups */ 385 - #define SYNC 386 - #define SYNC_601 387 - #define ISYNC_601 388 - 389 385 #if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E) 390 386 #define MFTB(dest) \ 391 387 90: mfspr dest, SPRN_TBRL; \
+1 -16
arch/powerpc/kernel/entry_32.S
··· 234 234 mtspr SPRN_SRR0,r11 235 235 mtspr SPRN_SRR1,r10 236 236 mtlr r9 237 - SYNC 238 237 RFI /* jump to handler, enable MMU */ 239 238 240 239 #if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500) ··· 263 264 LOAD_REG_IMMEDIATE(r0, MSR_KERNEL) 264 265 mtspr SPRN_SRR0,r12 265 266 mtspr SPRN_SRR1,r0 266 - SYNC 267 267 RFI 268 268 269 269 reenable_mmu: ··· 321 323 #endif 322 324 mtspr SPRN_SRR0,r9 323 325 mtspr SPRN_SRR1,r10 324 - SYNC 325 326 RFI 326 327 _ASM_NOKPROBE_SYMBOL(stack_ovf) 327 328 #endif ··· 408 411 /* disable interrupts so current_thread_info()->flags can't change */ 409 412 LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) /* doesn't include MSR_EE */ 410 413 /* Note: We don't bother telling lockdep about it */ 411 - SYNC 412 414 mtmsr r10 413 415 lwz r9,TI_FLAGS(r2) 414 416 li r8,-MAX_ERRNO ··· 470 474 #endif 471 475 mtspr SPRN_SRR0,r7 472 476 mtspr SPRN_SRR1,r8 473 - SYNC 474 477 RFI 475 478 _ASM_NOKPROBE_SYMBOL(syscall_exit_finish) 476 479 #ifdef CONFIG_44x ··· 562 567 * lockdep as we are supposed to have IRQs on at this point 563 568 */ 564 569 ori r10,r10,MSR_EE 565 - SYNC 566 570 mtmsr r10 567 571 568 572 /* Save NVGPRS if they're not saved already */ ··· 600 606 #endif 601 607 mtspr SPRN_SRR0, r9 602 608 mtspr SPRN_SRR1, r10 603 - SYNC 604 609 RFI 605 610 _ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall) 606 611 ··· 803 810 REST_GPR(9, r11) 804 811 REST_GPR(12, r11) 805 812 lwz r11,GPR11(r11) 806 - SYNC 807 813 RFI 808 814 _ASM_NOKPROBE_SYMBOL(fast_exception_return) 809 815 ··· 864 872 * from the interrupt. */ 865 873 /* Note: We don't bother telling lockdep about it */ 866 874 LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) 867 - SYNC /* Some chip revs have problems here... */ 868 875 mtmsr r10 /* disable interrupts */ 869 876 870 877 lwz r3,_MSR(r1) /* Returning to user mode? */ ··· 1026 1035 * exc_exit_restart below. 
-- paulus 1027 1036 */ 1028 1037 LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI) 1029 - SYNC 1030 1038 mtmsr r10 /* clear the RI bit */ 1031 1039 .globl exc_exit_restart 1032 1040 exc_exit_restart: ··· 1036 1046 lwz r1,GPR1(r1) 1037 1047 .globl exc_exit_restart_end 1038 1048 exc_exit_restart_end: 1039 - SYNC 1040 1049 RFI 1041 1050 _ASM_NOKPROBE_SYMBOL(exc_exit_restart) 1042 1051 _ASM_NOKPROBE_SYMBOL(exc_exit_restart_end) ··· 1263 1274 mfmsr r10 1264 1275 #endif 1265 1276 ori r10,r10,MSR_EE 1266 - SYNC 1267 1277 mtmsr r10 /* hard-enable interrupts */ 1268 1278 bl schedule 1269 1279 recheck: ··· 1271 1283 * TI_FLAGS aren't advertised. 1272 1284 */ 1273 1285 LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) 1274 - SYNC 1275 1286 mtmsr r10 /* disable interrupts */ 1276 1287 lwz r9,TI_FLAGS(r2) 1277 1288 andi. r0,r9,_TIF_NEED_RESCHED ··· 1279 1292 beq restore_user 1280 1293 do_user_signal: /* r10 contains MSR_KERNEL here */ 1281 1294 ori r10,r10,MSR_EE 1282 - SYNC 1283 1295 mtmsr r10 /* hard-enable interrupts */ 1284 1296 /* save r13-r31 in the exception frame, if not already done */ 1285 1297 lwz r3,_TRAP(r1) ··· 1368 1382 mfmsr r9 1369 1383 stw r9,8(r1) 1370 1384 LOAD_REG_IMMEDIATE(r0,MSR_KERNEL) 1371 - SYNC /* disable interrupts so SRR0/1 */ 1372 - mtmsr r0 /* don't get trashed */ 1385 + mtmsr r0 /* disable interrupts so SRR0/1 don't get trashed */ 1373 1386 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR) 1374 1387 mtlr r6 1375 1388 stw r7, THREAD + RTAS_SP(r2)
-1
arch/powerpc/kernel/fpu.S
··· 87 87 oris r5,r5,MSR_VSX@h 88 88 END_FTR_SECTION_IFSET(CPU_FTR_VSX) 89 89 #endif 90 - SYNC 91 90 MTMSRD(r5) /* enable use of fpu now */ 92 91 isync 93 92 /* enable use of FP after return */
-9
arch/powerpc/kernel/head_32.S
··· 219 219 lis r0,start_here@h 220 220 ori r0,r0,start_here@l 221 221 mtspr SPRN_SRR0,r0 222 - SYNC 223 222 RFI /* enables MMU */ 224 223 225 224 /* ··· 783 784 mtcr r11 784 785 lwz r11, THR11(r10) 785 786 mfspr r10, SPRN_SPRG_SCRATCH0 786 - SYNC 787 787 RFI 788 788 789 789 1: /* ISI */ 790 790 mtcr r11 791 791 mfspr r11, SPRN_SPRG_SCRATCH1 792 792 mfspr r10, SPRN_SPRG_SCRATCH0 793 - SYNC 794 793 RFI 795 794 796 795 stack_overflow: ··· 879 882 set to map the 0xf0000000 - 0xffffffff region */ 880 883 mfmsr r0 881 884 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */ 882 - SYNC 883 885 mtmsr r0 884 886 isync 885 887 ··· 926 930 ori r3,r3,start_secondary@l 927 931 mtspr SPRN_SRR0,r3 928 932 mtspr SPRN_SRR1,r4 929 - SYNC 930 933 RFI 931 934 #endif /* CONFIG_SMP */ 932 935 ··· 1069 1074 .align 4 1070 1075 mtspr SPRN_SRR0,r4 1071 1076 mtspr SPRN_SRR1,r3 1072 - SYNC 1073 1077 RFI 1074 1078 /* Load up the kernel context */ 1075 1079 2: bl load_up_mmu ··· 1093 1099 ori r3,r3,start_kernel@l 1094 1100 mtspr SPRN_SRR0,r3 1095 1101 mtspr SPRN_SRR1,r4 1096 - SYNC 1097 1102 RFI 1098 1103 1099 1104 /* ··· 1210 1217 .align 4 1211 1218 mtspr SPRN_SRR0, r4 1212 1219 mtspr SPRN_SRR1, r3 1213 - SYNC 1214 1220 RFI 1215 1221 1: bl clear_bats 1216 1222 lis r3, BATS@ha ··· 1229 1237 mtmsr r3 1230 1238 mtspr SPRN_SRR0, r7 1231 1239 mtspr SPRN_SRR1, r6 1232 - SYNC 1233 1240 RFI 1234 1241 1235 1242 flush_tlbs:
-1
arch/powerpc/kernel/head_32.h
··· 222 222 #endif 223 223 mtspr SPRN_SRR1,r10 224 224 mtspr SPRN_SRR0,r11 225 - SYNC 226 225 RFI /* jump to handler, enable MMU */ 227 226 99: b ret_from_kernel_syscall 228 227 .endm
+1 -2
arch/powerpc/kernel/l2cr_6xx.S
··· 256 256 sync 257 257 258 258 /* Restore MSR (restores EE and DR bits to original state) */ 259 - SYNC 260 259 mtmsr r7 261 260 isync 262 261 ··· 376 377 1: bdnz 1b 377 378 378 379 /* Restore MSR (restores EE and DR bits to original state) */ 379 - 4: SYNC 380 + 4: 380 381 mtmsr r7 381 382 isync 382 383 blr
-12
arch/powerpc/mm/book3s32/hash_low.S
··· 199 199 * covered by a BAT). -- paulus 200 200 */ 201 201 mfmsr r9 202 - SYNC 203 202 rlwinm r0,r9,0,17,15 /* clear bit 16 (MSR_EE) */ 204 203 rlwinm r0,r0,0,28,26 /* clear MSR_DR */ 205 204 mtmsr r0 206 - SYNC_601 207 205 isync 208 206 209 207 #ifdef CONFIG_SMP ··· 260 262 261 263 /* reenable interrupts and DR */ 262 264 mtmsr r9 263 - SYNC_601 264 265 isync 265 266 266 267 lwz r0,4(r1) ··· 503 506 * covered by a BAT). -- paulus 504 507 */ 505 508 mfmsr r10 506 - SYNC 507 509 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ 508 510 rlwinm r0,r0,0,28,26 /* clear MSR_DR */ 509 511 mtmsr r0 510 - SYNC_601 511 512 isync 512 513 513 514 /* First find a PTE in the range that has _PAGE_HASHPTE set */ ··· 624 629 #endif 625 630 626 631 19: mtmsr r10 627 - SYNC_601 628 632 isync 629 633 blr 630 634 EXPORT_SYMBOL(flush_hash_pages) ··· 637 643 lwz r8,TASK_CPU(r2) 638 644 oris r8,r8,11 639 645 mfmsr r10 640 - SYNC 641 646 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ 642 647 rlwinm r0,r0,0,28,26 /* clear DR */ 643 648 mtmsr r0 644 - SYNC_601 645 649 isync 646 650 lis r9,mmu_hash_lock@h 647 651 ori r9,r9,mmu_hash_lock@l ··· 656 664 li r0,0 657 665 stw r0,0(r9) /* clear mmu_hash_lock */ 658 666 mtmsr r10 659 - SYNC_601 660 667 isync 661 668 #else /* CONFIG_SMP */ 662 669 tlbie r3 ··· 672 681 lwz r8,TASK_CPU(r2) 673 682 oris r8,r8,10 674 683 mfmsr r10 675 - SYNC 676 684 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ 677 685 rlwinm r0,r0,0,28,26 /* clear DR */ 678 686 mtmsr r0 679 - SYNC_601 680 687 isync 681 688 lis r9,mmu_hash_lock@h 682 689 ori r9,r9,mmu_hash_lock@l ··· 698 709 li r0,0 699 710 stw r0,0(r9) /* clear mmu_hash_lock */ 700 711 mtmsr r10 701 - SYNC_601 702 712 isync 703 713 #endif /* CONFIG_SMP */ 704 714 blr