Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'fixes' into next

Merge our fixes branch from the 4.15 cycle.

Unusually, the fixes branch saw some significant features merged,
notably the RFI flush patches, so we want the code in next to be
tested against that, to avoid any surprises when the two are merged.

There's also some other work on the panic handling that was reverted
in fixes and that we now want to do properly in next, which would conflict.

And we also fix a few other minor merge conflicts.

+672 -94
+1
arch/powerpc/Kconfig
··· 168 168 select GENERIC_CLOCKEVENTS_BROADCAST if SMP 169 169 select GENERIC_CMOS_UPDATE 170 170 select GENERIC_CPU_AUTOPROBE 171 + select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64 171 172 select GENERIC_IRQ_SHOW 172 173 select GENERIC_IRQ_SHOW_LEVEL 173 174 select GENERIC_SMP_IDLE_THREAD
+6
arch/powerpc/include/asm/exception-64e.h
··· 209 209 ori r3,r3,vector_offset@l; \ 210 210 mtspr SPRN_IVOR##vector_number,r3; 211 211 212 + #define RFI_TO_KERNEL \ 213 + rfi 214 + 215 + #define RFI_TO_USER \ 216 + rfi 217 + 212 218 #endif /* _ASM_POWERPC_EXCEPTION_64E_H */ 213 219
+55 -2
arch/powerpc/include/asm/exception-64s.h
··· 74 74 */ 75 75 #define EX_R3 EX_DAR 76 76 77 + /* 78 + * Macros for annotating the expected destination of (h)rfid 79 + * 80 + * The nop instructions allow us to insert one or more instructions to flush the 81 + * L1-D cache when returning to userspace or a guest. 82 + */ 83 + #define RFI_FLUSH_SLOT \ 84 + RFI_FLUSH_FIXUP_SECTION; \ 85 + nop; \ 86 + nop; \ 87 + nop 88 + 89 + #define RFI_TO_KERNEL \ 90 + rfid 91 + 92 + #define RFI_TO_USER \ 93 + RFI_FLUSH_SLOT; \ 94 + rfid; \ 95 + b rfi_flush_fallback 96 + 97 + #define RFI_TO_USER_OR_KERNEL \ 98 + RFI_FLUSH_SLOT; \ 99 + rfid; \ 100 + b rfi_flush_fallback 101 + 102 + #define RFI_TO_GUEST \ 103 + RFI_FLUSH_SLOT; \ 104 + rfid; \ 105 + b rfi_flush_fallback 106 + 107 + #define HRFI_TO_KERNEL \ 108 + hrfid 109 + 110 + #define HRFI_TO_USER \ 111 + RFI_FLUSH_SLOT; \ 112 + hrfid; \ 113 + b hrfi_flush_fallback 114 + 115 + #define HRFI_TO_USER_OR_KERNEL \ 116 + RFI_FLUSH_SLOT; \ 117 + hrfid; \ 118 + b hrfi_flush_fallback 119 + 120 + #define HRFI_TO_GUEST \ 121 + RFI_FLUSH_SLOT; \ 122 + hrfid; \ 123 + b hrfi_flush_fallback 124 + 125 + #define HRFI_TO_UNKNOWN \ 126 + RFI_FLUSH_SLOT; \ 127 + hrfid; \ 128 + b hrfi_flush_fallback 129 + 77 130 #ifdef CONFIG_RELOCATABLE 78 131 #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ 79 132 mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ ··· 293 240 mtspr SPRN_##h##SRR0,r12; \ 294 241 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ 295 242 mtspr SPRN_##h##SRR1,r10; \ 296 - h##rfid; \ 243 + h##RFI_TO_KERNEL; \ 297 244 b . /* prevent speculative execution */ 298 245 #define EXCEPTION_PROLOG_PSERIES_1(label, h) \ 299 246 __EXCEPTION_PROLOG_PSERIES_1(label, h) ··· 307 254 mtspr SPRN_##h##SRR0,r12; \ 308 255 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ 309 256 mtspr SPRN_##h##SRR1,r10; \ 310 - h##rfid; \ 257 + h##RFI_TO_KERNEL; \ 311 258 b . /* prevent speculative execution */ 312 259 313 260 #define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \
+13
arch/powerpc/include/asm/feature-fixups.h
··· 187 187 FTR_ENTRY_OFFSET label##1b-label##3b; \ 188 188 .popsection; 189 189 190 + #define RFI_FLUSH_FIXUP_SECTION \ 191 + 951: \ 192 + .pushsection __rfi_flush_fixup,"a"; \ 193 + .align 2; \ 194 + 952: \ 195 + FTR_ENTRY_OFFSET 951b-952b; \ 196 + .popsection; 197 + 198 + 190 199 #ifndef __ASSEMBLY__ 200 + #include <linux/types.h> 201 + 202 + extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; 203 + 191 204 void apply_feature_fixups(void); 192 205 void setup_feature_keys(void); 193 206 #endif
+1
arch/powerpc/include/asm/hvcall.h
··· 353 353 #define PROC_TABLE_GTSE 0x01 354 354 355 355 #ifndef __ASSEMBLY__ 356 + #include <linux/types.h> 356 357 357 358 /** 358 359 * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments
+1
arch/powerpc/include/asm/machdep.h
··· 76 76 77 77 void __noreturn (*restart)(char *cmd); 78 78 void __noreturn (*halt)(void); 79 + void (*panic)(char *str); 79 80 void (*cpu_die)(void); 80 81 81 82 long (*time_init)(void); /* Optional, may be NULL */
+10
arch/powerpc/include/asm/paca.h
··· 232 232 struct sibling_subcore_state *sibling_subcore_state; 233 233 #endif 234 234 #endif 235 + #ifdef CONFIG_PPC_BOOK3S_64 236 + /* 237 + * rfi fallback flush must be in its own cacheline to prevent 238 + * other paca data leaking into the L1d 239 + */ 240 + u64 exrfi[EX_SIZE] __aligned(0x80); 241 + void *rfi_flush_fallback_area; 242 + u64 l1d_flush_congruence; 243 + u64 l1d_flush_sets; 244 + #endif 235 245 }; 236 246 237 247 extern void copy_mm_to_paca(struct mm_struct *mm);
+14
arch/powerpc/include/asm/setup.h
··· 24 24 25 25 void check_for_initrd(void); 26 26 void initmem_init(void); 27 + void setup_panic(void); 27 28 #define ARCH_PANIC_TIMEOUT 180 28 29 29 30 #ifdef CONFIG_PPC_PSERIES ··· 38 37 static inline void pseries_big_endian_exceptions(void) {} 39 38 static inline void pseries_little_endian_exceptions(void) {} 40 39 #endif /* CONFIG_PPC_PSERIES */ 40 + 41 + void rfi_flush_enable(bool enable); 42 + 43 + /* These are bit flags */ 44 + enum l1d_flush_type { 45 + L1D_FLUSH_NONE = 0x1, 46 + L1D_FLUSH_FALLBACK = 0x2, 47 + L1D_FLUSH_ORI = 0x4, 48 + L1D_FLUSH_MTTRIG = 0x8, 49 + }; 50 + 51 + void __init setup_rfi_flush(enum l1d_flush_type, bool enable); 52 + void do_rfi_flush_fixups(enum l1d_flush_type types); 41 53 42 54 #endif /* !__ASSEMBLY__ */ 43 55
+5
arch/powerpc/kernel/asm-offsets.c
··· 237 237 OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp); 238 238 OFFSET(PACA_IN_MCE, paca_struct, in_mce); 239 239 OFFSET(PACA_IN_NMI, paca_struct, in_nmi); 240 + OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area); 241 + OFFSET(PACA_EXRFI, paca_struct, exrfi); 242 + OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence); 243 + OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets); 244 + 240 245 #endif 241 246 OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id); 242 247 OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
+2
arch/powerpc/kernel/cpu_setup_power.S
··· 98 98 li r0,0 99 99 mtspr SPRN_PSSCR,r0 100 100 mtspr SPRN_LPID,r0 101 + mtspr SPRN_PID,r0 101 102 mfspr r3,SPRN_LPCR 102 103 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) 103 104 or r3, r3, r4 ··· 122 121 li r0,0 123 122 mtspr SPRN_PSSCR,r0 124 123 mtspr SPRN_LPID,r0 124 + mtspr SPRN_PID,r0 125 125 mfspr r3,SPRN_LPCR 126 126 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) 127 127 or r3, r3, r4
+36 -8
arch/powerpc/kernel/entry_64.S
··· 37 37 #include <asm/tm.h> 38 38 #include <asm/ppc-opcode.h> 39 39 #include <asm/export.h> 40 + #ifdef CONFIG_PPC_BOOK3S 41 + #include <asm/exception-64s.h> 42 + #else 43 + #include <asm/exception-64e.h> 44 + #endif 40 45 41 46 /* 42 47 * System calls. ··· 266 261 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 267 262 268 263 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ 264 + ld r2,GPR2(r1) 265 + ld r1,GPR1(r1) 266 + mtlr r4 267 + mtcr r5 268 + mtspr SPRN_SRR0,r7 269 + mtspr SPRN_SRR1,r8 270 + RFI_TO_USER 271 + b . /* prevent speculative execution */ 272 + 273 + /* exit to kernel */ 269 274 1: ld r2,GPR2(r1) 270 275 ld r1,GPR1(r1) 271 276 mtlr r4 272 277 mtcr r5 273 278 mtspr SPRN_SRR0,r7 274 279 mtspr SPRN_SRR1,r8 275 - RFI 280 + RFI_TO_KERNEL 276 281 b . /* prevent speculative execution */ 277 282 278 283 .Lsyscall_error: ··· 411 396 mtmsrd r10, 1 412 397 mtspr SPRN_SRR0, r11 413 398 mtspr SPRN_SRR1, r12 414 - 415 - rfid 399 + RFI_TO_USER 416 400 b . /* prevent speculative execution */ 417 401 #endif 418 402 _ASM_NOKPROBE_SYMBOL(system_call_common); ··· 891 877 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 892 878 ACCOUNT_CPU_USER_EXIT(r13, r2, r4) 893 879 REST_GPR(13, r1) 894 - 1: 880 + 895 881 mtspr SPRN_SRR1,r3 896 882 897 883 ld r2,_CCR(r1) ··· 904 890 ld r3,GPR3(r1) 905 891 ld r4,GPR4(r1) 906 892 ld r1,GPR1(r1) 893 + RFI_TO_USER 894 + b . /* prevent speculative execution */ 907 895 908 - rfid 896 + 1: mtspr SPRN_SRR1,r3 897 + 898 + ld r2,_CCR(r1) 899 + mtcrf 0xFF,r2 900 + ld r2,_NIP(r1) 901 + mtspr SPRN_SRR0,r2 902 + 903 + ld r0,GPR0(r1) 904 + ld r2,GPR2(r1) 905 + ld r3,GPR3(r1) 906 + ld r4,GPR4(r1) 907 + ld r1,GPR1(r1) 908 + RFI_TO_KERNEL 909 909 b . /* prevent speculative execution */ 910 910 911 911 #endif /* CONFIG_PPC_BOOK3E */ ··· 1109 1081 1110 1082 mtspr SPRN_SRR0,r5 1111 1083 mtspr SPRN_SRR1,r6 1112 - rfid 1084 + RFI_TO_KERNEL 1113 1085 b . 
/* prevent speculative execution */ 1114 1086 1115 1087 rtas_return_loc: ··· 1139 1111 1140 1112 mtspr SPRN_SRR0,r3 1141 1113 mtspr SPRN_SRR1,r4 1142 - rfid 1114 + RFI_TO_KERNEL 1143 1115 b . /* prevent speculative execution */ 1144 1116 _ASM_NOKPROBE_SYMBOL(__enter_rtas) 1145 1117 _ASM_NOKPROBE_SYMBOL(rtas_return_loc) ··· 1212 1184 LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE) 1213 1185 andc r11,r11,r12 1214 1186 mtsrr1 r11 1215 - rfid 1187 + RFI_TO_KERNEL 1216 1188 #endif /* CONFIG_PPC_BOOK3E */ 1217 1189 1218 1190 1: /* Return from OF */
+124 -13
arch/powerpc/kernel/exceptions-64s.S
··· 256 256 LOAD_HANDLER(r12, machine_check_handle_early) 257 257 1: mtspr SPRN_SRR0,r12 258 258 mtspr SPRN_SRR1,r11 259 - rfid 259 + RFI_TO_KERNEL 260 260 b . /* prevent speculative execution */ 261 261 2: 262 262 /* Stack overflow. Stay on emergency stack and panic. ··· 445 445 li r3,MSR_ME 446 446 andc r10,r10,r3 /* Turn off MSR_ME */ 447 447 mtspr SPRN_SRR1,r10 448 - rfid 448 + RFI_TO_KERNEL 449 449 b . 450 450 2: 451 451 /* ··· 463 463 */ 464 464 bl machine_check_queue_event 465 465 MACHINE_CHECK_HANDLER_WINDUP 466 - rfid 466 + RFI_TO_USER_OR_KERNEL 467 467 9: 468 468 /* Deliver the machine check to host kernel in V mode. */ 469 469 MACHINE_CHECK_HANDLER_WINDUP ··· 598 598 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 599 599 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ 600 600 601 + andi. r9,r11,MSR_PR // Check for exception from userspace 602 + cmpdi cr4,r9,MSR_PR // And save the result in CR4 for later 603 + 601 604 /* 602 605 * Test MSR_RI before calling slb_allocate_realmode, because the 603 606 * MSR in r11 gets clobbered. However we still want to allocate ··· 627 624 628 625 /* All done -- return from exception. */ 629 626 627 + bne cr4,1f /* returning to kernel */ 628 + 630 629 .machine push 631 630 .machine "power4" 632 631 mtcrf 0x80,r9 632 + mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */ 633 633 mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ 634 634 mtcrf 0x02,r9 /* I/D indication is in cr6 */ 635 635 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ ··· 646 640 ld r11,PACA_EXSLB+EX_R11(r13) 647 641 ld r12,PACA_EXSLB+EX_R12(r13) 648 642 ld r13,PACA_EXSLB+EX_R13(r13) 649 - rfid 643 + RFI_TO_USER 650 644 b . 
/* prevent speculative execution */ 645 + 1: 646 + .machine push 647 + .machine "power4" 648 + mtcrf 0x80,r9 649 + mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */ 650 + mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ 651 + mtcrf 0x02,r9 /* I/D indication is in cr6 */ 652 + mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ 653 + .machine pop 654 + 655 + RESTORE_CTR(r9, PACA_EXSLB) 656 + RESTORE_PPR_PACA(PACA_EXSLB, r9) 657 + mr r3,r12 658 + ld r9,PACA_EXSLB+EX_R9(r13) 659 + ld r10,PACA_EXSLB+EX_R10(r13) 660 + ld r11,PACA_EXSLB+EX_R11(r13) 661 + ld r12,PACA_EXSLB+EX_R12(r13) 662 + ld r13,PACA_EXSLB+EX_R13(r13) 663 + RFI_TO_KERNEL 664 + b . /* prevent speculative execution */ 665 + 651 666 652 667 2: std r3,PACA_EXSLB+EX_DAR(r13) 653 668 mr r3,r12 ··· 678 651 mtspr SPRN_SRR0,r10 679 652 ld r10,PACAKMSR(r13) 680 653 mtspr SPRN_SRR1,r10 681 - rfid 654 + RFI_TO_KERNEL 682 655 b . 683 656 684 657 8: std r3,PACA_EXSLB+EX_DAR(r13) ··· 689 662 mtspr SPRN_SRR0,r10 690 663 ld r10,PACAKMSR(r13) 691 664 mtspr SPRN_SRR1,r10 692 - rfid 665 + RFI_TO_KERNEL 693 666 b . 694 667 695 668 EXC_COMMON_BEGIN(unrecov_slb) ··· 934 907 mtspr SPRN_SRR0,r10 ; \ 935 908 ld r10,PACAKMSR(r13) ; \ 936 909 mtspr SPRN_SRR1,r10 ; \ 937 - rfid ; \ 910 + RFI_TO_KERNEL ; \ 938 911 b . ; /* prevent speculative execution */ 939 912 940 913 #ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH ··· 950 923 xori r12,r12,MSR_LE ; \ 951 924 mtspr SPRN_SRR1,r12 ; \ 952 925 mr r13,r9 ; \ 953 - rfid ; /* return to userspace */ \ 926 + RFI_TO_USER ; /* return to userspace */ \ 954 927 b . ; /* prevent speculative execution */ 955 928 #else 956 929 #define SYSCALL_FASTENDIAN_TEST ··· 1096 1069 mtcr r11 1097 1070 REST_GPR(11, r1) 1098 1071 ld r1,GPR1(r1) 1099 - hrfid 1072 + HRFI_TO_USER_OR_KERNEL 1100 1073 1101 1074 1: mtcr r11 1102 1075 REST_GPR(11, r1) ··· 1347 1320 ld r11,PACA_EXGEN+EX_R11(r13) 1348 1321 ld r12,PACA_EXGEN+EX_R12(r13) 1349 1322 ld r13,PACA_EXGEN+EX_R13(r13) 1350 - HRFID 1323 + HRFI_TO_UNKNOWN 1351 1324 b . 
1352 1325 #endif 1353 1326 ··· 1451 1424 ld r10,PACA_EXGEN+EX_R10(r13); \ 1452 1425 ld r11,PACA_EXGEN+EX_R11(r13); \ 1453 1426 /* returns to kernel where r13 must be set up, so don't restore it */ \ 1454 - ##_H##rfid; \ 1427 + ##_H##RFI_TO_KERNEL; \ 1455 1428 b .; \ 1456 1429 MASKED_DEC_HANDLER(_H) 1430 + 1431 + TRAMP_REAL_BEGIN(rfi_flush_fallback) 1432 + SET_SCRATCH0(r13); 1433 + GET_PACA(r13); 1434 + std r9,PACA_EXRFI+EX_R9(r13) 1435 + std r10,PACA_EXRFI+EX_R10(r13) 1436 + std r11,PACA_EXRFI+EX_R11(r13) 1437 + std r12,PACA_EXRFI+EX_R12(r13) 1438 + std r8,PACA_EXRFI+EX_R13(r13) 1439 + mfctr r9 1440 + ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) 1441 + ld r11,PACA_L1D_FLUSH_SETS(r13) 1442 + ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) 1443 + /* 1444 + * The load adresses are at staggered offsets within cachelines, 1445 + * which suits some pipelines better (on others it should not 1446 + * hurt). 1447 + */ 1448 + addi r12,r12,8 1449 + mtctr r11 1450 + DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ 1451 + 1452 + /* order ld/st prior to dcbt stop all streams with flushing */ 1453 + sync 1454 + 1: li r8,0 1455 + .rept 8 /* 8-way set associative */ 1456 + ldx r11,r10,r8 1457 + add r8,r8,r12 1458 + xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not 1459 + add r8,r8,r11 // Add 0, this creates a dependency on the ldx 1460 + .endr 1461 + addi r10,r10,128 /* 128 byte cache line */ 1462 + bdnz 1b 1463 + 1464 + mtctr r9 1465 + ld r9,PACA_EXRFI+EX_R9(r13) 1466 + ld r10,PACA_EXRFI+EX_R10(r13) 1467 + ld r11,PACA_EXRFI+EX_R11(r13) 1468 + ld r12,PACA_EXRFI+EX_R12(r13) 1469 + ld r8,PACA_EXRFI+EX_R13(r13) 1470 + GET_SCRATCH0(r13); 1471 + rfid 1472 + 1473 + TRAMP_REAL_BEGIN(hrfi_flush_fallback) 1474 + SET_SCRATCH0(r13); 1475 + GET_PACA(r13); 1476 + std r9,PACA_EXRFI+EX_R9(r13) 1477 + std r10,PACA_EXRFI+EX_R10(r13) 1478 + std r11,PACA_EXRFI+EX_R11(r13) 1479 + std r12,PACA_EXRFI+EX_R12(r13) 1480 + std r8,PACA_EXRFI+EX_R13(r13) 1481 + mfctr r9 1482 + ld 
r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) 1483 + ld r11,PACA_L1D_FLUSH_SETS(r13) 1484 + ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) 1485 + /* 1486 + * The load adresses are at staggered offsets within cachelines, 1487 + * which suits some pipelines better (on others it should not 1488 + * hurt). 1489 + */ 1490 + addi r12,r12,8 1491 + mtctr r11 1492 + DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ 1493 + 1494 + /* order ld/st prior to dcbt stop all streams with flushing */ 1495 + sync 1496 + 1: li r8,0 1497 + .rept 8 /* 8-way set associative */ 1498 + ldx r11,r10,r8 1499 + add r8,r8,r12 1500 + xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not 1501 + add r8,r8,r11 // Add 0, this creates a dependency on the ldx 1502 + .endr 1503 + addi r10,r10,128 /* 128 byte cache line */ 1504 + bdnz 1b 1505 + 1506 + mtctr r9 1507 + ld r9,PACA_EXRFI+EX_R9(r13) 1508 + ld r10,PACA_EXRFI+EX_R10(r13) 1509 + ld r11,PACA_EXRFI+EX_R11(r13) 1510 + ld r12,PACA_EXRFI+EX_R12(r13) 1511 + ld r8,PACA_EXRFI+EX_R13(r13) 1512 + GET_SCRATCH0(r13); 1513 + hrfid 1457 1514 1458 1515 /* 1459 1516 * Real mode exceptions actually use this too, but alternate ··· 1558 1447 addi r13, r13, 4 1559 1448 mtspr SPRN_SRR0, r13 1560 1449 GET_SCRATCH0(r13) 1561 - rfid 1450 + RFI_TO_KERNEL 1562 1451 b . 1563 1452 1564 1453 TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt) ··· 1570 1459 addi r13, r13, 4 1571 1460 mtspr SPRN_HSRR0, r13 1572 1461 GET_SCRATCH0(r13) 1573 - hrfid 1462 + HRFI_TO_KERNEL 1574 1463 b . 1575 1464 #endif 1576 1465
-22
arch/powerpc/kernel/fadump.c
··· 1462 1462 return; 1463 1463 } 1464 1464 1465 - static int fadump_panic_event(struct notifier_block *this, 1466 - unsigned long event, void *ptr) 1467 - { 1468 - /* 1469 - * If firmware-assisted dump has been registered then trigger 1470 - * firmware-assisted dump and let firmware handle everything 1471 - * else. If this returns, then fadump was not registered, so 1472 - * go through the rest of the panic path. 1473 - */ 1474 - crash_fadump(NULL, ptr); 1475 - 1476 - return NOTIFY_DONE; 1477 - } 1478 - 1479 - static struct notifier_block fadump_panic_block = { 1480 - .notifier_call = fadump_panic_event, 1481 - .priority = INT_MIN /* may not return; must be done last */ 1482 - }; 1483 - 1484 1465 /* 1485 1466 * Prepare for firmware-assisted dump. 1486 1467 */ ··· 1493 1512 else if (fw_dump.reserve_dump_area_size) 1494 1513 init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start); 1495 1514 fadump_init_files(); 1496 - 1497 - atomic_notifier_chain_register(&panic_notifier_list, 1498 - &fadump_panic_block); 1499 1515 1500 1516 return 1; 1501 1517 }
+1 -1
arch/powerpc/kernel/process.c
··· 1409 1409 1410 1410 printk("NIP: "REG" LR: "REG" CTR: "REG"\n", 1411 1411 regs->nip, regs->link, regs->ctr); 1412 - printk("REGS: %p TRAP: %04lx %s (%s)\n", 1412 + printk("REGS: %px TRAP: %04lx %s (%s)\n", 1413 1413 regs, regs->trap, print_tainted(), init_utsname()->release); 1414 1414 printk("MSR: "REG" ", regs->msr); 1415 1415 print_msr_bits(regs->msr);
+27 -10
arch/powerpc/kernel/setup-common.c
··· 242 242 unsigned short maj; 243 243 unsigned short min; 244 244 245 - /* We only show online cpus: disable preempt (overzealous, I 246 - * knew) to prevent cpu going down. */ 247 - preempt_disable(); 248 - if (!cpu_online(cpu_id)) { 249 - preempt_enable(); 250 - return 0; 251 - } 252 - 253 245 #ifdef CONFIG_SMP 254 246 pvr = per_cpu(cpu_pvr, cpu_id); 255 247 #else ··· 347 355 (loops_per_jiffy / (5000/HZ)) % 100); 348 356 #endif 349 357 seq_printf(m, "\n"); 350 - 351 - preempt_enable(); 352 358 353 359 /* If this is the last cpu, print the summary */ 354 360 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) ··· 696 706 } 697 707 EXPORT_SYMBOL(check_legacy_ioport); 698 708 709 + static int ppc_panic_event(struct notifier_block *this, 710 + unsigned long event, void *ptr) 711 + { 712 + /* 713 + * If firmware-assisted dump has been registered then trigger 714 + * firmware-assisted dump and let firmware handle everything else. 715 + */ 716 + crash_fadump(NULL, ptr); 717 + ppc_md.panic(ptr); /* May not return */ 718 + return NOTIFY_DONE; 719 + } 720 + 721 + static struct notifier_block ppc_panic_block = { 722 + .notifier_call = ppc_panic_event, 723 + .priority = INT_MIN /* may not return; must be done last */ 724 + }; 725 + 726 + void __init setup_panic(void) 727 + { 728 + if (!ppc_md.panic) 729 + return; 730 + atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block); 731 + } 732 + 699 733 #ifdef CONFIG_CHECK_CACHE_COHERENCY 700 734 /* 701 735 * For platforms that have configurable cache-coherency. This function ··· 863 849 864 850 /* Probe the machine type, establish ppc_md. */ 865 851 probe_machine(); 852 + 853 + /* Setup panic notifier if requested by the platform. */ 854 + setup_panic(); 866 855 867 856 /* 868 857 * Configure ppc_md.power_save (ppc32 only, 64-bit machines do
+139
arch/powerpc/kernel/setup_64.c
··· 36 36 #include <linux/memory.h> 37 37 #include <linux/nmi.h> 38 38 39 + #include <asm/debugfs.h> 39 40 #include <asm/io.h> 40 41 #include <asm/kdump.h> 41 42 #include <asm/prom.h> ··· 809 808 return 0; 810 809 } 811 810 early_initcall(disable_hardlockup_detector); 811 + 812 + #ifdef CONFIG_PPC_BOOK3S_64 813 + static enum l1d_flush_type enabled_flush_types; 814 + static void *l1d_flush_fallback_area; 815 + static bool no_rfi_flush; 816 + bool rfi_flush; 817 + 818 + static int __init handle_no_rfi_flush(char *p) 819 + { 820 + pr_info("rfi-flush: disabled on command line."); 821 + no_rfi_flush = true; 822 + return 0; 823 + } 824 + early_param("no_rfi_flush", handle_no_rfi_flush); 825 + 826 + /* 827 + * The RFI flush is not KPTI, but because users will see doco that says to use 828 + * nopti we hijack that option here to also disable the RFI flush. 829 + */ 830 + static int __init handle_no_pti(char *p) 831 + { 832 + pr_info("rfi-flush: disabling due to 'nopti' on command line.\n"); 833 + handle_no_rfi_flush(NULL); 834 + return 0; 835 + } 836 + early_param("nopti", handle_no_pti); 837 + 838 + static void do_nothing(void *unused) 839 + { 840 + /* 841 + * We don't need to do the flush explicitly, just enter+exit kernel is 842 + * sufficient, the RFI exit handlers will do the right thing. 843 + */ 844 + } 845 + 846 + void rfi_flush_enable(bool enable) 847 + { 848 + if (rfi_flush == enable) 849 + return; 850 + 851 + if (enable) { 852 + do_rfi_flush_fixups(enabled_flush_types); 853 + on_each_cpu(do_nothing, NULL, 1); 854 + } else 855 + do_rfi_flush_fixups(L1D_FLUSH_NONE); 856 + 857 + rfi_flush = enable; 858 + } 859 + 860 + static void init_fallback_flush(void) 861 + { 862 + u64 l1d_size, limit; 863 + int cpu; 864 + 865 + l1d_size = ppc64_caches.l1d.size; 866 + limit = min(ppc64_bolted_size(), ppc64_rma_size); 867 + 868 + /* 869 + * Align to L1d size, and size it at 2x L1d size, to catch possible 870 + * hardware prefetch runoff. 
We don't have a recipe for load patterns to 871 + * reliably avoid the prefetcher. 872 + */ 873 + l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit)); 874 + memset(l1d_flush_fallback_area, 0, l1d_size * 2); 875 + 876 + for_each_possible_cpu(cpu) { 877 + /* 878 + * The fallback flush is currently coded for 8-way 879 + * associativity. Different associativity is possible, but it 880 + * will be treated as 8-way and may not evict the lines as 881 + * effectively. 882 + * 883 + * 128 byte lines are mandatory. 884 + */ 885 + u64 c = l1d_size / 8; 886 + 887 + paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area; 888 + paca[cpu].l1d_flush_congruence = c; 889 + paca[cpu].l1d_flush_sets = c / 128; 890 + } 891 + } 892 + 893 + void __init setup_rfi_flush(enum l1d_flush_type types, bool enable) 894 + { 895 + if (types & L1D_FLUSH_FALLBACK) { 896 + pr_info("rfi-flush: Using fallback displacement flush\n"); 897 + init_fallback_flush(); 898 + } 899 + 900 + if (types & L1D_FLUSH_ORI) 901 + pr_info("rfi-flush: Using ori type flush\n"); 902 + 903 + if (types & L1D_FLUSH_MTTRIG) 904 + pr_info("rfi-flush: Using mttrig type flush\n"); 905 + 906 + enabled_flush_types = types; 907 + 908 + if (!no_rfi_flush) 909 + rfi_flush_enable(enable); 910 + } 911 + 912 + #ifdef CONFIG_DEBUG_FS 913 + static int rfi_flush_set(void *data, u64 val) 914 + { 915 + if (val == 1) 916 + rfi_flush_enable(true); 917 + else if (val == 0) 918 + rfi_flush_enable(false); 919 + else 920 + return -EINVAL; 921 + 922 + return 0; 923 + } 924 + 925 + static int rfi_flush_get(void *data, u64 *val) 926 + { 927 + *val = rfi_flush ? 
1 : 0; 928 + return 0; 929 + } 930 + 931 + DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n"); 932 + 933 + static __init int rfi_flush_debugfs_init(void) 934 + { 935 + debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush); 936 + return 0; 937 + } 938 + device_initcall(rfi_flush_debugfs_init); 939 + #endif 940 + 941 + ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 942 + { 943 + if (rfi_flush) 944 + return sprintf(buf, "Mitigation: RFI Flush\n"); 945 + 946 + return sprintf(buf, "Vulnerable\n"); 947 + } 948 + #endif /* CONFIG_PPC_BOOK3S_64 */
+9
arch/powerpc/kernel/vmlinux.lds.S
··· 132 132 /* Read-only data */ 133 133 RO_DATA(PAGE_SIZE) 134 134 135 + #ifdef CONFIG_PPC64 136 + . = ALIGN(8); 137 + __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) { 138 + __start___rfi_flush_fixup = .; 139 + *(__rfi_flush_fixup) 140 + __stop___rfi_flush_fixup = .; 141 + } 142 + #endif 143 + 135 144 EXCEPTION_TABLE(0) 136 145 137 146 NOTES :kernel :notes
+4 -5
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 79 79 mtmsrd r0,1 /* clear RI in MSR */ 80 80 mtsrr0 r5 81 81 mtsrr1 r6 82 - RFI 82 + RFI_TO_KERNEL 83 83 84 84 kvmppc_call_hv_entry: 85 85 BEGIN_FTR_SECTION ··· 199 199 mtmsrd r6, 1 /* Clear RI in MSR */ 200 200 mtsrr0 r8 201 201 mtsrr1 r7 202 - RFI 202 + RFI_TO_KERNEL 203 203 204 204 /* Virtual-mode return */ 205 205 .Lvirt_return: ··· 1167 1167 1168 1168 ld r0, VCPU_GPR(R0)(r4) 1169 1169 ld r4, VCPU_GPR(R4)(r4) 1170 - 1171 - hrfid 1170 + HRFI_TO_GUEST 1172 1171 b . 1173 1172 1174 1173 secondary_too_late: ··· 3320 3321 ld r4, PACAKMSR(r13) 3321 3322 mtspr SPRN_SRR0, r3 3322 3323 mtspr SPRN_SRR1, r4 3323 - rfid 3324 + RFI_TO_KERNEL 3324 3325 9: addi r3, r1, STACK_FRAME_OVERHEAD 3325 3326 bl kvmppc_bad_interrupt 3326 3327 b 9b
+5 -2
arch/powerpc/kvm/book3s_rmhandlers.S
··· 46 46 47 47 #define FUNC(name) name 48 48 49 + #define RFI_TO_KERNEL RFI 50 + #define RFI_TO_GUEST RFI 51 + 49 52 .macro INTERRUPT_TRAMPOLINE intno 50 53 51 54 .global kvmppc_trampoline_\intno ··· 144 141 GET_SCRATCH0(r13) 145 142 146 143 /* And get back into the code */ 147 - RFI 144 + RFI_TO_KERNEL 148 145 #endif 149 146 150 147 /* ··· 167 164 ori r5, r5, MSR_EE 168 165 mtsrr0 r7 169 166 mtsrr1 r6 170 - RFI 167 + RFI_TO_KERNEL 171 168 172 169 #include "book3s_segment.S"
+2 -2
arch/powerpc/kvm/book3s_segment.S
··· 156 156 PPC_LL r9, SVCPU_R9(r3) 157 157 PPC_LL r3, (SVCPU_R3)(r3) 158 158 159 - RFI 159 + RFI_TO_GUEST 160 160 kvmppc_handler_trampoline_enter_end: 161 161 162 162 ··· 407 407 cmpwi r12, BOOK3S_INTERRUPT_DOORBELL 408 408 beqa BOOK3S_INTERRUPT_DOORBELL 409 409 410 - RFI 410 + RFI_TO_KERNEL 411 411 kvmppc_handler_trampoline_exit_end:
+4 -3
arch/powerpc/kvm/book3s_xive.c
··· 725 725 726 726 /* Return the per-cpu state for state saving/migration */ 727 727 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | 728 - (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT; 728 + (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT | 729 + (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT; 729 730 } 730 731 731 732 int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) ··· 1559 1558 1560 1559 /* 1561 1560 * Restore P and Q. If the interrupt was pending, we 1562 - * force both P and Q, which will trigger a resend. 1561 + * force Q and !P, which will trigger a resend. 1563 1562 * 1564 1563 * That means that a guest that had both an interrupt 1565 1564 * pending (queued) and Q set will restore with only ··· 1567 1566 * is perfectly fine as coalescing interrupts that haven't 1568 1567 * been presented yet is always allowed. 1569 1568 */ 1570 - if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING) 1569 + if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING)) 1571 1570 state->old_p = true; 1572 1571 if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING) 1573 1572 state->old_q = true;
+41
arch/powerpc/lib/feature-fixups.c
··· 116 116 } 117 117 } 118 118 119 + #ifdef CONFIG_PPC_BOOK3S_64 120 + void do_rfi_flush_fixups(enum l1d_flush_type types) 121 + { 122 + unsigned int instrs[3], *dest; 123 + long *start, *end; 124 + int i; 125 + 126 + start = PTRRELOC(&__start___rfi_flush_fixup), 127 + end = PTRRELOC(&__stop___rfi_flush_fixup); 128 + 129 + instrs[0] = 0x60000000; /* nop */ 130 + instrs[1] = 0x60000000; /* nop */ 131 + instrs[2] = 0x60000000; /* nop */ 132 + 133 + if (types & L1D_FLUSH_FALLBACK) 134 + /* b .+16 to fallback flush */ 135 + instrs[0] = 0x48000010; 136 + 137 + i = 0; 138 + if (types & L1D_FLUSH_ORI) { 139 + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ 140 + instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/ 141 + } 142 + 143 + if (types & L1D_FLUSH_MTTRIG) 144 + instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ 145 + 146 + for (i = 0; start < end; start++, i++) { 147 + dest = (void *)start + *start; 148 + 149 + pr_devel("patching dest %lx\n", (unsigned long)dest); 150 + 151 + patch_instruction(dest, instrs[0]); 152 + patch_instruction(dest + 1, instrs[1]); 153 + patch_instruction(dest + 2, instrs[2]); 154 + } 155 + 156 + printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i); 157 + } 158 + #endif /* CONFIG_PPC_BOOK3S_64 */ 159 + 119 160 void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) 120 161 { 121 162 long *start, *end;
+6 -1
arch/powerpc/mm/fault.c
··· 153 153 return __bad_area_nosemaphore(regs, address, SEGV_PKUERR, pkey); 154 154 } 155 155 156 + static noinline int bad_access(struct pt_regs *regs, unsigned long address) 157 + { 158 + return __bad_area(regs, address, SEGV_ACCERR, 0); 159 + } 160 + 156 161 static int do_sigbus(struct pt_regs *regs, unsigned long address, 157 162 unsigned int fault) 158 163 { ··· 507 502 508 503 good_area: 509 504 if (unlikely(access_error(is_write, is_exec, vma))) 510 - return bad_area(regs, address); 505 + return bad_access(regs, address); 511 506 512 507 /* 513 508 * If for any reason at all we couldn't handle the fault,
+8 -4
arch/powerpc/perf/core-book3s.c
··· 410 410 int ret; 411 411 __u64 target; 412 412 413 - if (is_kernel_addr(addr)) 414 - return branch_target((unsigned int *)addr); 413 + if (is_kernel_addr(addr)) { 414 + if (probe_kernel_read(&instr, (void *)addr, sizeof(instr))) 415 + return 0; 416 + 417 + return branch_target(&instr); 418 + } 415 419 416 420 /* Userspace: need copy instruction here then translate it */ 417 421 pagefault_disable(); ··· 1419 1415 int n = 0; 1420 1416 struct perf_event *event; 1421 1417 1422 - if (!is_software_event(group)) { 1418 + if (group->pmu->task_ctx_nr == perf_hw_context) { 1423 1419 if (n >= max_count) 1424 1420 return -1; 1425 1421 ctrs[n] = group; ··· 1427 1423 events[n++] = group->hw.config; 1428 1424 } 1429 1425 list_for_each_entry(event, &group->sibling_list, group_entry) { 1430 - if (!is_software_event(event) && 1426 + if (event->pmu->task_ctx_nr == perf_hw_context && 1431 1427 event->state != PERF_EVENT_STATE_OFF) { 1432 1428 if (n >= max_count) 1433 1429 return -1;
+16 -1
arch/powerpc/perf/imc-pmu.c
··· 323 323 return 0; 324 324 325 325 /* 326 + * Check whether nest_imc is registered. We could end up here if the 327 + * cpuhotplug callback registration fails. i.e, callback invokes the 328 + * offline path for all successfully registered nodes. At this stage, 329 + * nest_imc pmu will not be registered and we should return here. 330 + * 331 + * We return with a zero since this is not an offline failure. And 332 + * cpuhp_setup_state() returns the actual failure reason to the caller, 333 + * which in turn will call the cleanup routine. 334 + */ 335 + if (!nest_pmus) 336 + return 0; 337 + 338 + /* 326 339 * Now that this cpu is one of the designated, 327 340 * find a next cpu a) which is online and b) in same chip. 328 341 */ ··· 1192 1179 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs); 1193 1180 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]); 1194 1181 kfree(pmu_ptr); 1195 - kfree(per_nest_pmu_arr); 1196 1182 } 1197 1183 1198 1184 /* ··· 1207 1195 if (nest_pmus == 1) { 1208 1196 cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE); 1209 1197 kfree(nest_imc_refc); 1198 + kfree(per_nest_pmu_arr); 1210 1199 } 1211 1200 1212 1201 if (nest_pmus > 0) ··· 1343 1330 ret = nest_pmu_cpumask_init(); 1344 1331 if (ret) { 1345 1332 mutex_unlock(&nest_init_lock); 1333 + kfree(nest_imc_refc); 1334 + kfree(per_nest_pmu_arr); 1346 1335 goto err_free; 1347 1336 } 1348 1337 }
+49
arch/powerpc/platforms/powernv/setup.c
··· 37 37 #include <asm/kexec.h> 38 38 #include <asm/smp.h> 39 39 #include <asm/tm.h> 40 + #include <asm/setup.h> 40 41 41 42 #include "powernv.h" 43 + 44 + static void pnv_setup_rfi_flush(void) 45 + { 46 + struct device_node *np, *fw_features; 47 + enum l1d_flush_type type; 48 + int enable; 49 + 50 + /* Default to fallback in case fw-features are not available */ 51 + type = L1D_FLUSH_FALLBACK; 52 + enable = 1; 53 + 54 + np = of_find_node_by_name(NULL, "ibm,opal"); 55 + fw_features = of_get_child_by_name(np, "fw-features"); 56 + of_node_put(np); 57 + 58 + if (fw_features) { 59 + np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2"); 60 + if (np && of_property_read_bool(np, "enabled")) 61 + type = L1D_FLUSH_MTTRIG; 62 + 63 + of_node_put(np); 64 + 65 + np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0"); 66 + if (np && of_property_read_bool(np, "enabled")) 67 + type = L1D_FLUSH_ORI; 68 + 69 + of_node_put(np); 70 + 71 + /* Enable unless firmware says NOT to */ 72 + enable = 2; 73 + np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0"); 74 + if (np && of_property_read_bool(np, "disabled")) 75 + enable--; 76 + 77 + of_node_put(np); 78 + 79 + np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1"); 80 + if (np && of_property_read_bool(np, "disabled")) 81 + enable--; 82 + 83 + of_node_put(np); 84 + of_node_put(fw_features); 85 + } 86 + 87 + setup_rfi_flush(type, enable > 0); 88 + } 42 89 43 90 static void __init pnv_setup_arch(void) 44 91 { 45 92 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); 93 + 94 + pnv_setup_rfi_flush(); 46 95 47 96 /* Initialize SMP */ 48 97 pnv_smp_init();
+15
arch/powerpc/platforms/ps3/setup.c
··· 104 104 ps3_sys_manager_halt(); /* never returns */ 105 105 } 106 106 107 + static void ps3_panic(char *str) 108 + { 109 + DBG("%s:%d %s\n", __func__, __LINE__, str); 110 + 111 + smp_send_stop(); 112 + printk("\n"); 113 + printk(" System does not reboot automatically.\n"); 114 + printk(" Please press POWER button.\n"); 115 + printk("\n"); 116 + 117 + while(1) 118 + lv1_pause(1); 119 + } 120 + 107 121 #if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \ 108 122 defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE) 109 123 static void __init prealloc(struct ps3_prealloc *p) ··· 269 255 .probe = ps3_probe, 270 256 .setup_arch = ps3_setup_arch, 271 257 .init_IRQ = ps3_init_IRQ, 258 + .panic = ps3_panic, 272 259 .get_boot_time = ps3_get_boot_time, 273 260 .set_dabr = ps3_set_dabr, 274 261 .calibrate_decr = ps3_calibrate_decr,
+18 -3
arch/powerpc/platforms/pseries/dlpar.c
··· 574 574 575 575 static CLASS_ATTR_RW(dlpar); 576 576 577 - static int __init pseries_dlpar_init(void) 577 + int __init dlpar_workqueue_init(void) 578 578 { 579 + if (pseries_hp_wq) 580 + return 0; 581 + 579 582 pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue", 580 - WQ_UNBOUND, 1); 583 + WQ_UNBOUND, 1); 584 + 585 + return pseries_hp_wq ? 0 : -ENOMEM; 586 + } 587 + 588 + static int __init dlpar_sysfs_init(void) 589 + { 590 + int rc; 591 + 592 + rc = dlpar_workqueue_init(); 593 + if (rc) 594 + return rc; 595 + 581 596 return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr); 582 597 } 583 - machine_device_initcall(pseries, pseries_dlpar_init); 598 + machine_device_initcall(pseries, dlpar_sysfs_init); 584 599
+2
arch/powerpc/platforms/pseries/pseries.h
··· 98 98 return CMO_PageSize; 99 99 } 100 100 101 + int dlpar_workqueue_init(void); 102 + 101 103 #endif /* _PSERIES_PSERIES_H */
+2 -1
arch/powerpc/platforms/pseries/ras.c
··· 69 69 /* Hotplug Events */ 70 70 np = of_find_node_by_path("/event-sources/hot-plug-events"); 71 71 if (np != NULL) { 72 - request_event_sources_irqs(np, ras_hotplug_interrupt, 72 + if (dlpar_workqueue_init() == 0) 73 + request_event_sources_irqs(np, ras_hotplug_interrupt, 73 74 "RAS_HOTPLUG"); 74 75 of_node_put(np); 75 76 }
+36
arch/powerpc/platforms/pseries/setup.c
··· 459 459 of_pci_check_probe_only(); 460 460 } 461 461 462 + static void pseries_setup_rfi_flush(void) 463 + { 464 + struct h_cpu_char_result result; 465 + enum l1d_flush_type types; 466 + bool enable; 467 + long rc; 468 + 469 + /* Enable by default */ 470 + enable = true; 471 + 472 + rc = plpar_get_cpu_characteristics(&result); 473 + if (rc == H_SUCCESS) { 474 + types = L1D_FLUSH_NONE; 475 + 476 + if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2) 477 + types |= L1D_FLUSH_MTTRIG; 478 + if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30) 479 + types |= L1D_FLUSH_ORI; 480 + 481 + /* Use fallback if nothing set in hcall */ 482 + if (types == L1D_FLUSH_NONE) 483 + types = L1D_FLUSH_FALLBACK; 484 + 485 + if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) 486 + enable = false; 487 + } else { 488 + /* Default to fallback in case hcall is not available */ 489 + types = L1D_FLUSH_FALLBACK; 490 + } 491 + 492 + setup_rfi_flush(types, enable); 493 + } 494 + 462 495 static void __init pSeries_setup_arch(void) 463 496 { 464 497 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); ··· 508 475 loops_per_jiffy = 50000000; 509 476 510 477 fwnmi_init(); 478 + 479 + pseries_setup_rfi_flush(); 511 480 512 481 /* By default, only probe PCI (can be overridden by rtas_pci) */ 513 482 pci_add_flags(PCI_PROBE_ONLY); ··· 761 726 .pcibios_fixup = pSeries_final_fixup, 762 727 .restart = rtas_restart, 763 728 .halt = rtas_halt, 729 + .panic = rtas_os_term, 764 730 .get_boot_time = rtas_get_boot_time, 765 731 .get_rtc_time = rtas_get_rtc_time, 766 732 .set_rtc_time = rtas_set_rtc_time,
+20 -16
arch/powerpc/xmon/xmon.c
··· 1590 1590 printf("kernel BUG at %s:%u!\n", 1591 1591 bug->file, bug->line); 1592 1592 #else 1593 - printf("kernel BUG at %p!\n", (void *)bug->bug_addr); 1593 + printf("kernel BUG at %px!\n", (void *)bug->bug_addr); 1594 1594 #endif 1595 1595 #endif /* CONFIG_BUG */ 1596 1596 } ··· 2329 2329 2330 2330 p = &paca[cpu]; 2331 2331 2332 - printf("paca for cpu 0x%x @ %p:\n", cpu, p); 2332 + printf("paca for cpu 0x%x @ %px:\n", cpu, p); 2333 2333 2334 2334 printf(" %-*s = %s\n", 20, "possible", cpu_possible(cpu) ? "yes" : "no"); 2335 2335 printf(" %-*s = %s\n", 20, "present", cpu_present(cpu) ? "yes" : "no"); ··· 2344 2344 DUMP(p, kernel_toc, "lx"); 2345 2345 DUMP(p, kernelbase, "lx"); 2346 2346 DUMP(p, kernel_msr, "lx"); 2347 - DUMP(p, emergency_sp, "p"); 2347 + DUMP(p, emergency_sp, "px"); 2348 2348 #ifdef CONFIG_PPC_BOOK3S_64 2349 - DUMP(p, nmi_emergency_sp, "p"); 2350 - DUMP(p, mc_emergency_sp, "p"); 2349 + DUMP(p, nmi_emergency_sp, "px"); 2350 + DUMP(p, mc_emergency_sp, "px"); 2351 2351 DUMP(p, in_nmi, "x"); 2352 2352 DUMP(p, in_mce, "x"); 2353 2353 DUMP(p, hmi_event_available, "x"); ··· 2375 2375 DUMP(p, slb_cache_ptr, "x"); 2376 2376 for (i = 0; i < SLB_CACHE_ENTRIES; i++) 2377 2377 printf(" slb_cache[%d]: = 0x%016lx\n", i, p->slb_cache[i]); 2378 + 2379 + DUMP(p, rfi_flush_fallback_area, "px"); 2380 + DUMP(p, l1d_flush_congruence, "llx"); 2381 + DUMP(p, l1d_flush_sets, "llx"); 2378 2382 #endif 2379 2383 DUMP(p, dscr_default, "llx"); 2380 2384 #ifdef CONFIG_PPC_BOOK3E 2381 - DUMP(p, pgd, "p"); 2382 - DUMP(p, kernel_pgd, "p"); 2383 - DUMP(p, tcd_ptr, "p"); 2384 - DUMP(p, mc_kstack, "p"); 2385 - DUMP(p, crit_kstack, "p"); 2386 - DUMP(p, dbg_kstack, "p"); 2385 + DUMP(p, pgd, "px"); 2386 + DUMP(p, kernel_pgd, "px"); 2387 + DUMP(p, tcd_ptr, "px"); 2388 + DUMP(p, mc_kstack, "px"); 2389 + DUMP(p, crit_kstack, "px"); 2390 + DUMP(p, dbg_kstack, "px"); 2387 2391 #endif 2388 - DUMP(p, __current, "p"); 2392 + DUMP(p, __current, "px"); 2389 2393 DUMP(p, kstack, "lx"); 2390 
2394 printf(" kstack_base = 0x%016lx\n", p->kstack & ~(THREAD_SIZE - 1)); 2391 2395 DUMP(p, stab_rr, "lx"); ··· 2407 2403 #endif 2408 2404 2409 2405 #ifdef CONFIG_PPC_POWERNV 2410 - DUMP(p, core_idle_state_ptr, "p"); 2406 + DUMP(p, core_idle_state_ptr, "px"); 2411 2407 DUMP(p, thread_idle_state, "x"); 2412 2408 DUMP(p, thread_mask, "x"); 2413 2409 DUMP(p, subcore_sibling_mask, "x"); ··· 2949 2945 (tsk->exit_state & EXIT_DEAD) ? 'E' : 2950 2946 (tsk->state & TASK_INTERRUPTIBLE) ? 'S' : '?'; 2951 2947 2952 - printf("%p %016lx %6d %6d %c %2d %s\n", tsk, 2948 + printf("%px %016lx %6d %6d %c %2d %s\n", tsk, 2953 2949 tsk->thread.ksp, 2954 2950 tsk->pid, tsk->parent->pid, 2955 2951 state, task_thread_info(tsk)->cpu, ··· 2992 2988 2993 2989 if (setjmp(bus_error_jmp) != 0) { 2994 2990 catch_memory_errors = 0; 2995 - printf("*** Error dumping pte for task %p\n", tsk); 2991 + printf("*** Error dumping pte for task %px\n", tsk); 2996 2992 return; 2997 2993 } 2998 2994 ··· 3078 3074 3079 3075 if (setjmp(bus_error_jmp) != 0) { 3080 3076 catch_memory_errors = 0; 3081 - printf("*** Error dumping task %p\n", tsk); 3077 + printf("*** Error dumping task %px\n", tsk); 3082 3078 return; 3083 3079 } 3084 3080