Merge tag 'powerpc-6.0-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

- Fix handling of PCI domains in /proc on 32-bit systems using the
recently added support for numbering buses from zero for each domain.

- A fix and a revert for some changes to use READ/WRITE_ONCE() which
caused problems with KASAN enabled due to sanitisation calls being
introduced in low-level paths that can't cope with it.

- Fix build errors on 32-bit caused by the syscall table being
misaligned sometimes.

- Two fixes to get IBM Cell native machines booting again, which had
bit-rotted while my QS22 was temporarily out of action.

- Fix the papr_scm driver to not assume the order of events returned by
the hypervisor is stable, and a related compile fix.

Thanks to Aneesh Kumar K.V, Christophe Leroy, Jordan Niethe, Kajol Jain,
Masahiro Yamada, Nathan Chancellor, Pali Rohár, Vaibhav Jain, and Zhouyi
Zhou.

* tag 'powerpc-6.0-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc/papr_scm: Ensure rc is always initialized in papr_scm_pmu_register()
Revert "powerpc/irq: Don't open code irq_soft_mask helpers"
powerpc: Fix hard_irq_disable() with sanitizer
powerpc/rtas: Fix RTAS MSR[HV] handling for Cell
Revert "powerpc: Remove unused FW_FEATURE_NATIVE references"
powerpc: align syscall table for ppc32
powerpc/pci: Enable PCI domains in /proc when PCI bus numbers are not unique
powerpc/papr_scm: Fix nvdimm event mappings

Changed files
+89 -69
arch
powerpc
include
kernel
platforms
pseries
+8
arch/powerpc/include/asm/firmware.h
··· 83 83 FW_FEATURE_POWERNV_ALWAYS = 0, 84 84 FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, 85 85 FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, 86 + FW_FEATURE_NATIVE_POSSIBLE = 0, 87 + FW_FEATURE_NATIVE_ALWAYS = 0, 86 88 FW_FEATURE_POSSIBLE = 87 89 #ifdef CONFIG_PPC_PSERIES 88 90 FW_FEATURE_PSERIES_POSSIBLE | ··· 94 92 #endif 95 93 #ifdef CONFIG_PPC_PS3 96 94 FW_FEATURE_PS3_POSSIBLE | 95 + #endif 96 + #ifdef CONFIG_PPC_HASH_MMU_NATIVE 97 + FW_FEATURE_NATIVE_POSSIBLE | 97 98 #endif 98 99 0, 99 100 FW_FEATURE_ALWAYS = ··· 108 103 #endif 109 104 #ifdef CONFIG_PPC_PS3 110 105 FW_FEATURE_PS3_ALWAYS & 106 + #endif 107 + #ifdef CONFIG_PPC_HASH_MMU_NATIVE 108 + FW_FEATURE_NATIVE_ALWAYS & 111 109 #endif 112 110 FW_FEATURE_POSSIBLE, 113 111
+38 -8
arch/powerpc/include/asm/hw_irq.h
··· 113 113 114 114 static inline notrace unsigned long irq_soft_mask_return(void) 115 115 { 116 - return READ_ONCE(local_paca->irq_soft_mask); 116 + unsigned long flags; 117 + 118 + asm volatile( 119 + "lbz %0,%1(13)" 120 + : "=r" (flags) 121 + : "i" (offsetof(struct paca_struct, irq_soft_mask))); 122 + 123 + return flags; 117 124 } 118 125 119 126 /* ··· 147 140 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) 148 141 WARN_ON(mask && !(mask & IRQS_DISABLED)); 149 142 150 - WRITE_ONCE(local_paca->irq_soft_mask, mask); 151 - barrier(); 143 + asm volatile( 144 + "stb %0,%1(13)" 145 + : 146 + : "r" (mask), 147 + "i" (offsetof(struct paca_struct, irq_soft_mask)) 148 + : "memory"); 152 149 } 153 150 154 151 static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask) 155 152 { 156 - unsigned long flags = irq_soft_mask_return(); 153 + unsigned long flags; 157 154 158 - irq_soft_mask_set(mask); 155 + #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG 156 + WARN_ON(mask && !(mask & IRQS_DISABLED)); 157 + #endif 158 + 159 + asm volatile( 160 + "lbz %0,%1(13); stb %2,%1(13)" 161 + : "=&r" (flags) 162 + : "i" (offsetof(struct paca_struct, irq_soft_mask)), 163 + "r" (mask) 164 + : "memory"); 159 165 160 166 return flags; 161 167 } 162 168 163 169 static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask) 164 170 { 165 - unsigned long flags = irq_soft_mask_return(); 171 + unsigned long flags, tmp; 166 172 167 - irq_soft_mask_set(flags | mask); 173 + asm volatile( 174 + "lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)" 175 + : "=&r" (flags), "=r" (tmp) 176 + : "i" (offsetof(struct paca_struct, irq_soft_mask)), 177 + "r" (mask) 178 + : "memory"); 179 + 180 + #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG 181 + WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED)); 182 + #endif 168 183 169 184 return flags; 170 185 } ··· 311 282 flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED); \ 312 283 local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \ 313 284 if (!arch_irqs_disabled_flags(flags)) { \ 314 - WRITE_ONCE(local_paca->saved_r1, current_stack_pointer);\ 285 + asm volatile("std%X0 %1,%0" : "=m" (local_paca->saved_r1) \ 286 + : "r" (current_stack_pointer)); \ 315 287 trace_hardirqs_off(); \ 316 288 } \ 317 289 } while(0)
+9
arch/powerpc/kernel/pci_32.c
··· 245 245 246 246 printk(KERN_INFO "PCI: Probing PCI hardware\n"); 247 247 248 + #ifdef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT 249 + /* 250 + * Enable PCI domains in /proc when PCI bus numbers are not unique 251 + * across all PCI domains to prevent conflicts. And keep PCI domain 0 252 + * backward compatible in /proc for video cards. 253 + */ 254 + pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0); 255 + #endif 256 + 248 257 if (pci_has_flag(PCI_REASSIGN_ALL_BUS)) 249 258 pci_assign_all_buses = 1; 250 259
+4
arch/powerpc/kernel/rtas_entry.S
··· 109 109 * its critical regions (as specified in PAPR+ section 7.2.1). MSR[S] 110 110 * is not impacted by RFI_TO_KERNEL (only urfid can unset it). So if 111 111 * MSR[S] is set, it will remain when entering RTAS. 112 + * If we're in HV mode, RTAS must also run in HV mode, so extract MSR_HV 113 + * from the saved MSR value and insert into the value RTAS will use. 112 114 */ 115 + extrdi r0, r6, 1, 63 - MSR_HV_LG 113 116 LOAD_REG_IMMEDIATE(r6, MSR_ME | MSR_RI) 117 + insrdi r6, r0, 1, 63 - MSR_HV_LG 114 118 115 119 li r0,0 116 120 mtmsrd r0,1 /* disable RI before using SRR0/1 */
+1
arch/powerpc/kernel/systbl.S
··· 18 18 .p2align 3 19 19 #define __SYSCALL(nr, entry) .8byte entry 20 20 #else 21 + .p2align 2 21 22 #define __SYSCALL(nr, entry) .long entry 22 23 #endif 23 24
+29 -61
arch/powerpc/platforms/pseries/papr_scm.c
··· 124 124 125 125 /* The bits which needs to be overridden */ 126 126 u64 health_bitmap_inject_mask; 127 - 128 - /* array to have event_code and stat_id mappings */ 129 - u8 *nvdimm_events_map; 130 127 }; 131 128 132 129 static int papr_scm_pmem_flush(struct nd_region *nd_region, ··· 347 350 #ifdef CONFIG_PERF_EVENTS 348 351 #define to_nvdimm_pmu(_pmu) container_of(_pmu, struct nvdimm_pmu, pmu) 349 352 353 + static const char * const nvdimm_events_map[] = { 354 + [1] = "CtlResCt", 355 + [2] = "CtlResTm", 356 + [3] = "PonSecs ", 357 + [4] = "MemLife ", 358 + [5] = "CritRscU", 359 + [6] = "HostLCnt", 360 + [7] = "HostSCnt", 361 + [8] = "HostSDur", 362 + [9] = "HostLDur", 363 + [10] = "MedRCnt ", 364 + [11] = "MedWCnt ", 365 + [12] = "MedRDur ", 366 + [13] = "MedWDur ", 367 + [14] = "CchRHCnt", 368 + [15] = "CchWHCnt", 369 + [16] = "FastWCnt", 370 + }; 371 + 350 372 static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev, u64 *count) 351 373 { 352 374 struct papr_scm_perf_stat *stat; ··· 373 357 struct papr_scm_priv *p = dev_get_drvdata(dev); 374 358 int rc, size; 375 359 360 + /* Invalid eventcode */ 361 + if (event->attr.config == 0 || event->attr.config >= ARRAY_SIZE(nvdimm_events_map)) 362 + return -EINVAL; 363 + 376 364 /* Allocate request buffer enough to hold single performance stat */ 377 365 size = sizeof(struct papr_scm_perf_stats) + 378 366 sizeof(struct papr_scm_perf_stat); 379 367 380 - if (!p || !p->nvdimm_events_map) 368 + if (!p) 381 369 return -EINVAL; 382 370 383 371 stats = kzalloc(size, GFP_KERNEL); ··· 390 370 391 371 stat = &stats->scm_statistic[0]; 392 372 memcpy(&stat->stat_id, 393 - &p->nvdimm_events_map[event->attr.config * sizeof(stat->stat_id)], 373 + nvdimm_events_map[event->attr.config], 394 374 sizeof(stat->stat_id)); 395 375 stat->stat_val = 0; 396 376 ··· 478 458 papr_scm_pmu_read(event); 479 459 } 480 460 481 - static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu *nd_pmu) 482 - { 483 
- struct papr_scm_perf_stat *stat; 484 - struct papr_scm_perf_stats *stats; 485 - u32 available_events; 486 - int index, rc = 0; 487 - 488 - if (!p->stat_buffer_len) 489 - return -ENOENT; 490 - 491 - available_events = (p->stat_buffer_len - sizeof(struct papr_scm_perf_stats)) 492 - / sizeof(struct papr_scm_perf_stat); 493 - if (available_events == 0) 494 - return -EOPNOTSUPP; 495 - 496 - /* Allocate the buffer for phyp where stats are written */ 497 - stats = kzalloc(p->stat_buffer_len, GFP_KERNEL); 498 - if (!stats) { 499 - rc = -ENOMEM; 500 - return rc; 501 - } 502 - 503 - /* Called to get list of events supported */ 504 - rc = drc_pmem_query_stats(p, stats, 0); 505 - if (rc) 506 - goto out; 507 - 508 - /* 509 - * Allocate memory and populate nvdimm_event_map. 510 - * Allocate an extra element for NULL entry 511 - */ 512 - p->nvdimm_events_map = kcalloc(available_events + 1, 513 - sizeof(stat->stat_id), 514 - GFP_KERNEL); 515 - if (!p->nvdimm_events_map) { 516 - rc = -ENOMEM; 517 - goto out; 518 - } 519 - 520 - /* Copy all stat_ids to event map */ 521 - for (index = 0, stat = stats->scm_statistic; 522 - index < available_events; index++, ++stat) { 523 - memcpy(&p->nvdimm_events_map[index * sizeof(stat->stat_id)], 524 - &stat->stat_id, sizeof(stat->stat_id)); 525 - } 526 - out: 527 - kfree(stats); 528 - return rc; 529 - } 530 - 531 461 static void papr_scm_pmu_register(struct papr_scm_priv *p) 532 462 { 533 463 struct nvdimm_pmu *nd_pmu; ··· 489 519 goto pmu_err_print; 490 520 } 491 521 492 - rc = papr_scm_pmu_check_events(p, nd_pmu); 493 - if (rc) 522 + if (!p->stat_buffer_len) { 523 + rc = -ENOENT; 494 524 goto pmu_check_events_err; 525 + } 495 526 496 527 nd_pmu->pmu.task_ctx_nr = perf_invalid_context; 497 528 nd_pmu->pmu.name = nvdimm_name(p->nvdimm); ··· 510 539 511 540 rc = register_nvdimm_pmu(nd_pmu, p->pdev); 512 541 if (rc) 513 - goto pmu_register_err; 542 + goto pmu_check_events_err; 514 543 515 544 /* 516 545 * Set archdata.priv value to nvdimm_pmu 
structure, to handle the ··· 519 548 p->pdev->archdata.priv = nd_pmu; 520 549 return; 521 550 522 - pmu_register_err: 523 - kfree(p->nvdimm_events_map); 524 551 pmu_check_events_err: 525 552 kfree(nd_pmu); 526 553 pmu_err_print: ··· 1529 1560 unregister_nvdimm_pmu(pdev->archdata.priv); 1530 1561 1531 1562 pdev->archdata.priv = NULL; 1532 - kfree(p->nvdimm_events_map); 1533 1563 kfree(p->bus_desc.provider_name); 1534 1564 kfree(p); 1535 1565