Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux

Pull more powerpc updates from Michael Ellerman:
"Here's some more updates for powerpc for 3.18.

They are a bit late I know, though most are actually bug fixes. In my
defence I nearly cut the top of my finger off last weekend in a
gruesome bike maintenance accident, so I spent a good part of the week
waiting around for doctors. True story, I can send photos if you like :)

Probably the most interesting fix is the sys_call_table one, which
enables syscall tracing for powerpc. There's a fix for HMI handling
for old firmware, more endian fixes for firmware interfaces, more EEH
fixes, Anton fixed our routine that gets the current stack pointer,
and a few other misc bits"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux: (22 commits)
powerpc: Only do dynamic DMA zone limits on platforms that need it
powerpc: sync pseries_le_defconfig with pseries_defconfig
powerpc: Add printk levels to setup_system output
powerpc/vphn: NUMA node code expects big-endian
powerpc/msi: Use WARN_ON() in msi bitmap selftests
powerpc/msi: Fix the msi bitmap alignment tests
powerpc/eeh: Block CFG upon frozen Shiner adapter
powerpc/eeh: Don't collect logs on PE with blocked config space
powerpc/eeh: Block PCI config access upon frozen PE
powerpc/pseries: Drop config requests in EEH accessors
powerpc/powernv: Drop config requests in EEH accessors
powerpc/eeh: Rename flag EEH_PE_RESET to EEH_PE_CFG_BLOCKED
powerpc/eeh: Fix condition for isolated state
powerpc/pseries: Make CPU hotplug path endian safe
powerpc/pseries: Use dump_stack instead of show_stack
powerpc: Rename __get_SP() to current_stack_pointer()
powerpc: Reimplement __get_SP() as a function not a define
powerpc/numa: Add ability to disable and debug topology updates
powerpc/numa: check error return from proc_create
powerpc/powernv: Fallback to old HMI handling behavior for old firmware
...

+261 -119
+6
Documentation/kernel-parameters.txt
··· 3465 3465 e.g. base its process migration decisions on it. 3466 3466 Default is on. 3467 3467 3468 + topology_updates= [KNL, PPC, NUMA] 3469 + Format: {off} 3470 + Specify if the kernel should ignore (off) 3471 + topology updates sent by the hypervisor to this 3472 + LPAR. 3473 + 3468 3474 tp720= [HW,PS2] 3469 3475 3470 3476 tpm_suspend_pcr=[HW,TPM]
+6 -1
arch/powerpc/configs/pseries_le_defconfig
··· 48 48 CONFIG_IRQ_ALL_CPUS=y 49 49 CONFIG_MEMORY_HOTPLUG=y 50 50 CONFIG_MEMORY_HOTREMOVE=y 51 - CONFIG_CMA=y 52 51 CONFIG_PPC_64K_PAGES=y 53 52 CONFIG_PPC_SUBPAGE_PROT=y 54 53 CONFIG_SCHED_SMT=y ··· 137 138 CONFIG_NETPOLL_TRAP=y 138 139 CONFIG_TUN=m 139 140 CONFIG_VIRTIO_NET=m 141 + CONFIG_VHOST_NET=m 140 142 CONFIG_VORTEX=y 141 143 CONFIG_ACENIC=m 142 144 CONFIG_ACENIC_OMIT_TIGON_I=y ··· 303 303 # CONFIG_CRYPTO_ANSI_CPRNG is not set 304 304 CONFIG_CRYPTO_DEV_NX=y 305 305 CONFIG_CRYPTO_DEV_NX_ENCRYPT=m 306 + CONFIG_VIRTUALIZATION=y 307 + CONFIG_KVM_BOOK3S_64=m 308 + CONFIG_KVM_BOOK3S_64_HV=y 309 + CONFIG_TRANSPARENT_HUGEPAGE=y 310 + CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y 306 311 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+2 -1
arch/powerpc/include/asm/eeh.h
··· 71 71 72 72 #define EEH_PE_ISOLATED (1 << 0) /* Isolated PE */ 73 73 #define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */ 74 - #define EEH_PE_RESET (1 << 2) /* PE reset in progress */ 74 + #define EEH_PE_CFG_BLOCKED (1 << 2) /* Block config access */ 75 75 76 76 #define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */ 77 + #define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */ 77 78 78 79 struct eeh_pe { 79 80 int type; /* PE type: PHB/Bus/Device */
+1 -1
arch/powerpc/include/asm/perf_event.h
··· 34 34 do { \ 35 35 (regs)->result = 0; \ 36 36 (regs)->nip = __ip; \ 37 - (regs)->gpr[1] = *(unsigned long *)__get_SP(); \ 37 + (regs)->gpr[1] = current_stack_pointer(); \ 38 38 asm volatile("mfmsr %0" : "=r" ((regs)->msr)); \ 39 39 } while (0) 40 40 #endif
+1 -2
arch/powerpc/include/asm/reg.h
··· 1265 1265 1266 1266 #define proc_trap() asm volatile("trap") 1267 1267 1268 - #define __get_SP() ({unsigned long sp; \ 1269 - asm volatile("mr %0,1": "=r" (sp)); sp;}) 1268 + extern unsigned long current_stack_pointer(void); 1270 1269 1271 1270 extern unsigned long scom970_read(unsigned int address); 1272 1271 extern void scom970_write(unsigned int address, unsigned long value);
+1 -1
arch/powerpc/include/asm/syscall.h
··· 19 19 20 20 /* ftrace syscalls requires exporting the sys_call_table */ 21 21 #ifdef CONFIG_FTRACE_SYSCALLS 22 - extern const unsigned long *sys_call_table; 22 + extern const unsigned long sys_call_table[]; 23 23 #endif /* CONFIG_FTRACE_SYSCALLS */ 24 24 25 25 static inline long syscall_get_nr(struct task_struct *task,
+8
arch/powerpc/kernel/dma.c
··· 53 53 #else 54 54 struct page *page; 55 55 int node = dev_to_node(dev); 56 + #ifdef CONFIG_FSL_SOC 56 57 u64 pfn = get_pfn_limit(dev); 57 58 int zone; 59 + 60 + /* 61 + * This code should be OK on other platforms, but we have drivers that 62 + * don't set coherent_dma_mask. As a workaround we just ifdef it. This 63 + * whole routine needs some serious cleanup. 64 + */ 58 65 59 66 zone = dma_pfn_limit_to_zone(pfn); 60 67 if (zone < 0) { ··· 80 73 break; 81 74 #endif 82 75 }; 76 + #endif /* CONFIG_FSL_SOC */ 83 77 84 78 /* ignore region specifiers */ 85 79 flag &= ~(__GFP_HIGHMEM);
+13 -6
arch/powerpc/kernel/eeh.c
··· 257 257 struct eeh_dev *edev, *tmp; 258 258 size_t *plen = flag; 259 259 260 + /* If the PE's config space is blocked, 0xFF's will be 261 + * returned. It's pointless to collect the log in this 262 + * case. 263 + */ 264 + if (pe->state & EEH_PE_CFG_BLOCKED) 265 + return NULL; 266 + 260 267 eeh_pe_for_each_dev(pe, edev, tmp) 261 268 *plen += eeh_dump_dev_log(edev, pci_regs_buf + *plen, 262 269 EEH_PCI_REGS_LOG_LEN - *plen); ··· 680 673 switch (state) { 681 674 case pcie_deassert_reset: 682 675 eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); 683 - eeh_pe_state_clear(pe, EEH_PE_RESET); 676 + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); 684 677 break; 685 678 case pcie_hot_reset: 686 - eeh_pe_state_mark(pe, EEH_PE_RESET); 679 + eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); 687 680 eeh_ops->reset(pe, EEH_RESET_HOT); 688 681 break; 689 682 case pcie_warm_reset: 690 - eeh_pe_state_mark(pe, EEH_PE_RESET); 683 + eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); 691 684 eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); 692 685 break; 693 686 default: 694 - eeh_pe_state_clear(pe, EEH_PE_RESET); 687 + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); 695 688 return -EINVAL; 696 689 }; 697 690 ··· 1530 1523 switch (option) { 1531 1524 case EEH_RESET_DEACTIVATE: 1532 1525 ret = eeh_ops->reset(pe, option); 1533 - eeh_pe_state_clear(pe, EEH_PE_RESET); 1526 + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); 1534 1527 if (ret) 1535 1528 break; 1536 1529 ··· 1545 1538 */ 1546 1539 eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); 1547 1540 1548 - eeh_pe_state_mark(pe, EEH_PE_RESET); 1541 + eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); 1549 1542 ret = eeh_ops->reset(pe, option); 1550 1543 break; 1551 1544 default:
+6 -6
arch/powerpc/kernel/eeh_driver.c
··· 528 528 eeh_pe_dev_traverse(pe, eeh_report_error, &result); 529 529 530 530 /* Issue reset */ 531 - eeh_pe_state_mark(pe, EEH_PE_RESET); 531 + eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); 532 532 ret = eeh_reset_pe(pe); 533 533 if (ret) { 534 - eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_RESET); 534 + eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_CFG_BLOCKED); 535 535 return ret; 536 536 } 537 - eeh_pe_state_clear(pe, EEH_PE_RESET); 537 + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); 538 538 539 539 /* Unfreeze the PE */ 540 540 ret = eeh_clear_pe_frozen_state(pe, true); ··· 601 601 * config accesses. So we prefer to block them. However, controlled 602 602 * PCI config accesses initiated from EEH itself are allowed. 603 603 */ 604 - eeh_pe_state_mark(pe, EEH_PE_RESET); 604 + eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); 605 605 rc = eeh_reset_pe(pe); 606 606 if (rc) { 607 - eeh_pe_state_clear(pe, EEH_PE_RESET); 607 + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); 608 608 return rc; 609 609 } 610 610 ··· 613 613 /* Restore PE */ 614 614 eeh_ops->configure_bridge(pe); 615 615 eeh_pe_restore_bars(pe); 616 - eeh_pe_state_clear(pe, EEH_PE_RESET); 616 + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); 617 617 618 618 /* Clear frozen state */ 619 619 rc = eeh_clear_pe_frozen_state(pe, false);
+9 -1
arch/powerpc/kernel/eeh_pe.c
··· 525 525 pe->state |= state; 526 526 527 527 /* Offline PCI devices if applicable */ 528 - if (state != EEH_PE_ISOLATED) 528 + if (!(state & EEH_PE_ISOLATED)) 529 529 return NULL; 530 530 531 531 eeh_pe_for_each_dev(pe, edev, tmp) { ··· 533 533 if (pdev) 534 534 pdev->error_state = pci_channel_io_frozen; 535 535 } 536 + 537 + /* Block PCI config access if required */ 538 + if (pe->state & EEH_PE_CFG_RESTRICTED) 539 + pe->state |= EEH_PE_CFG_BLOCKED; 536 540 537 541 return NULL; 538 542 } ··· 614 610 615 611 pdev->error_state = pci_channel_io_normal; 616 612 } 613 + 614 + /* Unblock PCI config access if required */ 615 + if (pe->state & EEH_PE_CFG_RESTRICTED) 616 + pe->state &= ~EEH_PE_CFG_BLOCKED; 617 617 618 618 return NULL; 619 619 }
-5
arch/powerpc/kernel/exceptions-64s.S
··· 1270 1270 addi r3,r1,STACK_FRAME_OVERHEAD 1271 1271 bl hmi_exception_realmode 1272 1272 /* Windup the stack. */ 1273 - /* Clear MSR_RI before setting SRR0 and SRR1. */ 1274 - li r0,MSR_RI 1275 - mfmsr r9 /* get MSR value */ 1276 - andc r9,r9,r0 1277 - mtmsrd r9,1 /* Clear MSR_RI */ 1278 1273 /* Move original HSRR0 and HSRR1 into the respective regs */ 1279 1274 ld r9,_MSR(r1) 1280 1275 mtspr SPRN_HSRR1,r9
+1 -1
arch/powerpc/kernel/irq.c
··· 466 466 #ifdef CONFIG_DEBUG_STACKOVERFLOW 467 467 long sp; 468 468 469 - sp = __get_SP() & (THREAD_SIZE-1); 469 + sp = current_stack_pointer() & (THREAD_SIZE-1); 470 470 471 471 /* check for stack overflow: is there less than 2KB free? */ 472 472 if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
+4
arch/powerpc/kernel/misc.S
··· 114 114 mtlr r0 115 115 mr r3,r4 116 116 blr 117 + 118 + _GLOBAL(current_stack_pointer) 119 + PPC_LL r3,0(r1) 120 + blr
+2
arch/powerpc/kernel/ppc_ksyms.c
··· 41 41 #ifdef CONFIG_EPAPR_PARAVIRT 42 42 EXPORT_SYMBOL(epapr_hypercall_start); 43 43 #endif 44 + 45 + EXPORT_SYMBOL(current_stack_pointer);
+1 -1
arch/powerpc/kernel/process.c
··· 1545 1545 tsk = current; 1546 1546 if (sp == 0) { 1547 1547 if (tsk == current) 1548 - asm("mr %0,1" : "=r" (sp)); 1548 + sp = current_stack_pointer(); 1549 1549 else 1550 1550 sp = tsk->thread.ksp; 1551 1551 }
+11 -19
arch/powerpc/kernel/rtas_pci.c
··· 66 66 return PCIBIOS_DEVICE_NOT_FOUND; 67 67 if (!config_access_valid(pdn, where)) 68 68 return PCIBIOS_BAD_REGISTER_NUMBER; 69 + #ifdef CONFIG_EEH 70 + if (pdn->edev && pdn->edev->pe && 71 + (pdn->edev->pe->state & EEH_PE_CFG_BLOCKED)) 72 + return PCIBIOS_SET_FAILED; 73 + #endif 69 74 70 75 addr = rtas_config_addr(pdn->busno, pdn->devfn, where); 71 76 buid = pdn->phb->buid; ··· 95 90 struct device_node *busdn, *dn; 96 91 struct pci_dn *pdn; 97 92 bool found = false; 98 - #ifdef CONFIG_EEH 99 - struct eeh_dev *edev; 100 - #endif 101 93 int ret; 102 94 103 95 /* Search only direct children of the bus */ ··· 111 109 112 110 if (!found) 113 111 return PCIBIOS_DEVICE_NOT_FOUND; 114 - #ifdef CONFIG_EEH 115 - edev = of_node_to_eeh_dev(dn); 116 - if (edev && edev->pe && edev->pe->state & EEH_PE_RESET) 117 - return PCIBIOS_DEVICE_NOT_FOUND; 118 - #endif 119 112 120 113 ret = rtas_read_config(pdn, where, size, val); 121 114 if (*val == EEH_IO_ERROR_VALUE(size) && ··· 129 132 return PCIBIOS_DEVICE_NOT_FOUND; 130 133 if (!config_access_valid(pdn, where)) 131 134 return PCIBIOS_BAD_REGISTER_NUMBER; 135 + #ifdef CONFIG_EEH 136 + if (pdn->edev && pdn->edev->pe && 137 + (pdn->edev->pe->state & EEH_PE_CFG_BLOCKED)) 138 + return PCIBIOS_SET_FAILED; 139 + #endif 132 140 133 141 addr = rtas_config_addr(pdn->busno, pdn->devfn, where); 134 142 buid = pdn->phb->buid; ··· 157 155 struct device_node *busdn, *dn; 158 156 struct pci_dn *pdn; 159 157 bool found = false; 160 - #ifdef CONFIG_EEH 161 - struct eeh_dev *edev; 162 - #endif 163 - int ret; 164 158 165 159 /* Search only direct children of the bus */ 166 160 busdn = pci_bus_to_OF_node(bus); ··· 171 173 172 174 if (!found) 173 175 return PCIBIOS_DEVICE_NOT_FOUND; 174 - #ifdef CONFIG_EEH 175 - edev = of_node_to_eeh_dev(dn); 176 - if (edev && edev->pe && (edev->pe->state & EEH_PE_RESET)) 177 - return PCIBIOS_DEVICE_NOT_FOUND; 178 - #endif 179 - ret = rtas_write_config(pdn, where, size, val); 180 176 181 - return ret; 177 + return 
rtas_write_config(pdn, where, size, val); 182 178 } 183 179 184 180 static struct pci_ops rtas_pci_ops = {
+16 -16
arch/powerpc/kernel/setup_64.c
··· 522 522 smp_release_cpus(); 523 523 #endif 524 524 525 - printk("Starting Linux PPC64 %s\n", init_utsname()->version); 525 + pr_info("Starting Linux PPC64 %s\n", init_utsname()->version); 526 526 527 - printk("-----------------------------------------------------\n"); 528 - printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size); 529 - printk("phys_mem_size = 0x%llx\n", memblock_phys_mem_size()); 527 + pr_info("-----------------------------------------------------\n"); 528 + pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size); 529 + pr_info("phys_mem_size = 0x%llx\n", memblock_phys_mem_size()); 530 530 531 531 if (ppc64_caches.dline_size != 0x80) 532 - printk("dcache_line_size = 0x%x\n", ppc64_caches.dline_size); 532 + pr_info("dcache_line_size = 0x%x\n", ppc64_caches.dline_size); 533 533 if (ppc64_caches.iline_size != 0x80) 534 - printk("icache_line_size = 0x%x\n", ppc64_caches.iline_size); 534 + pr_info("icache_line_size = 0x%x\n", ppc64_caches.iline_size); 535 535 536 - printk("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features); 537 - printk(" possible = 0x%016lx\n", CPU_FTRS_POSSIBLE); 538 - printk(" always = 0x%016lx\n", CPU_FTRS_ALWAYS); 539 - printk("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features, 536 + pr_info("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features); 537 + pr_info(" possible = 0x%016lx\n", CPU_FTRS_POSSIBLE); 538 + pr_info(" always = 0x%016lx\n", CPU_FTRS_ALWAYS); 539 + pr_info("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features, 540 540 cur_cpu_spec->cpu_user_features2); 541 - printk("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features); 542 - printk("firmware_features = 0x%016lx\n", powerpc_firmware_features); 541 + pr_info("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features); 542 + pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features); 543 543 544 544 #ifdef CONFIG_PPC_STD_MMU_64 545 545 if (htab_address) 546 - printk("htab_address = 0x%p\n", htab_address); 546 + 
pr_info("htab_address = 0x%p\n", htab_address); 547 547 548 - printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); 548 + pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask); 549 549 #endif 550 550 551 551 if (PHYSICAL_START > 0) 552 - printk("physical_start = 0x%llx\n", 552 + pr_info("physical_start = 0x%llx\n", 553 553 (unsigned long long)PHYSICAL_START); 554 - printk("-----------------------------------------------------\n"); 554 + pr_info("-----------------------------------------------------\n"); 555 555 556 556 DBG(" <- setup_system()\n"); 557 557 }
+1 -1
arch/powerpc/kernel/stacktrace.c
··· 50 50 { 51 51 unsigned long sp; 52 52 53 - asm("mr %0,1" : "=r" (sp)); 53 + sp = current_stack_pointer(); 54 54 55 55 save_context_stack(trace, sp, current, 1); 56 56 }
+39 -2
arch/powerpc/mm/numa.c
··· 8 8 * as published by the Free Software Foundation; either version 9 9 * 2 of the License, or (at your option) any later version. 10 10 */ 11 + #define pr_fmt(fmt) "numa: " fmt 12 + 11 13 #include <linux/threads.h> 12 14 #include <linux/bootmem.h> 13 15 #include <linux/init.h> ··· 1155 1153 } 1156 1154 early_param("numa", early_numa); 1157 1155 1156 + static bool topology_updates_enabled = true; 1157 + 1158 + static int __init early_topology_updates(char *p) 1159 + { 1160 + if (!p) 1161 + return 0; 1162 + 1163 + if (!strcmp(p, "off")) { 1164 + pr_info("Disabling topology updates\n"); 1165 + topology_updates_enabled = false; 1166 + } 1167 + 1168 + return 0; 1169 + } 1170 + early_param("topology_updates", early_topology_updates); 1171 + 1158 1172 #ifdef CONFIG_MEMORY_HOTPLUG 1159 1173 /* 1160 1174 * Find the node associated with a hot added memory section for ··· 1460 1442 long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; 1461 1443 u64 flags = 1; 1462 1444 int hwcpu = get_hard_smp_processor_id(cpu); 1445 + int i; 1463 1446 1464 1447 rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu); 1448 + for (i = 0; i < 6; i++) 1449 + retbuf[i] = cpu_to_be64(retbuf[i]); 1465 1450 vphn_unpack_associativity(retbuf, associativity); 1466 1451 1467 1452 return rc; ··· 1560 1539 struct device *dev; 1561 1540 int weight, new_nid, i = 0; 1562 1541 1542 + if (!prrn_enabled && !vphn_enabled) 1543 + return 0; 1544 + 1563 1545 weight = cpumask_weight(&cpu_associativity_changes_mask); 1564 1546 if (!weight) 1565 1547 return 0; ··· 1614 1590 ud->next = &updates[i]; 1615 1591 } 1616 1592 cpu = cpu_last_thread_sibling(cpu); 1593 + } 1594 + 1595 + pr_debug("Topology update for the following CPUs:\n"); 1596 + if (cpumask_weight(&updated_cpus)) { 1597 + for (ud = &updates[0]; ud; ud = ud->next) { 1598 + pr_debug("cpu %d moving from node %d " 1599 + "to %d\n", ud->cpu, 1600 + ud->old_nid, ud->new_nid); 1601 + } 1617 1602 } 1618 1603 1619 1604 /* ··· 1833 1800 1834 1801 static int 
topology_update_init(void) 1835 1802 { 1836 - start_topology_update(); 1837 - proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops); 1803 + /* Do not poll for changes if disabled at boot */ 1804 + if (topology_updates_enabled) 1805 + start_topology_update(); 1806 + 1807 + if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops)) 1808 + return -ENOMEM; 1838 1809 1839 1810 return 0; 1840 1811 }
+1 -1
arch/powerpc/platforms/powernv/eeh-ioda.c
··· 373 373 * moving forward, we have to return operational 374 374 * state during PE reset. 375 375 */ 376 - if (pe->state & EEH_PE_RESET) { 376 + if (pe->state & EEH_PE_CFG_BLOCKED) { 377 377 result = (EEH_STATE_MMIO_ACTIVE | 378 378 EEH_STATE_DMA_ACTIVE | 379 379 EEH_STATE_MMIO_ENABLED |
+55 -2
arch/powerpc/platforms/powernv/eeh-powernv.c
··· 169 169 } 170 170 171 171 /* 172 + * If the PE contains any one of following adapters, the 173 + * PCI config space can't be accessed when dumping EEH log. 174 + * Otherwise, we will run into fenced PHB caused by shortage 175 + * of outbound credits in the adapter. The PCI config access 176 + * should be blocked until PE reset. MMIO access is dropped 177 + * by hardware certainly. In order to drop PCI config requests, 178 + * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which 179 + * will be checked in the backend for PE state retrival. If 180 + * the PE becomes frozen for the first time and the flag has 181 + * been set for the PE, we will set EEH_PE_CFG_BLOCKED for 182 + * that PE to block its config space. 183 + * 184 + * Broadcom Austin 4-ports NICs (14e4:1657) 185 + * Broadcom Shiner 2-ports 10G NICs (14e4:168e) 186 + */ 187 + if ((dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x1657) || 188 + (dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x168e)) 189 + edev->pe->state |= EEH_PE_CFG_RESTRICTED; 190 + 191 + /* 172 192 * Cache the PE primary bus, which can't be fetched when 173 193 * full hotplug is in progress. 
In that case, all child 174 194 * PCI devices of the PE are expected to be removed prior ··· 403 383 return ret; 404 384 } 405 385 386 + static inline bool powernv_eeh_cfg_blocked(struct device_node *dn) 387 + { 388 + struct eeh_dev *edev = of_node_to_eeh_dev(dn); 389 + 390 + if (!edev || !edev->pe) 391 + return false; 392 + 393 + if (edev->pe->state & EEH_PE_CFG_BLOCKED) 394 + return true; 395 + 396 + return false; 397 + } 398 + 399 + static int powernv_eeh_read_config(struct device_node *dn, 400 + int where, int size, u32 *val) 401 + { 402 + if (powernv_eeh_cfg_blocked(dn)) { 403 + *val = 0xFFFFFFFF; 404 + return PCIBIOS_SET_FAILED; 405 + } 406 + 407 + return pnv_pci_cfg_read(dn, where, size, val); 408 + } 409 + 410 + static int powernv_eeh_write_config(struct device_node *dn, 411 + int where, int size, u32 val) 412 + { 413 + if (powernv_eeh_cfg_blocked(dn)) 414 + return PCIBIOS_SET_FAILED; 415 + 416 + return pnv_pci_cfg_write(dn, where, size, val); 417 + } 418 + 406 419 /** 407 420 * powernv_eeh_next_error - Retrieve next EEH error to handle 408 421 * @pe: Affected PE ··· 493 440 .get_log = powernv_eeh_get_log, 494 441 .configure_bridge = powernv_eeh_configure_bridge, 495 442 .err_inject = powernv_eeh_err_inject, 496 - .read_config = pnv_pci_cfg_read, 497 - .write_config = pnv_pci_cfg_write, 443 + .read_config = powernv_eeh_read_config, 444 + .write_config = powernv_eeh_write_config, 498 445 .next_error = powernv_eeh_next_error, 499 446 .restore_config = powernv_eeh_restore_config 500 447 };
+21
arch/powerpc/platforms/powernv/opal.c
··· 194 194 * fwnmi area at 0x7000 to provide the glue space to OPAL 195 195 */ 196 196 glue = 0x7000; 197 + 198 + /* 199 + * Check if we are running on newer firmware that exports 200 + * OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to patch 201 + * the HMI interrupt and we catch it directly in Linux. 202 + * 203 + * For older firmware (i.e currently released POWER8 System Firmware 204 + * as of today <= SV810_087), we fallback to old behavior and let OPAL 205 + * patch the HMI vector and handle it inside OPAL firmware. 206 + * 207 + * For newer firmware (in development/yet to be released) we will 208 + * start catching/handling HMI directly in Linux. 209 + */ 210 + if (!opal_check_token(OPAL_HANDLE_HMI)) { 211 + pr_info("opal: Old firmware detected, OPAL handles HMIs.\n"); 212 + opal_register_exception_handler( 213 + OPAL_HYPERVISOR_MAINTENANCE_HANDLER, 214 + 0, glue); 215 + glue += 128; 216 + } 217 + 197 218 opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue); 198 219 #endif 199 220
+1 -1
arch/powerpc/platforms/powernv/pci.c
··· 505 505 edev = of_node_to_eeh_dev(dn); 506 506 if (edev) { 507 507 if (edev->pe && 508 - (edev->pe->state & EEH_PE_RESET)) 508 + (edev->pe->state & EEH_PE_CFG_BLOCKED)) 509 509 return false; 510 510 511 511 if (edev->mode & EEH_DEV_REMOVED)
+11 -11
arch/powerpc/platforms/pseries/dlpar.c
··· 25 25 #include <asm/rtas.h> 26 26 27 27 struct cc_workarea { 28 - u32 drc_index; 29 - u32 zero; 30 - u32 name_offset; 31 - u32 prop_length; 32 - u32 prop_offset; 28 + __be32 drc_index; 29 + __be32 zero; 30 + __be32 name_offset; 31 + __be32 prop_length; 32 + __be32 prop_offset; 33 33 }; 34 34 35 35 void dlpar_free_cc_property(struct property *prop) ··· 49 49 if (!prop) 50 50 return NULL; 51 51 52 - name = (char *)ccwa + ccwa->name_offset; 52 + name = (char *)ccwa + be32_to_cpu(ccwa->name_offset); 53 53 prop->name = kstrdup(name, GFP_KERNEL); 54 54 55 - prop->length = ccwa->prop_length; 56 - value = (char *)ccwa + ccwa->prop_offset; 55 + prop->length = be32_to_cpu(ccwa->prop_length); 56 + value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset); 57 57 prop->value = kmemdup(value, prop->length, GFP_KERNEL); 58 58 if (!prop->value) { 59 59 dlpar_free_cc_property(prop); ··· 79 79 if (!dn) 80 80 return NULL; 81 81 82 - name = (char *)ccwa + ccwa->name_offset; 82 + name = (char *)ccwa + be32_to_cpu(ccwa->name_offset); 83 83 dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name); 84 84 if (!dn->full_name) { 85 85 kfree(dn); ··· 126 126 #define CALL_AGAIN -2 127 127 #define ERR_CFG_USE -9003 128 128 129 - struct device_node *dlpar_configure_connector(u32 drc_index, 129 + struct device_node *dlpar_configure_connector(__be32 drc_index, 130 130 struct device_node *parent) 131 131 { 132 132 struct device_node *dn; ··· 414 414 if (!parent) 415 415 return -ENODEV; 416 416 417 - dn = dlpar_configure_connector(drc_index, parent); 417 + dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent); 418 418 if (!dn) 419 419 return -EINVAL; 420 420
+2 -2
arch/powerpc/platforms/pseries/hotplug-cpu.c
··· 247 247 unsigned int cpu; 248 248 cpumask_var_t candidate_mask, tmp; 249 249 int err = -ENOSPC, len, nthreads, i; 250 - const u32 *intserv; 250 + const __be32 *intserv; 251 251 252 252 intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len); 253 253 if (!intserv) ··· 293 293 for_each_cpu(cpu, tmp) { 294 294 BUG_ON(cpu_present(cpu)); 295 295 set_cpu_present(cpu, true); 296 - set_hard_smp_processor_id(cpu, *intserv++); 296 + set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++)); 297 297 } 298 298 err = 0; 299 299 out_unlock:
+5 -6
arch/powerpc/platforms/pseries/iommu.c
··· 30 30 #include <linux/mm.h> 31 31 #include <linux/memblock.h> 32 32 #include <linux/spinlock.h> 33 - #include <linux/sched.h> /* for show_stack */ 34 33 #include <linux/string.h> 35 34 #include <linux/pci.h> 36 35 #include <linux/dma-mapping.h> ··· 167 168 printk("\tindex = 0x%llx\n", (u64)tbl->it_index); 168 169 printk("\ttcenum = 0x%llx\n", (u64)tcenum); 169 170 printk("\ttce val = 0x%llx\n", tce ); 170 - show_stack(current, (unsigned long *)__get_SP()); 171 + dump_stack(); 171 172 } 172 173 173 174 tcenum++; ··· 256 257 printk("\tindex = 0x%llx\n", (u64)tbl->it_index); 257 258 printk("\tnpages = 0x%llx\n", (u64)npages); 258 259 printk("\ttce[0] val = 0x%llx\n", tcep[0]); 259 - show_stack(current, (unsigned long *)__get_SP()); 260 + dump_stack(); 260 261 } 261 262 return ret; 262 263 } ··· 272 273 printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc); 273 274 printk("\tindex = 0x%llx\n", (u64)tbl->it_index); 274 275 printk("\ttcenum = 0x%llx\n", (u64)tcenum); 275 - show_stack(current, (unsigned long *)__get_SP()); 276 + dump_stack(); 276 277 } 277 278 278 279 tcenum++; ··· 291 292 printk("\trc = %lld\n", rc); 292 293 printk("\tindex = 0x%llx\n", (u64)tbl->it_index); 293 294 printk("\tnpages = 0x%llx\n", (u64)npages); 294 - show_stack(current, (unsigned long *)__get_SP()); 295 + dump_stack(); 295 296 } 296 297 } 297 298 ··· 306 307 printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc); 307 308 printk("\tindex = 0x%llx\n", (u64)tbl->it_index); 308 309 printk("\ttcenum = 0x%llx\n", (u64)tcenum); 309 - show_stack(current, (unsigned long *)__get_SP()); 310 + dump_stack(); 310 311 } 311 312 312 313 return tce_ret;
+2 -1
arch/powerpc/platforms/pseries/pseries.h
··· 56 56 /* Dynamic logical Partitioning/Mobility */ 57 57 extern void dlpar_free_cc_nodes(struct device_node *); 58 58 extern void dlpar_free_cc_property(struct property *); 59 - extern struct device_node *dlpar_configure_connector(u32, struct device_node *); 59 + extern struct device_node *dlpar_configure_connector(__be32, 60 + struct device_node *); 60 61 extern int dlpar_attach_node(struct device_node *); 61 62 extern int dlpar_detach_node(struct device_node *); 62 63
+35 -31
arch/powerpc/sysdev/msi_bitmap.c
··· 145 145 146 146 #ifdef CONFIG_MSI_BITMAP_SELFTEST 147 147 148 - #define check(x) \ 149 - if (!(x)) printk("msi_bitmap: test failed at line %d\n", __LINE__); 150 - 151 148 static void __init test_basics(void) 152 149 { 153 150 struct msi_bitmap bmp; 154 - int i, size = 512; 151 + int rc, i, size = 512; 155 152 156 153 /* Can't allocate a bitmap of 0 irqs */ 157 - check(msi_bitmap_alloc(&bmp, 0, NULL) != 0); 154 + WARN_ON(msi_bitmap_alloc(&bmp, 0, NULL) == 0); 158 155 159 156 /* of_node may be NULL */ 160 - check(0 == msi_bitmap_alloc(&bmp, size, NULL)); 157 + WARN_ON(msi_bitmap_alloc(&bmp, size, NULL)); 161 158 162 159 /* Should all be free by default */ 163 - check(0 == bitmap_find_free_region(bmp.bitmap, size, 164 - get_count_order(size))); 160 + WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size))); 165 161 bitmap_release_region(bmp.bitmap, 0, get_count_order(size)); 166 162 167 163 /* With no node, there's no msi-available-ranges, so expect > 0 */ 168 - check(msi_bitmap_reserve_dt_hwirqs(&bmp) > 0); 164 + WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0); 169 165 170 166 /* Should all still be free */ 171 - check(0 == bitmap_find_free_region(bmp.bitmap, size, 172 - get_count_order(size))); 167 + WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size))); 173 168 bitmap_release_region(bmp.bitmap, 0, get_count_order(size)); 174 169 175 170 /* Check we can fill it up and then no more */ 176 171 for (i = 0; i < size; i++) 177 - check(msi_bitmap_alloc_hwirqs(&bmp, 1) >= 0); 172 + WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) < 0); 178 173 179 - check(msi_bitmap_alloc_hwirqs(&bmp, 1) < 0); 174 + WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) >= 0); 180 175 181 176 /* Should all be allocated */ 182 - check(bitmap_find_free_region(bmp.bitmap, size, 0) < 0); 177 + WARN_ON(bitmap_find_free_region(bmp.bitmap, size, 0) >= 0); 183 178 184 179 /* And if we free one we can then allocate another */ 185 180 msi_bitmap_free_hwirqs(&bmp, size / 2, 1); 
186 - check(msi_bitmap_alloc_hwirqs(&bmp, 1) == size / 2); 181 + WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) != size / 2); 182 + 183 + /* Free most of them for the alignment tests */ 184 + msi_bitmap_free_hwirqs(&bmp, 3, size - 3); 187 185 188 186 /* Check we get a naturally aligned offset */ 189 - check(msi_bitmap_alloc_hwirqs(&bmp, 2) % 2 == 0); 190 - check(msi_bitmap_alloc_hwirqs(&bmp, 4) % 4 == 0); 191 - check(msi_bitmap_alloc_hwirqs(&bmp, 8) % 8 == 0); 192 - check(msi_bitmap_alloc_hwirqs(&bmp, 9) % 16 == 0); 193 - check(msi_bitmap_alloc_hwirqs(&bmp, 3) % 4 == 0); 194 - check(msi_bitmap_alloc_hwirqs(&bmp, 7) % 8 == 0); 195 - check(msi_bitmap_alloc_hwirqs(&bmp, 121) % 128 == 0); 187 + rc = msi_bitmap_alloc_hwirqs(&bmp, 2); 188 + WARN_ON(rc < 0 && rc % 2 != 0); 189 + rc = msi_bitmap_alloc_hwirqs(&bmp, 4); 190 + WARN_ON(rc < 0 && rc % 4 != 0); 191 + rc = msi_bitmap_alloc_hwirqs(&bmp, 8); 192 + WARN_ON(rc < 0 && rc % 8 != 0); 193 + rc = msi_bitmap_alloc_hwirqs(&bmp, 9); 194 + WARN_ON(rc < 0 && rc % 16 != 0); 195 + rc = msi_bitmap_alloc_hwirqs(&bmp, 3); 196 + WARN_ON(rc < 0 && rc % 4 != 0); 197 + rc = msi_bitmap_alloc_hwirqs(&bmp, 7); 198 + WARN_ON(rc < 0 && rc % 8 != 0); 199 + rc = msi_bitmap_alloc_hwirqs(&bmp, 121); 200 + WARN_ON(rc < 0 && rc % 128 != 0); 196 201 197 202 msi_bitmap_free(&bmp); 198 203 199 - /* Clients may check bitmap == NULL for "not-allocated" */ 200 - check(bmp.bitmap == NULL); 204 + /* Clients may WARN_ON bitmap == NULL for "not-allocated" */ 205 + WARN_ON(bmp.bitmap != NULL); 201 206 202 207 kfree(bmp.bitmap); 203 208 } ··· 224 219 of_node_init(&of_node); 225 220 of_node.full_name = node_name; 226 221 227 - check(0 == msi_bitmap_alloc(&bmp, size, &of_node)); 222 + WARN_ON(msi_bitmap_alloc(&bmp, size, &of_node)); 228 223 229 224 /* No msi-available-ranges, so expect > 0 */ 230 - check(msi_bitmap_reserve_dt_hwirqs(&bmp) > 0); 225 + WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0); 231 226 232 227 /* Should all still be free */ 233 - check(0 == 
bitmap_find_free_region(bmp.bitmap, size, 234 - get_count_order(size))); 228 + WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size))); 235 229 bitmap_release_region(bmp.bitmap, 0, get_count_order(size)); 236 230 237 231 /* Now create a fake msi-available-ranges property */ ··· 244 240 of_node.properties = &prop; 245 241 246 242 /* msi-available-ranges, so expect == 0 */ 247 - check(msi_bitmap_reserve_dt_hwirqs(&bmp) == 0); 243 + WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp)); 248 244 249 245 /* Check we got the expected result */ 250 - check(0 == bitmap_parselist(expected_str, expected, size)); 251 - check(bitmap_equal(expected, bmp.bitmap, size)); 246 + WARN_ON(bitmap_parselist(expected_str, expected, size)); 247 + WARN_ON(!bitmap_equal(expected, bmp.bitmap, size)); 252 248 253 249 msi_bitmap_free(&bmp); 254 250 kfree(bmp.bitmap);