Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'fixes' into next

We ended up with an ugly conflict between 'fixes' and 'next' in
ftrace.h involving multiple nested ifdefs, and the automatic
resolution is wrong. So merge 'fixes' into 'next' so we can fix it up.

+79 -20
+8
Documentation/ABI/testing/sysfs-class-cxl
··· 246 246 Returns 1 if the psl timebase register is synchronized 247 247 with the core timebase register, 0 otherwise. 248 248 Users: https://github.com/ibm-capi/libcxl 249 + 250 + What: /sys/class/cxl/<card>/tunneled_ops_supported 251 + Date: May 2018 252 + Contact: linuxppc-dev@lists.ozlabs.org 253 + Description: read only 254 + Returns 1 if tunneled operations are supported in capi mode, 255 + 0 otherwise. 256 + Users: https://github.com/ibm-capi/libcxl
+24 -9
arch/powerpc/include/asm/ftrace.h
··· 65 65 #endif /* CONFIG_FUNCTION_TRACER */ 66 66 67 67 #ifndef __ASSEMBLY__ 68 - #if defined(CONFIG_FTRACE_SYSCALLS) && defined(PPC64_ELF_ABI_v1) 68 + #ifdef CONFIG_FTRACE_SYSCALLS 69 + /* 70 + * Some syscall entry functions on powerpc start with "ppc_" (fork and clone, 71 + * for instance) or ppc32_/ppc64_. We should also match the sys_ variant with 72 + * those. 73 + */ 69 74 #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME 75 + #ifdef PPC64_ELF_ABI_v1 70 76 static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) 71 77 { 72 - /* 73 - * Compare the symbol name with the system call name. Skip the .sys or .SyS 74 - * prefix from the symbol name and the sys prefix from the system call name and 75 - * just match the rest. This is only needed on ppc64 since symbol names on 76 - * 32bit do not start with a period so the generic function will work. 77 - */ 78 - return !strcmp(sym + 4, name + 3); 78 + /* We need to skip past the initial dot, and the __se_sys alias */ 79 + return !strcmp(sym + 1, name) || 80 + (!strncmp(sym, ".__se_sys", 9) && !strcmp(sym + 6, name)) || 81 + (!strncmp(sym, ".ppc_", 5) && !strcmp(sym + 5, name + 4)) || 82 + (!strncmp(sym, ".ppc32_", 7) && !strcmp(sym + 7, name + 4)) || 83 + (!strncmp(sym, ".ppc64_", 7) && !strcmp(sym + 7, name + 4)); 79 84 } 80 - #endif /* CONFIG_FTRACE_SYSCALLS && PPC64_ELF_ABI_v1 */ 85 + #else 86 + static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) 87 + { 88 + return !strcmp(sym, name) || 89 + (!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name)) || 90 + (!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4)) || 91 + (!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) || 92 + (!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4)); 93 + } 94 + #endif /* PPC64_ELF_ABI_v1 */ 95 + #endif /* CONFIG_FTRACE_SYSCALLS */ 81 96 82 97 #ifdef CONFIG_PPC64 83 98 #include <asm/paca.h>
-1
arch/powerpc/include/asm/paca.h
··· 165 165 u64 saved_msr; /* MSR saved here by enter_rtas */ 166 166 u16 trap_save; /* Used when bad stack is encountered */ 167 167 u8 irq_soft_mask; /* mask for irq soft masking */ 168 - u8 soft_enabled; /* irq soft-enable flag */ 169 168 u8 irq_happened; /* irq happened while soft-disabled */ 170 169 u8 io_sync; /* writel() needs spin_unlock sync */ 171 170 u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
+5 -8
arch/powerpc/include/asm/topology.h
··· 91 91 extern int stop_topology_update(void); 92 92 extern int prrn_is_enabled(void); 93 93 extern int find_and_online_cpu_nid(int cpu); 94 + extern int timed_topology_update(int nsecs); 94 95 #else 95 96 static inline int start_topology_update(void) 96 97 { ··· 109 108 { 110 109 return 0; 111 110 } 111 + static inline int timed_topology_update(int nsecs) 112 + { 113 + return 0; 114 + } 112 115 #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */ 113 - 114 - #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_NEED_MULTIPLE_NODES) 115 - #if defined(CONFIG_PPC_SPLPAR) 116 - extern int timed_topology_update(int nsecs); 117 - #else 118 - #define timed_topology_update(nsecs) 119 - #endif /* CONFIG_PPC_SPLPAR */ 120 - #endif /* CONFIG_HOTPLUG_CPU || CONFIG_NEED_MULTIPLE_NODES */ 121 116 122 117 #include <asm-generic/topology.h> 123 118
+6
arch/powerpc/kernel/cpu_setup_power.S
··· 28 28 beqlr 29 29 li r0,0 30 30 mtspr SPRN_LPID,r0 31 + mtspr SPRN_PCR,r0 31 32 mfspr r3,SPRN_LPCR 32 33 li r4,(LPCR_LPES1 >> LPCR_LPES_SH) 33 34 bl __init_LPCR_ISA206 ··· 42 41 beqlr 43 42 li r0,0 44 43 mtspr SPRN_LPID,r0 44 + mtspr SPRN_PCR,r0 45 45 mfspr r3,SPRN_LPCR 46 46 li r4,(LPCR_LPES1 >> LPCR_LPES_SH) 47 47 bl __init_LPCR_ISA206 ··· 59 57 beqlr 60 58 li r0,0 61 59 mtspr SPRN_LPID,r0 60 + mtspr SPRN_PCR,r0 62 61 mfspr r3,SPRN_LPCR 63 62 ori r3, r3, LPCR_PECEDH 64 63 li r4,0 /* LPES = 0 */ ··· 81 78 beqlr 82 79 li r0,0 83 80 mtspr SPRN_LPID,r0 81 + mtspr SPRN_PCR,r0 84 82 mfspr r3,SPRN_LPCR 85 83 ori r3, r3, LPCR_PECEDH 86 84 li r4,0 /* LPES = 0 */ ··· 103 99 mtspr SPRN_PSSCR,r0 104 100 mtspr SPRN_LPID,r0 105 101 mtspr SPRN_PID,r0 102 + mtspr SPRN_PCR,r0 106 103 mfspr r3,SPRN_LPCR 107 104 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) 108 105 or r3, r3, r4 ··· 128 123 mtspr SPRN_PSSCR,r0 129 124 mtspr SPRN_LPID,r0 130 125 mtspr SPRN_PID,r0 126 + mtspr SPRN_PCR,r0 131 127 mfspr r3,SPRN_LPCR 132 128 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) 133 129 or r3, r3, r4
+1
arch/powerpc/kernel/dt_cpu_ftrs.c
··· 101 101 if (hv_mode) { 102 102 mtspr(SPRN_LPID, 0); 103 103 mtspr(SPRN_HFSCR, system_registers.hfscr); 104 + mtspr(SPRN_PCR, 0); 104 105 } 105 106 mtspr(SPRN_FSCR, system_registers.fscr); 106 107
+12 -2
arch/powerpc/platforms/powernv/opal-nvram.c
··· 44 44 return count; 45 45 } 46 46 47 + /* 48 + * This can be called in the panic path with interrupts off, so use 49 + * mdelay in that case. 50 + */ 47 51 static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index) 48 52 { 49 53 s64 rc = OPAL_BUSY; ··· 62 58 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { 63 59 rc = opal_write_nvram(__pa(buf), count, off); 64 60 if (rc == OPAL_BUSY_EVENT) { 65 - msleep(OPAL_BUSY_DELAY_MS); 61 + if (in_interrupt() || irqs_disabled()) 62 + mdelay(OPAL_BUSY_DELAY_MS); 63 + else 64 + msleep(OPAL_BUSY_DELAY_MS); 66 65 opal_poll_events(NULL); 67 66 } else if (rc == OPAL_BUSY) { 68 - msleep(OPAL_BUSY_DELAY_MS); 67 + if (in_interrupt() || irqs_disabled()) 68 + mdelay(OPAL_BUSY_DELAY_MS); 69 + else 70 + msleep(OPAL_BUSY_DELAY_MS); 69 71 } 70 72 } 71 73
+1
drivers/misc/cxl/cxl.h
··· 717 717 bool perst_select_user; 718 718 bool perst_same_image; 719 719 bool psl_timebase_synced; 720 + bool tunneled_ops_supported; 720 721 721 722 /* 722 723 * number of contexts mapped on to this card. Possible values are:
+12
drivers/misc/cxl/pci.c
··· 1742 1742 /* Required for devices using CAPP DMA mode, harmless for others */ 1743 1743 pci_set_master(dev); 1744 1744 1745 + adapter->tunneled_ops_supported = false; 1746 + 1747 + if (cxl_is_power9()) { 1748 + if (pnv_pci_set_tunnel_bar(dev, 0x00020000E0000000ull, 1)) 1749 + dev_info(&dev->dev, "Tunneled operations unsupported\n"); 1750 + else 1751 + adapter->tunneled_ops_supported = true; 1752 + } 1753 + 1745 1754 if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode))) 1746 1755 goto err; 1747 1756 ··· 1776 1767 static void cxl_deconfigure_adapter(struct cxl *adapter) 1777 1768 { 1778 1769 struct pci_dev *pdev = to_pci_dev(adapter->dev.parent); 1770 + 1771 + if (cxl_is_power9()) 1772 + pnv_pci_set_tunnel_bar(pdev, 0x00020000E0000000ull, 0); 1779 1773 1780 1774 cxl_native_release_psl_err_irq(adapter); 1781 1775 cxl_unmap_adapter_regs(adapter);
+10
drivers/misc/cxl/sysfs.c
··· 78 78 return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced); 79 79 } 80 80 81 + static ssize_t tunneled_ops_supported_show(struct device *device, 82 + struct device_attribute *attr, 83 + char *buf) 84 + { 85 + struct cxl *adapter = to_cxl_adapter(device); 86 + 87 + return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported); 88 + } 89 + 81 90 static ssize_t reset_adapter_store(struct device *device, 82 91 struct device_attribute *attr, 83 92 const char *buf, size_t count) ··· 192 183 __ATTR_RO(base_image), 193 184 __ATTR_RO(image_loaded), 194 185 __ATTR_RO(psl_timebase_synced), 186 + __ATTR_RO(tunneled_ops_supported), 195 187 __ATTR_RW(load_image_on_perst), 196 188 __ATTR_RW(perst_reloads_same_image), 197 189 __ATTR(reset, S_IWUSR, NULL, reset_adapter_store),