Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'powerpc-4.14-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
"Some more powerpc fixes for 4.14.

This is bigger than I like to send at rc7, but that's at least partly
because I didn't send any fixes last week. If it wasn't for the IMC
driver, which is new and getting heavy testing, the diffstat would
look a bit better. I've also added ftrace on big endian to my test
suite, so we shouldn't break that again in future.

- A fix to the handling of misaligned paste instructions (P9 only),
where a change to a #define has caused the check for the
instruction to always fail.

- The preempt handling was unbalanced in the radix THP flush (P9
only). Though we don't generally use preempt we want to keep it
working as much as possible.

- Two fixes for IMC (P9 only), one when booting with restricted
number of CPUs and one in the error handling when initialisation
fails due to firmware etc.

- A revert to fix function_graph on big endian machines, and then a
rework of the reverted patch to fix kprobes blacklist handling on
big endian machines.

Thanks to: Anju T Sudhakar, Guilherme G. Piccoli, Madhavan Srinivasan,
Naveen N. Rao, Nicholas Piggin, Paul Mackerras"

* tag 'powerpc-4.14-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc/perf: Fix core-imc hotplug callback failure during imc initialization
powerpc/kprobes: Dereference function pointers only if the address does not belong to kernel text
Revert "powerpc64/elfv1: Only dereference function descriptor for non-text symbols"
powerpc/64s/radix: Fix preempt imbalance in TLB flush
powerpc: Fix check for copy/paste instructions in alignment handler
powerpc/perf: Fix IMC allocation routine

+26 -13
+1 -9
arch/powerpc/include/asm/code-patching.h
··· 83 83 * On PPC64 ABIv1 the function pointer actually points to the 84 84 * function's descriptor. The first entry in the descriptor is the 85 85 * address of the function text. 86 - * 87 - * However, we may also receive pointer to an assembly symbol. To 88 - * detect that, we first check if the function pointer we receive 89 - * already points to kernel/module text and we only dereference it 90 - * if it doesn't. 91 86 */ 92 - if (kernel_text_address((unsigned long)func)) 93 - return (unsigned long)func; 94 - else 95 - return ((func_descr_t *)func)->entry; 87 + return ((func_descr_t *)func)->entry; 96 88 #else 97 89 return (unsigned long)func; 98 90 #endif
+1 -1
arch/powerpc/kernel/align.c
··· 332 332 * when pasting to a co-processor. Furthermore, paste_last is the 333 333 * synchronisation point for preceding copy/paste sequences. 334 334 */ 335 - if ((instr & 0xfc0006fe) == PPC_INST_COPY) 335 + if ((instr & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe)) 336 336 return -EIO; 337 337 338 338 r = analyse_instr(&op, regs, instr);
+6 -1
arch/powerpc/kernel/kprobes.c
··· 600 600 601 601 unsigned long arch_deref_entry_point(void *entry) 602 602 { 603 - return ppc_global_function_entry(entry); 603 + #ifdef PPC64_ELF_ABI_v1 604 + if (!kernel_text_address((unsigned long)entry)) 605 + return ppc_global_function_entry(entry); 606 + else 607 + #endif 608 + return (unsigned long)entry; 604 609 } 605 610 NOKPROBE_SYMBOL(arch_deref_entry_point); 606 611
+2
arch/powerpc/mm/tlb-radix.c
··· 360 360 361 361 362 362 pid = mm ? mm->context.id : 0; 363 + preempt_disable(); 363 364 if (unlikely(pid == MMU_NO_CONTEXT)) 364 365 goto no_context; 365 366 366 367 /* 4k page size, just blow the world */ 367 368 if (PAGE_SIZE == 0x1000) { 368 369 radix__flush_all_mm(mm); 370 + preempt_enable(); 369 371 return; 370 372 } 371 373
+16 -2
arch/powerpc/perf/imc-pmu.c
··· 607 607 if (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask)) 608 608 return 0; 609 609 610 + /* 611 + * Check whether core_imc is registered. We could end up here 612 + * if the cpuhotplug callback registration fails. i.e., callback 613 + * invokes the offline path for all successfully registered cpus. 614 + * At this stage, core_imc pmu will not be registered and we 615 + * should return here. 616 + * 617 + * We return with a zero since this is not an offline failure. 618 + * And cpuhp_setup_state() returns the actual failure reason 619 + * to the caller, which in turn will call the cleanup routine. 620 + */ 621 + if (!core_imc_pmu->pmu.event_init) 622 + return 0; 623 + 610 624 /* Find any online cpu in that core except the current "cpu" */ 611 625 ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu); 612 626 ··· 1118 1104 1119 1105 static void cleanup_all_core_imc_memory(void) 1120 1106 { 1121 - int i, nr_cores = num_present_cpus() / threads_per_core; 1107 + int i, nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core); 1122 1108 struct imc_mem_info *ptr = core_imc_pmu->mem_info; 1123 1109 int size = core_imc_pmu->counter_mem_size; 1124 1110 ··· 1226 1212 if (!pmu_ptr->pmu.name) 1227 1213 return -ENOMEM; 1228 1214 1229 - nr_cores = num_present_cpus() / threads_per_core; 1215 + nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core); 1230 1216 pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info), 1231 1217 GFP_KERNEL); 1232 1218