Merge tag 'powerpc-4.7-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

- tm: Always reclaim in start_thread() for exec() class syscalls from
  Cyril Bur

- tm: Avoid SLB faults in treclaim/trecheckpoint when RI=0 from
  Michael Neuling

- eeh: Fix wrong argument passed to eeh_rmv_device() from Gavin Shan

- Initialise pci_io_base as early as possible from Darren Stevens

* tag 'powerpc-4.7-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc: Initialise pci_io_base as early as possible
  powerpc/tm: Avoid SLB faults in treclaim/trecheckpoint when RI=0
  powerpc/eeh: Fix wrong argument passed to eeh_rmv_device()
  powerpc/tm: Always reclaim in start_thread() for exec() class syscalls

Changed files: +65 -19
arch/powerpc/include/asm/book3s/64/pgtable.h (+1)

···
 #define KERN_VIRT_SIZE __kernel_virt_size
 extern struct page *vmemmap;
 extern unsigned long ioremap_bot;
+extern unsigned long pci_io_base;
 #endif /* __ASSEMBLY__ */

 #include <asm/book3s/64/hash.h>
arch/powerpc/kernel/eeh_driver.c (+1 -1)

···
 			pci_unlock_rescan_remove();
 		}
 	} else if (frozen_bus) {
-		eeh_pe_dev_traverse(pe, eeh_rmv_device, &rmv_data);
+		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
 	}

 	/*
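
Why the one-character fix matters: eeh_pe_dev_traverse() forwards its last argument to the callback as an opaque void *, and eeh_rmv_device() casts it straight back to the removal-data structure. At this call site rmv_data is already a pointer, so taking its address passed a pointer-to-pointer that the callback then dereferenced as the wrong type. A minimal sketch of the bug shape, with simplified, hypothetical types rather than the real EEH structures:

    #include <stdio.h>

    struct rmv_data { int removed; };

    /* Traversal callback: receives an opaque pointer, casts it back. */
    static void rmv_device(void *userdata)
    {
            struct rmv_data *d = userdata;  /* expects the struct itself */
            d->removed++;
    }

    static void traverse(void (*fn)(void *), void *userdata)
    {
            fn(userdata);
    }

    int main(void)
    {
            struct rmv_data data = { 0 };
            struct rmv_data *rmv_data = &data;  /* already a pointer here */

            traverse(rmv_device, rmv_data);     /* fixed: callback sees the struct */
            /*
             * traverse(rmv_device, &rmv_data); would be the old bug: the
             * callback reinterprets a stack slot holding a pointer as the
             * struct and scribbles over it.
             */
            printf("removed = %d\n", data.removed);
            return 0;
    }

The compiler cannot flag the mistake because void * accepts any object pointer, which is what makes traversal-callback APIs like this easy to misuse.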
arch/powerpc/kernel/pci_64.c (-1)

···

 	printk(KERN_INFO "PCI: Probing PCI hardware\n");

-	pci_io_base = ISA_IO_BASE;
 	/* For now, override phys_mem_access_prot. If we need it,g
 	 * later, we may move that initialization to each ppc_md
 	 */
arch/powerpc/kernel/process.c (+10)

···
 		current->thread.regs = regs - 1;
 	}

+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	/*
+	 * Clear any transactional state, we're exec()ing. The cause is
+	 * not important as there will never be a recheckpoint so it's not
+	 * user visible.
+	 */
+	if (MSR_TM_SUSPENDED(mfmsr()))
+		tm_reclaim_current(0);
+#endif
+
 	memset(regs->gpr, 0, sizeof(regs->gpr));
 	regs->ctr = 0;
 	regs->link = 0;
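
What the new block in start_thread() guards against: a thread can enter exec() with a transaction in suspended state (tbegin., tsuspend., then the syscall), and without an explicit reclaim the exec()'d image would begin life carrying transactional state that can never legitimately be recheckpointed. A hedged user-space sketch of the triggering sequence, using GCC's PowerPC HTM builtins (assumes POWER8-class hardware TM and -mhtm; the file name is hypothetical):

    /* tm-exec-sketch.c: begin a transaction, suspend it, then exec().
     * Build: gcc -mhtm -o tm-exec-sketch tm-exec-sketch.c
     */
    #include <htmintrin.h>
    #include <unistd.h>

    int main(void)
    {
            char *argv[] = { "/bin/true", NULL };

            if (__builtin_tbegin(0)) {      /* non-zero: transaction started */
                    __builtin_tsuspend();   /* enter suspended state */
                    execv(argv[0], argv);   /* syscall while TM-suspended */
            }
            return 1;                       /* tbegin failed or tx aborted */
    }

With the fix, start_thread() notices MSR_TM_SUSPENDED and reclaims unconditionally, so the exec()'d program starts with clean, non-transactional MSR state.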
arch/powerpc/kernel/tm.S (+44 -17)

···
 	std	r3, STK_PARAM(R3)(r1)
 	SAVE_NVGPRS(r1)

-	/* We need to setup MSR for VSX register save instructions. Here we
-	 * also clear the MSR RI since when we do the treclaim, we won't have a
-	 * valid kernel pointer for a while. We clear RI here as it avoids
-	 * adding another mtmsr closer to the treclaim. This makes the region
-	 * maked as non-recoverable wider than it needs to be but it saves on
-	 * inserting another mtmsrd later.
-	 */
+	/* We need to setup MSR for VSX register save instructions. */
 	mfmsr	r14
 	mr	r15, r14
 	ori	r15, r15, MSR_FP
-	li	r16, MSR_RI
+	li	r16, 0
 	ori	r16, r16, MSR_EE /* IRQs hard off */
 	andc	r15, r15, r16
 	oris	r15, r15, MSR_VEC@h
···
 1:	tdeqi	r6, 0
 	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0

-	/* The moment we treclaim, ALL of our GPRs will switch
+	/* Clear MSR RI since we are about to change r1, EE is already off. */
+	li	r4, 0
+	mtmsrd	r4, 1
+
+	/*
+	 * BE CAREFUL HERE:
+	 * At this point we can't take an SLB miss since we have MSR_RI
+	 * off. Load only to/from the stack/paca which are in SLB bolted regions
+	 * until we turn MSR RI back on.
+	 *
+	 * The moment we treclaim, ALL of our GPRs will switch
 	 * to user register state. (FPRs, CCR etc. also!)
 	 * Use an sprg and a tm_scratch in the PACA to shuffle.
 	 */
···

 	/* Store the PPR in r11 and reset to decent value */
 	std	r11, GPR11(r1)		/* Temporary stash */
+
+	/* Reset MSR RI so we can take SLB faults again */
+	li	r11, MSR_RI
+	mtmsrd	r11, 1
+
 	mfspr	r11, SPRN_PPR
 	HMT_MEDIUM
···
 	ld	r5, THREAD_TM_DSCR(r3)
 	ld	r6, THREAD_TM_PPR(r3)

-	/* Clear the MSR RI since we are about to change R1. EE is already off
-	 */
-	li	r4, 0
-	mtmsrd	r4, 1
-
 	REST_GPR(0, r7)			/* GPR0 */
 	REST_2GPRS(2, r7)		/* GPR2-3 */
 	REST_GPR(4, r7)			/* GPR4 */
···
 	ld	r6, _CCR(r7)
 	mtcr	r6

-	REST_GPR(1, r7)			/* GPR1 */
-	REST_GPR(5, r7)			/* GPR5-7 */
 	REST_GPR(6, r7)
-	ld	r7, GPR7(r7)
+
+	/*
+	 * Store r1 and r5 on the stack so that we can access them
+	 * after we clear MSR RI.
+	 */
+
+	REST_GPR(5, r7)
+	std	r5, -8(r1)
+	ld	r5, GPR1(r7)
+	std	r5, -16(r1)
+
+	REST_GPR(7, r7)
+
+	/* Clear MSR RI since we are about to change r1. EE is already off */
+	li	r5, 0
+	mtmsrd	r5, 1
+
+	/*
+	 * BE CAREFUL HERE:
+	 * At this point we can't take an SLB miss since we have MSR_RI
+	 * off. Load only to/from the stack/paca which are in SLB bolted regions
+	 * until we turn MSR RI back on.
+	 */
+
+	ld	r5, -8(r1)
+	ld	r1, -16(r1)

 	/* Commit register state as checkpointed state: */
 	TRECHKPT
arch/powerpc/mm/hash_utils_64.c (+4)

···
 	vmemmap = (struct page *)H_VMEMMAP_BASE;
 	ioremap_bot = IOREMAP_BASE;

+#ifdef CONFIG_PCI
+	pci_io_base = ISA_IO_BASE;
+#endif
+
 	/* Initialize the MMU Hash table and create the linear mapping
 	 * of memory. Has to be done before SLB initialization as this is
 	 * currently where the page size encoding is obtained.
arch/powerpc/mm/pgtable-radix.c (+5)

···
 	__vmalloc_end = RADIX_VMALLOC_END;
 	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
 	ioremap_bot = IOREMAP_BASE;
+
+#ifdef CONFIG_PCI
+	pci_io_base = ISA_IO_BASE;
+#endif
+
 	/*
 	 * For now radix also use the same frag size
 	 */
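
Background on why pci_io_base has to be valid so early: on ppc64 the legacy port-I/O accessors resolve through _IO_BASE, which is defined to pci_io_base in arch/powerpc/include/asm/io.h, so any inb()/outb() issued by platform setup code before pcibios_init() previously computed addresses from an uninitialised base. Assigning ISA_IO_BASE in both the hash and radix early MMU-init paths makes the base valid as soon as the kernel virtual layout is known. A simplified model of the dependency (hypothetical helper names, not the kernel's real accessor macros):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned long pci_io_base;       /* zero until initialised */

    /* Port I/O on ppc64 is memory-mapped at pci_io_base + port. */
    static volatile uint8_t *port_address(unsigned long port)
    {
            return (volatile uint8_t *)(pci_io_base + port);
    }

    int main(void)
    {
            /* Before initialisation the accessor computes a bogus address. */
            printf("uninitialised: port 0x3f8 -> %p\n", (void *)port_address(0x3f8));

            /* After the fix the base is set during early MMU setup, before
             * any platform code can issue port I/O. The constant below is a
             * stand-in, not the real ISA_IO_BASE value. */
            pci_io_base = 0x10000000UL;
            printf("initialised:   port 0x3f8 -> %p\n", (void *)port_address(0x3f8));
            return 0;
    }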