Merge tag 'powerpc-5.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull more powerpc fixes from Michael Ellerman:

- Fix a bug in the copying of sigset_t on 32-bit systems, which prevented
X from starting.

- Fix handling of shared LSIs (rare) with the xive interrupt controller
(Power9/10).

- Fix missing TOC setup in some KVM code, which could result in oopses
depending on kernel data layout.

- Fix DMA mapping when we have persistent memory and only one DMA
window available.

- Fix further problems with STRICT_KERNEL_RWX on 8xx, exposed by a
recent fix.

- A couple of other minor fixes.

Thanks to Alexey Kardashevskiy, Aneesh Kumar K.V, Cédric Le Goater,
Christian Zigotzky, Christophe Leroy, Daniel Axtens, Finn Thain, Greg
Kurz, Masahiro Yamada, Nicholas Piggin, and Uwe Kleine-König.

* tag 'powerpc-5.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc/xive: Change IRQ domain to a tree domain
powerpc/8xx: Fix pinned TLBs with CONFIG_STRICT_KERNEL_RWX
powerpc/signal32: Fix sigset_t copy
powerpc/book3e: Fix TLBCAM preset at boot
powerpc/pseries/ddw: Do not try direct mapping with persistent memory and one window
powerpc/pseries/ddw: simplify enable_ddw()
powerpc/pseries/ddw: Revert "Extend upper limit for huge DMA window for persistent memory"
powerpc/pseries: Fix numa FORM2 parsing fallback code
powerpc/pseries: rename numa_dist_table to form2_distances
powerpc: clean vdso32 and vdso64 directories
powerpc/83xx/mpc8349emitx: Drop unused variable
KVM: PPC: Book3S HV: Use GLOBAL_TOC for kvmppc_h_set_dabr/xdabr()

Changed files
+51 -58
+3
arch/powerpc/kernel/Makefile
···
 # Force dependency (incbin is bad)
 $(obj)/vdso32_wrapper.o : $(obj)/vdso32/vdso32.so.dbg
 $(obj)/vdso64_wrapper.o : $(obj)/vdso64/vdso64.so.dbg
+
+# for cleaning
+subdir- += vdso32 vdso64
+7 -6
arch/powerpc/kernel/head_8xx.S
···
 #ifdef CONFIG_PIN_TLB_DATA
 	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
 	LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
+	li	r8, 0
 #ifdef CONFIG_PIN_TLB_IMMR
 	li	r0, 3
 #else
···
 	mtctr	r0
 	cmpwi	r4, 0
 	beq	4f
-	LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
 	LOAD_REG_ADDR(r9, _sinittext)
 
 2:	ori	r0, r6, MD_EVALID
+	ori	r12, r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT
 	mtspr	SPRN_MD_CTR, r5
 	mtspr	SPRN_MD_EPN, r0
 	mtspr	SPRN_MD_TWC, r7
-	mtspr	SPRN_MD_RPN, r8
+	mtspr	SPRN_MD_RPN, r12
 	addi	r5, r5, 0x100
 	addis	r6, r6, SZ_8M@h
 	addis	r8, r8, SZ_8M@h
 	cmplw	r6, r9
 	bdnzt	lt, 2b
-
-4:	LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
+4:
 2:	ori	r0, r6, MD_EVALID
+	ori	r12, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT
 	mtspr	SPRN_MD_CTR, r5
 	mtspr	SPRN_MD_EPN, r0
 	mtspr	SPRN_MD_TWC, r7
-	mtspr	SPRN_MD_RPN, r8
+	mtspr	SPRN_MD_RPN, r12
 	addi	r5, r5, 0x100
 	addis	r6, r6, SZ_8M@h
 	addis	r8, r8, SZ_8M@h
···
 #endif
 #if defined(CONFIG_PIN_TLB_IMMR) || defined(CONFIG_PIN_TLB_DATA)
 	lis	r0, (MD_RSV4I | MD_TWAM)@h
-	mtspr	SPRN_MI_CTR, r0
+	mtspr	SPRN_MD_CTR, r0
 #endif
 	mtspr	SPRN_SRR1, r10
 	mtspr	SPRN_SRR0, r11
+8 -2
arch/powerpc/kernel/signal.h
···
 
 	return __get_user(dst->sig[0], (u64 __user *)&src->sig[0]);
 }
-#define unsafe_get_user_sigset(dst, src, label) \
-	unsafe_get_user((dst)->sig[0], (u64 __user *)&(src)->sig[0], label)
+#define unsafe_get_user_sigset(dst, src, label) do {			\
+	sigset_t *__dst = dst;						\
+	const sigset_t __user *__src = src;				\
+	int i;								\
+									\
+	for (i = 0; i < _NSIG_WORDS; i++)				\
+		unsafe_get_user(__dst->sig[i], &__src->sig[i], label);	\
+} while (0)
 
 #ifdef CONFIG_VSX
 extern unsigned long copy_vsx_to_user(void __user *to,
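For context, a minimal userspace sketch (not kernel code; demo_sigset and NSIG_WORDS_DEMO are made-up names) of why the old single 64-bit read corrupts the mask on a 32-bit big-endian target: assigning the 64-bit value to the 32-bit sig[0] keeps only the low-order word, which on big-endian holds src->sig[1], so the first word of the blocked-signal set is lost and the second is never copied. The word-by-word loop in the fixed macro avoids both problems.

#include <stdint.h>
#include <stdio.h>

#define NSIG_WORDS_DEMO 2			/* like _NSIG_WORDS on 32-bit powerpc */

struct demo_sigset {
	uint32_t sig[NSIG_WORDS_DEMO];		/* unsigned long is 32-bit on ppc32 */
};

int main(void)
{
	struct demo_sigset src = { .sig = { 0x00000001, 0x00000002 } };
	struct demo_sigset bad = { { 0 } }, good = { { 0 } };

	/*
	 * Old behaviour: one 64-bit read covering both words. On a
	 * big-endian 32-bit target sig[0] lands in the high half and
	 * sig[1] in the low half; truncating to 32 bits keeps sig[1].
	 */
	uint64_t wide = ((uint64_t)src.sig[0] << 32) | src.sig[1];
	bad.sig[0] = (uint32_t)wide;		/* == src.sig[1]; sig[0] is lost */

	/* New behaviour: copy word by word, as the fixed macro does. */
	for (int i = 0; i < NSIG_WORDS_DEMO; i++)
		good.sig[i] = src.sig[i];

	printf("old-style: sig[0]=%#x sig[1]=%#x\n", bad.sig[0], bad.sig[1]);
	printf("new-style: sig[0]=%#x sig[1]=%#x\n", good.sig[0], good.sig[1]);
	return 0;
}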
+2 -2
arch/powerpc/kvm/book3s_hv_rmhandlers.S
···
 .globl	hcall_real_table_end
 hcall_real_table_end:
 
-_GLOBAL(kvmppc_h_set_xdabr)
+_GLOBAL_TOC(kvmppc_h_set_xdabr)
 EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
 	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
 	beq	6f
···
 6:	li	r3, H_PARAMETER
 	blr
 
-_GLOBAL(kvmppc_h_set_dabr)
+_GLOBAL_TOC(kvmppc_h_set_dabr)
 EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
 	li	r5, DABRX_USER | DABRX_KERNEL
 3:
+1 -1
arch/powerpc/mm/nohash/kaslr_booke.c
···
 		pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");
 
 	ram = min_t(phys_addr_t, __max_low_memory, size);
-	ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, false);
+	ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true);
 	linear_sz = min_t(unsigned long, ram, SZ_512M);
 
 	/* If the linear size is smaller than 64M, do not randmize */
+2 -2
arch/powerpc/mm/nohash/tlb.c
···
 
 		if (map)
 			linear_map_top = map_mem_in_cams(linear_map_top,
-							 num_cams, true, true);
+							 num_cams, false, true);
 	}
···
 		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
 
 		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
-					    false, true);
+					    true, true);
 
 		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
 	} else
+19 -23
arch/powerpc/mm/numa.c
···
 {
 	int i, j;
 	struct device_node *root;
-	const __u8 *numa_dist_table;
+	const __u8 *form2_distances;
 	const __be32 *numa_lookup_index;
-	int numa_dist_table_length;
+	int form2_distances_length;
 	int max_numa_index, distance_index;
 
 	if (firmware_has_feature(FW_FEATURE_OPAL))
···
 	max_numa_index = of_read_number(&numa_lookup_index[0], 1);
 
 	/* first element of the array is the size and is encode-int */
-	numa_dist_table = of_get_property(root, "ibm,numa-distance-table", NULL);
-	numa_dist_table_length = of_read_number((const __be32 *)&numa_dist_table[0], 1);
+	form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL);
+	form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1);
 	/* Skip the size which is encoded int */
-	numa_dist_table += sizeof(__be32);
+	form2_distances += sizeof(__be32);
 
-	pr_debug("numa_dist_table_len = %d, numa_dist_indexes_len = %d\n",
-		 numa_dist_table_length, max_numa_index);
+	pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n",
+		 form2_distances_length, max_numa_index);
 
 	for (i = 0; i < max_numa_index; i++)
 		/* +1 skip the max_numa_index in the property */
 		numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1);
 
 
-	if (numa_dist_table_length != max_numa_index * max_numa_index) {
+	if (form2_distances_length != max_numa_index * max_numa_index) {
 		WARN(1, "Wrong NUMA distance information\n");
-		/* consider everybody else just remote. */
-		for (i = 0; i < max_numa_index; i++) {
-			for (j = 0; j < max_numa_index; j++) {
-				int nodeA = numa_id_index_table[i];
-				int nodeB = numa_id_index_table[j];
-
-				if (nodeA == nodeB)
-					numa_distance_table[nodeA][nodeB] = LOCAL_DISTANCE;
-				else
-					numa_distance_table[nodeA][nodeB] = REMOTE_DISTANCE;
-			}
-		}
+		form2_distances = NULL; // don't use it
 	}
-
 	distance_index = 0;
 	for (i = 0; i < max_numa_index; i++) {
 		for (j = 0; j < max_numa_index; j++) {
 			int nodeA = numa_id_index_table[i];
 			int nodeB = numa_id_index_table[j];
+			int dist;
 
-			numa_distance_table[nodeA][nodeB] = numa_dist_table[distance_index++];
-			pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, numa_distance_table[nodeA][nodeB]);
+			if (form2_distances)
+				dist = form2_distances[distance_index++];
+			else if (nodeA == nodeB)
+				dist = LOCAL_DISTANCE;
+			else
+				dist = REMOTE_DISTANCE;
+			numa_distance_table[nodeA][nodeB] = dist;
+			pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist);
 		}
 	}
+
 	of_node_put(root);
 }
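A standalone sketch of the fallback pattern used above, for illustration only (fill_distances and the _DEMO constants are invented names; 10 and 20 mirror the kernel's LOCAL_DISTANCE and REMOTE_DISTANCE): when the firmware table is unusable, the single loop falls back to local/remote defaults instead of reading past a short table.

#include <stdio.h>

#define LOCAL_DISTANCE_DEMO  10		/* same value as the kernel's LOCAL_DISTANCE */
#define REMOTE_DISTANCE_DEMO 20		/* same value as the kernel's REMOTE_DISTANCE */

/* Fill dist[i][j]; if table is NULL (malformed firmware data), fall back
 * to local/remote defaults, mirroring the shape of the fixed loop above. */
static void fill_distances(int n, const unsigned char *table, int dist[n][n])
{
	int idx = 0;

	for (int i = 0; i < n; i++)
		for (int j = 0; j < n; j++) {
			if (table)
				dist[i][j] = table[idx++];
			else
				dist[i][j] = (i == j) ? LOCAL_DISTANCE_DEMO
						      : REMOTE_DISTANCE_DEMO;
		}
}

int main(void)
{
	int dist[2][2];

	fill_distances(2, NULL, dist);	/* malformed table: use the defaults */
	printf("%d %d / %d %d\n", dist[0][0], dist[0][1], dist[1][0], dist[1][1]);
	return 0;
}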
-1
arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
···
 static int mcu_remove(struct i2c_client *client)
 {
 	struct mcu *mcu = i2c_get_clientdata(client);
-	int ret;
 
 	kthread_stop(shutdown_thread);
 
+8 -18
arch/powerpc/platforms/pseries/iommu.c
···
 	phys_addr_t max_addr = memory_hotplug_max();
 	struct device_node *memory;
 
-	/*
-	 * The "ibm,pmemory" can appear anywhere in the address space.
-	 * Assuming it is still backed by page structs, set the upper limit
-	 * for the huge DMA window as MAX_PHYSMEM_BITS.
-	 */
-	if (of_find_node_by_type(NULL, "ibm,pmemory"))
-		return (sizeof(phys_addr_t) * 8 <= MAX_PHYSMEM_BITS) ?
-			(phys_addr_t) -1 : (1ULL << MAX_PHYSMEM_BITS);
-
 	for_each_node_by_type(memory, "memory") {
 		unsigned long start, size;
 		int n_mem_addr_cells, n_mem_size_cells, len;
···
 	u32 ddw_avail[DDW_APPLICABLE_SIZE];
 	struct dma_win *window;
 	struct property *win64;
-	bool ddw_enabled = false;
 	struct failed_ddw_pdn *fpdn;
 	bool default_win_removed = false, direct_mapping = false;
 	bool pmem_present;
···
 
 	if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len)) {
 		direct_mapping = (len >= max_ram_len);
-		ddw_enabled = true;
 		goto out_unlock;
 	}
 
···
 		len = order_base_2(query.largest_available_block << page_shift);
 		win_name = DMA64_PROPNAME;
 	} else {
-		direct_mapping = true;
-		win_name = DIRECT64_PROPNAME;
+		direct_mapping = !default_win_removed ||
+			(len == MAX_PHYSMEM_BITS) ||
+			(!pmem_present && (len == max_ram_len));
+		win_name = direct_mapping ? DIRECT64_PROPNAME : DMA64_PROPNAME;
 	}
 
 	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
···
 			dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n",
 				 dn, ret);
 
-		/* Make sure to clean DDW if any TCE was set*/
-		clean_dma_window(pdn, win64->value);
+			/* Make sure to clean DDW if any TCE was set*/
+			clean_dma_window(pdn, win64->value);
 			goto out_del_list;
 		}
 	} else {
···
 	spin_unlock(&dma_win_list_lock);
 
 	dev->dev.archdata.dma_offset = win_addr;
-	ddw_enabled = true;
 	goto out_unlock;
 
 out_del_list:
···
 	 * as RAM, then we failed to create a window to cover persistent
 	 * memory and need to set the DMA limit.
 	 */
-	if (pmem_present && ddw_enabled && direct_mapping && len == max_ram_len)
+	if (pmem_present && direct_mapping && len == max_ram_len)
 		dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + (1ULL << len);
 
-	return ddw_enabled && direct_mapping;
+	return direct_mapping;
 }
 
 static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
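The new direct-mapping decision can be read as three independent conditions. A hedged restatement as a standalone predicate (want_direct_mapping is a made-up helper, not a kernel function; parameter names mirror the enable_ddw() variables, and len/max_ram_len are log2 sizes as in the driver):

#include <stdbool.h>
#include <stdio.h>

static bool want_direct_mapping(bool default_win_removed, bool pmem_present,
				unsigned int len, unsigned int max_ram_len,
				unsigned int max_physmem_bits)
{
	/* The default window was not removed: keep the pre-existing
	 * behaviour and direct-map the new window. */
	if (!default_win_removed)
		return true;

	/* Only one window left, but it spans the whole physical address
	 * space, so persistent memory is covered as well. */
	if (len == max_physmem_bits)
		return true;

	/* Only one window, no persistent memory, and it covers all of
	 * RAM: direct mapping still works. */
	if (!pmem_present && len == max_ram_len)
		return true;

	/* Otherwise fall back to a dynamic (non direct-mapped) window. */
	return false;
}

int main(void)
{
	/* pmem present, only one window that covers RAM only: no direct map */
	printf("%d\n", want_direct_mapping(true, true, 40, 40, 51));
	/* same window size, but the default window is still there */
	printf("%d\n", want_direct_mapping(false, true, 40, 40, 51));
	return 0;
}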
-1
arch/powerpc/sysdev/xive/Kconfig
···
 	bool
 	select PPC_SMP_MUXED_IPI
 	select HARDIRQS_SW_RESEND
-	select IRQ_DOMAIN_NOMAP
 
 config PPC_XIVE_NATIVE
 	bool
+1 -2
arch/powerpc/sysdev/xive/common.c
···
 
 static void __init xive_init_host(struct device_node *np)
 {
-	xive_irq_domain = irq_domain_add_nomap(np, XIVE_MAX_IRQ,
-					       &xive_irq_domain_ops, NULL);
+	xive_irq_domain = irq_domain_add_tree(np, &xive_irq_domain_ops, NULL);
 	if (WARN_ON(xive_irq_domain == NULL))
 		return;
 	irq_set_default_host(xive_irq_domain);
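A sketch of the tree-domain behaviour the fix relies on, using the generic irqdomain API rather than the xive driver itself (demo_irq_ops and demo_shared_lsi are invented names; the ops structure is an empty stand-in): a tree domain keeps a hwirq-to-virq reverse map, so a second mapping request for the same hardware number, as happens with a shared LSI, gets back the existing Linux interrupt instead of allocating a new one. The nomap domain removed above provides no such reverse map.

#include <linux/irq.h>
#include <linux/irqdomain.h>

static const struct irq_domain_ops demo_irq_ops; /* .map/.xlate elided for brevity */

static void demo_shared_lsi(struct device_node *np, irq_hw_number_t hwirq)
{
	struct irq_domain *d = irq_domain_add_tree(np, &demo_irq_ops, NULL);
	unsigned int a, b;

	if (!d)
		return;

	a = irq_create_mapping(d, hwirq);	/* first user of the LSI */
	b = irq_create_mapping(d, hwirq);	/* second user: existing virq returned */

	WARN_ON(a != b);			/* tree domain: both callers share one virq */
}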