Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'fixes' into next

Merge our fixes branch from the 4.16 cycle.

There were a number of important fixes merged, in particular some Power9
workarounds that we want in next for testing purposes. There have also been
some conflicting changes in the CPU features code which are best merged
and tested before going upstream.

+223 -94
+5
Documentation/accelerators/ocxl.rst
··· 152 152 Associate an event fd to an AFU interrupt so that the user process 153 153 can be notified when the AFU sends an interrupt. 154 154 155 + OCXL_IOCTL_GET_METADATA: 156 + 157 + Obtains configuration information from the card, such as the size of 158 + MMIO areas, the AFU version, and the PASID for the current context. 159 + 155 160 156 161 mmap 157 162 ----
+2 -1
arch/powerpc/boot/Makefile
··· 101 101 libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c 102 102 libfdtheader := fdt.h libfdt.h libfdt_internal.h 103 103 104 - $(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \ 104 + $(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \ 105 + treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \ 105 106 $(addprefix $(obj)/,$(libfdtheader)) 106 107 107 108 src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
+3
arch/powerpc/include/asm/book3s/64/mmu.h
··· 97 97 /* Number of bits in the mm_cpumask */ 98 98 atomic_t active_cpus; 99 99 100 + /* Number of users of the external (Nest) MMU */ 101 + atomic_t copros; 102 + 100 103 /* NPU NMMU context */ 101 104 struct npu_context *npu_context; 102 105
-3
arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
··· 47 47 #endif 48 48 extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr); 49 49 extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr); 50 - extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa, 51 - unsigned long page_size); 52 - extern void radix__flush_tlb_lpid(unsigned long lpid); 53 50 extern void radix__flush_tlb_all(void); 54 51 extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm, 55 52 unsigned long address);
+3 -1
arch/powerpc/include/asm/cputable.h
··· 215 215 #define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x0000080000000000) 216 216 #define CPU_FTR_P9_TM_HV_ASSIST LONG_ASM_CONST(0x0000100000000000) 217 217 #define CPU_FTR_P9_TM_XER_SO_BUG LONG_ASM_CONST(0x0000200000000000) 218 + #define CPU_FTR_P9_TLBIE_BUG LONG_ASM_CONST(0x0000400000000000) 218 219 219 220 #ifndef __ASSEMBLY__ 220 221 ··· 466 465 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 467 466 CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ 468 467 CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ 469 - CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY) 468 + CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \ 469 + CPU_FTR_P9_TLBIE_BUG) 470 470 #define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \ 471 471 (~CPU_FTR_SAO)) 472 472 #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
+13 -5
arch/powerpc/include/asm/mmu_context.h
··· 92 92 static inline void mm_context_add_copro(struct mm_struct *mm) 93 93 { 94 94 /* 95 - * On hash, should only be called once over the lifetime of 96 - * the context, as we can't decrement the active cpus count 97 - * and flush properly for the time being. 95 + * If any copro is in use, increment the active CPU count 96 + * in order to force TLB invalidations to be global as to 97 + * propagate to the Nest MMU. 98 98 */ 99 - inc_mm_active_cpus(mm); 99 + if (atomic_inc_return(&mm->context.copros) == 1) 100 + inc_mm_active_cpus(mm); 100 101 } 101 102 102 103 static inline void mm_context_remove_copro(struct mm_struct *mm) 103 104 { 105 + int c; 106 + 107 + c = atomic_dec_if_positive(&mm->context.copros); 108 + 109 + /* Detect imbalance between add and remove */ 110 + WARN_ON(c < 0); 111 + 104 112 /* 105 113 * Need to broadcast a global flush of the full mm before 106 114 * decrementing active_cpus count, as the next TLBI may be ··· 119 111 * for the time being. Invalidations will remain global if 120 112 * used on hash. 121 113 */ 122 - if (radix_enabled()) { 114 + if (c == 0 && radix_enabled()) { 123 115 flush_all_mm(mm); 124 116 dec_mm_active_cpus(mm); 125 117 }
+6 -1
arch/powerpc/kernel/dt_cpu_ftrs.c
··· 714 714 cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST | 715 715 CPU_FTR_P9_TM_XER_SO_BUG; 716 716 717 - if ((version & 0xffff0000) == 0x004e0000) 717 + if ((version & 0xffff0000) == 0x004e0000) { 718 718 cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR); 719 + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG; 720 + } 719 721 } 720 722 721 723 static void __init cpufeatures_setup_finished(void) ··· 728 726 pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n"); 729 727 cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE; 730 728 } 729 + 730 + /* Make sure powerpc_base_platform is non-NULL */ 731 + powerpc_base_platform = cur_cpu_spec->platform; 731 732 732 733 system_registers.lpcr = mfspr(SPRN_LPCR); 733 734 system_registers.hfscr = mfspr(SPRN_HFSCR);
+1 -1
arch/powerpc/kernel/exceptions-64s.S
··· 706 706 ld r3, PACA_EXSLB+EX_DAR(r13) 707 707 std r3, _DAR(r1) 708 708 beq cr6, 2f 709 - li r10, 0x480 /* fix trap number for I-SLB miss */ 709 + li r10, 0x481 /* fix trap number for I-SLB miss */ 710 710 std r10, _TRAP(r1) 711 711 2: bl save_nvgprs 712 712 addi r3, r1, STACK_FRAME_OVERHEAD
+8
arch/powerpc/kernel/irq.c
··· 476 476 */ 477 477 WARN_ON(!arch_irqs_disabled()); 478 478 479 + /* 480 + * Interrupts must always be hard disabled before irq_happened is 481 + * modified (to prevent lost update in case of interrupt between 482 + * load and store). 483 + */ 484 + __hard_irq_disable(); 485 + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; 486 + 479 487 /* Indicate in the PACA that we have an interrupt to replay */ 480 488 local_paca->irq_happened |= PACA_IRQ_EE; 481 489 }
-1
arch/powerpc/kernel/prom_init.c
··· 874 874 .mmu = 0, 875 875 .hash_ext = 0, 876 876 .radix_ext = 0, 877 - .byte22 = 0, 878 877 }, 879 878 880 879 /* option vector 6: IBM PAPR hints */
+3
arch/powerpc/kvm/book3s_64_mmu_radix.c
··· 157 157 asm volatile("ptesync": : :"memory"); 158 158 asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1) 159 159 : : "r" (addr), "r" (kvm->arch.lpid) : "memory"); 160 + if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) 161 + asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1) 162 + : : "r" (addr), "r" (kvm->arch.lpid) : "memory"); 160 163 asm volatile("ptesync": : :"memory"); 161 164 } 162 165
+11
arch/powerpc/kvm/book3s_hv_rm_mmu.c
··· 473 473 trace_tlbie(kvm->arch.lpid, 0, rbvalues[i], 474 474 kvm->arch.lpid, 0, 0, 0); 475 475 } 476 + 477 + if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) { 478 + /* 479 + * Need the extra ptesync to make sure we don't 480 + * re-order the tlbie 481 + */ 482 + asm volatile("ptesync": : :"memory"); 483 + asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : : 484 + "r" (rbvalues[0]), "r" (kvm->arch.lpid)); 485 + } 486 + 476 487 asm volatile("eieio; tlbsync; ptesync" : : : "memory"); 477 488 kvm->arch.tlbie_lock = 0; 478 489 } else {
+15 -1
arch/powerpc/mm/hash_native_64.c
··· 201 201 return va; 202 202 } 203 203 204 + static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize) 205 + { 206 + if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) { 207 + /* Need the extra ptesync to ensure we don't reorder tlbie*/ 208 + asm volatile("ptesync": : :"memory"); 209 + ___tlbie(vpn, psize, apsize, ssize); 210 + } 211 + } 212 + 204 213 static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) 205 214 { 206 215 unsigned long rb; ··· 287 278 asm volatile("ptesync": : :"memory"); 288 279 } else { 289 280 __tlbie(vpn, psize, apsize, ssize); 281 + fixup_tlbie(vpn, psize, apsize, ssize); 290 282 asm volatile("eieio; tlbsync; ptesync": : :"memory"); 291 283 } 292 284 if (lock_tlbie && !use_local) ··· 781 771 */ 782 772 static void native_flush_hash_range(unsigned long number, int local) 783 773 { 784 - unsigned long vpn; 774 + unsigned long vpn = 0; 785 775 unsigned long hash, index, hidx, shift, slot; 786 776 struct hash_pte *hptep; 787 777 unsigned long hpte_v; ··· 853 843 __tlbie(vpn, psize, psize, ssize); 854 844 } pte_iterate_hashed_end(); 855 845 } 846 + /* 847 + * Just do one more with the last used values. 848 + */ 849 + fixup_tlbie(vpn, psize, psize, ssize); 856 850 asm volatile("eieio; tlbsync; ptesync":::"memory"); 857 851 858 852 if (lock_tlbie)
+1
arch/powerpc/mm/mmu_context_book3s64.c
··· 166 166 mm_iommu_init(mm); 167 167 #endif 168 168 atomic_set(&mm->context.active_cpus, 0); 169 + atomic_set(&mm->context.copros, 0); 169 170 170 171 return 0; 171 172 }
+1
arch/powerpc/mm/pgtable_64.c
··· 481 481 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); 482 482 trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0); 483 483 } 484 + /* do we need fixup here ?*/ 484 485 asm volatile("eieio; tlbsync; ptesync" : : : "memory"); 485 486 } 486 487 EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
+90 -79
arch/powerpc/mm/tlb-radix.c
··· 119 119 trace_tlbie(0, 0, rb, rs, ric, prs, r); 120 120 } 121 121 122 + static inline void __tlbiel_va(unsigned long va, unsigned long pid, 123 + unsigned long ap, unsigned long ric) 124 + { 125 + unsigned long rb,rs,prs,r; 126 + 127 + rb = va & ~(PPC_BITMASK(52, 63)); 128 + rb |= ap << PPC_BITLSHIFT(58); 129 + rs = pid << PPC_BITLSHIFT(31); 130 + prs = 1; /* process scoped */ 131 + r = 1; /* radix format */ 132 + 133 + asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) 134 + : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); 135 + trace_tlbie(0, 1, rb, rs, ric, prs, r); 136 + } 137 + 138 + static inline void __tlbie_va(unsigned long va, unsigned long pid, 139 + unsigned long ap, unsigned long ric) 140 + { 141 + unsigned long rb,rs,prs,r; 142 + 143 + rb = va & ~(PPC_BITMASK(52, 63)); 144 + rb |= ap << PPC_BITLSHIFT(58); 145 + rs = pid << PPC_BITLSHIFT(31); 146 + prs = 1; /* process scoped */ 147 + r = 1; /* radix format */ 148 + 149 + asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) 150 + : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); 151 + trace_tlbie(0, 0, rb, rs, ric, prs, r); 152 + } 153 + 154 + static inline void fixup_tlbie(void) 155 + { 156 + unsigned long pid = 0; 157 + unsigned long va = ((1UL << 52) - 1); 158 + 159 + if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) { 160 + asm volatile("ptesync": : :"memory"); 161 + __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB); 162 + } 163 + } 164 + 122 165 /* 123 166 * We use 128 set in radix mode and 256 set in hpt mode. 124 167 */ ··· 194 151 static inline void _tlbie_pid(unsigned long pid, unsigned long ric) 195 152 { 196 153 asm volatile("ptesync": : :"memory"); 197 - __tlbie_pid(pid, ric); 154 + 155 + /* 156 + * Workaround the fact that the "ric" argument to __tlbie_pid 157 + * must be a compile-time contraint to match the "i" constraint 158 + * in the asm statement. 
159 + */ 160 + switch (ric) { 161 + case RIC_FLUSH_TLB: 162 + __tlbie_pid(pid, RIC_FLUSH_TLB); 163 + break; 164 + case RIC_FLUSH_PWC: 165 + __tlbie_pid(pid, RIC_FLUSH_PWC); 166 + break; 167 + case RIC_FLUSH_ALL: 168 + default: 169 + __tlbie_pid(pid, RIC_FLUSH_ALL); 170 + } 171 + fixup_tlbie(); 198 172 asm volatile("eieio; tlbsync; ptesync": : :"memory"); 199 - } 200 - 201 - static inline void __tlbiel_va(unsigned long va, unsigned long pid, 202 - unsigned long ap, unsigned long ric) 203 - { 204 - unsigned long rb,rs,prs,r; 205 - 206 - rb = va & ~(PPC_BITMASK(52, 63)); 207 - rb |= ap << PPC_BITLSHIFT(58); 208 - rs = pid << PPC_BITLSHIFT(31); 209 - prs = 1; /* process scoped */ 210 - r = 1; /* radix format */ 211 - 212 - asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) 213 - : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); 214 - trace_tlbie(0, 1, rb, rs, ric, prs, r); 215 173 } 216 174 217 175 static inline void __tlbiel_va_range(unsigned long start, unsigned long end, ··· 247 203 asm volatile("ptesync": : :"memory"); 248 204 } 249 205 250 - static inline void __tlbie_va(unsigned long va, unsigned long pid, 251 - unsigned long ap, unsigned long ric) 252 - { 253 - unsigned long rb,rs,prs,r; 254 - 255 - rb = va & ~(PPC_BITMASK(52, 63)); 256 - rb |= ap << PPC_BITLSHIFT(58); 257 - rs = pid << PPC_BITLSHIFT(31); 258 - prs = 1; /* process scoped */ 259 - r = 1; /* radix format */ 260 - 261 - asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) 262 - : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); 263 - trace_tlbie(0, 0, rb, rs, ric, prs, r); 264 - } 265 - 266 206 static inline void __tlbie_va_range(unsigned long start, unsigned long end, 267 207 unsigned long pid, unsigned long page_size, 268 208 unsigned long psize) ··· 265 237 266 238 asm volatile("ptesync": : :"memory"); 267 239 __tlbie_va(va, pid, ap, ric); 240 + fixup_tlbie(); 268 241 asm volatile("eieio; tlbsync; ptesync": : :"memory"); 269 242 } 270 243 ··· 277 248 if (also_pwc) 278 249 
__tlbie_pid(pid, RIC_FLUSH_PWC); 279 250 __tlbie_va_range(start, end, pid, page_size, psize); 251 + fixup_tlbie(); 280 252 asm volatile("eieio; tlbsync; ptesync": : :"memory"); 281 253 } 282 254 ··· 341 311 } 342 312 EXPORT_SYMBOL(radix__local_flush_tlb_page); 343 313 314 + static bool mm_needs_flush_escalation(struct mm_struct *mm) 315 + { 316 + /* 317 + * P9 nest MMU has issues with the page walk cache 318 + * caching PTEs and not flushing them properly when 319 + * RIC = 0 for a PID/LPID invalidate 320 + */ 321 + return atomic_read(&mm->context.copros) != 0; 322 + } 323 + 344 324 #ifdef CONFIG_SMP 345 325 void radix__flush_tlb_mm(struct mm_struct *mm) 346 326 { ··· 361 321 return; 362 322 363 323 preempt_disable(); 364 - if (!mm_is_thread_local(mm)) 365 - _tlbie_pid(pid, RIC_FLUSH_TLB); 366 - else 324 + if (!mm_is_thread_local(mm)) { 325 + if (mm_needs_flush_escalation(mm)) 326 + _tlbie_pid(pid, RIC_FLUSH_ALL); 327 + else 328 + _tlbie_pid(pid, RIC_FLUSH_TLB); 329 + } else 367 330 _tlbiel_pid(pid, RIC_FLUSH_TLB); 368 331 preempt_enable(); 369 332 } ··· 478 435 } 479 436 480 437 if (full) { 481 - if (local) 438 + if (local) { 482 439 _tlbiel_pid(pid, RIC_FLUSH_TLB); 483 - else 484 - _tlbie_pid(pid, RIC_FLUSH_TLB); 440 + } else { 441 + if (mm_needs_flush_escalation(mm)) 442 + _tlbie_pid(pid, RIC_FLUSH_ALL); 443 + else 444 + _tlbie_pid(pid, RIC_FLUSH_TLB); 445 + } 485 446 } else { 486 447 bool hflush = false; 487 448 unsigned long hstart, hend; ··· 512 465 if (hflush) 513 466 __tlbie_va_range(hstart, hend, pid, 514 467 HPAGE_PMD_SIZE, MMU_PAGE_2M); 468 + fixup_tlbie(); 515 469 asm volatile("eieio; tlbsync; ptesync": : :"memory"); 516 470 } 517 471 } ··· 596 548 } 597 549 598 550 if (full) { 551 + if (!local && mm_needs_flush_escalation(mm)) 552 + also_pwc = true; 553 + 599 554 if (local) 600 555 _tlbiel_pid(pid, also_pwc ? 
RIC_FLUSH_ALL : RIC_FLUSH_TLB); 601 556 else ··· 653 602 preempt_enable(); 654 603 } 655 604 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 656 - 657 - void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa, 658 - unsigned long page_size) 659 - { 660 - unsigned long rb,rs,prs,r; 661 - unsigned long ap; 662 - unsigned long ric = RIC_FLUSH_TLB; 663 - 664 - ap = mmu_get_ap(radix_get_mmu_psize(page_size)); 665 - rb = gpa & ~(PPC_BITMASK(52, 63)); 666 - rb |= ap << PPC_BITLSHIFT(58); 667 - rs = lpid & ((1UL << 32) - 1); 668 - prs = 0; /* process scoped */ 669 - r = 1; /* radix format */ 670 - 671 - asm volatile("ptesync": : :"memory"); 672 - asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) 673 - : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); 674 - asm volatile("eieio; tlbsync; ptesync": : :"memory"); 675 - trace_tlbie(lpid, 0, rb, rs, ric, prs, r); 676 - } 677 - EXPORT_SYMBOL(radix__flush_tlb_lpid_va); 678 - 679 - void radix__flush_tlb_lpid(unsigned long lpid) 680 - { 681 - unsigned long rb,rs,prs,r; 682 - unsigned long ric = RIC_FLUSH_ALL; 683 - 684 - rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */ 685 - rs = lpid & ((1UL << 32) - 1); 686 - prs = 0; /* partition scoped */ 687 - r = 1; /* radix format */ 688 - 689 - asm volatile("ptesync": : :"memory"); 690 - asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) 691 - : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); 692 - asm volatile("eieio; tlbsync; ptesync": : :"memory"); 693 - trace_tlbie(lpid, 0, rb, rs, ric, prs, r); 694 - } 695 - EXPORT_SYMBOL(radix__flush_tlb_lpid); 696 605 697 606 void radix__flush_pmd_tlb_range(struct vm_area_struct *vma, 698 607 unsigned long start, unsigned long end)
+27
drivers/misc/ocxl/file.c
··· 102 102 return rc; 103 103 } 104 104 105 + static long afu_ioctl_get_metadata(struct ocxl_context *ctx, 106 + struct ocxl_ioctl_metadata __user *uarg) 107 + { 108 + struct ocxl_ioctl_metadata arg; 109 + 110 + memset(&arg, 0, sizeof(arg)); 111 + 112 + arg.version = 0; 113 + 114 + arg.afu_version_major = ctx->afu->config.version_major; 115 + arg.afu_version_minor = ctx->afu->config.version_minor; 116 + arg.pasid = ctx->pasid; 117 + arg.pp_mmio_size = ctx->afu->config.pp_mmio_stride; 118 + arg.global_mmio_size = ctx->afu->config.global_mmio_size; 119 + 120 + if (copy_to_user(uarg, &arg, sizeof(arg))) 121 + return -EFAULT; 122 + 123 + return 0; 124 + } 125 + 105 126 #define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" : \ 106 127 x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" : \ 107 128 x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" : \ 108 129 x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" : \ 130 + x == OCXL_IOCTL_GET_METADATA ? "GET_METADATA" : \ 109 131 "UNKNOWN") 110 132 111 133 static long afu_ioctl(struct file *file, unsigned int cmd, ··· 179 157 return -EINVAL; 180 158 rc = ocxl_afu_irq_set_fd(ctx, irq_fd.irq_offset, 181 159 irq_fd.eventfd); 160 + break; 161 + 162 + case OCXL_IOCTL_GET_METADATA: 163 + rc = afu_ioctl_get_metadata(ctx, 164 + (struct ocxl_ioctl_metadata __user *) args); 182 165 break; 183 166 184 167 default:
+17
include/uapi/misc/ocxl.h
··· 32 32 __u64 reserved3; 33 33 }; 34 34 35 + struct ocxl_ioctl_metadata { 36 + __u16 version; // struct version, always backwards compatible 37 + 38 + // Version 0 fields 39 + __u8 afu_version_major; 40 + __u8 afu_version_minor; 41 + __u32 pasid; // PASID assigned to the current context 42 + 43 + __u64 pp_mmio_size; // Per PASID MMIO size 44 + __u64 global_mmio_size; 45 + 46 + // End version 0 fields 47 + 48 + __u64 reserved[13]; // Total of 16*u64 49 + }; 50 + 35 51 struct ocxl_ioctl_irq_fd { 36 52 __u64 irq_offset; 37 53 __s32 eventfd; ··· 61 45 #define OCXL_IOCTL_IRQ_ALLOC _IOR(OCXL_MAGIC, 0x11, __u64) 62 46 #define OCXL_IOCTL_IRQ_FREE _IOW(OCXL_MAGIC, 0x12, __u64) 63 47 #define OCXL_IOCTL_IRQ_SET_FD _IOW(OCXL_MAGIC, 0x13, struct ocxl_ioctl_irq_fd) 48 + #define OCXL_IOCTL_GET_METADATA _IOR(OCXL_MAGIC, 0x14, struct ocxl_ioctl_metadata) 64 49 65 50 #endif /* _UAPI_MISC_OCXL_H */
+14
tools/testing/selftests/powerpc/mm/subpage_prot.c
··· 135 135 return 0; 136 136 } 137 137 138 + static int syscall_available(void) 139 + { 140 + int rc; 141 + 142 + errno = 0; 143 + rc = syscall(__NR_subpage_prot, 0, 0, 0); 144 + 145 + return rc == 0 || (errno != ENOENT && errno != ENOSYS); 146 + } 147 + 138 148 int test_anon(void) 139 149 { 140 150 unsigned long align; ··· 154 144 }; 155 145 void *mallocblock; 156 146 unsigned long mallocsize; 147 + 148 + SKIP_IF(!syscall_available()); 157 149 158 150 if (getpagesize() != 0x10000) { 159 151 fprintf(stderr, "Kernel page size must be 64K!\n"); ··· 191 179 void *fileblock; 192 180 off_t filesize; 193 181 int fd; 182 + 183 + SKIP_IF(!syscall_available()); 194 184 195 185 fd = open(file_name, O_RDWR); 196 186 if (fd == -1) {
+1 -1
tools/testing/selftests/powerpc/tm/Makefile
··· 16 16 $(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include 17 17 $(OUTPUT)/tm-tmspr: CFLAGS += -pthread 18 18 $(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64 19 - $(OUTPUT)/tm-resched-dscr: ../pmu/lib.o 19 + $(OUTPUT)/tm-resched-dscr: ../pmu/lib.c 20 20 $(OUTPUT)/tm-unavailable: CFLAGS += -O0 -pthread -m64 -Wno-error=uninitialized -mvsx 21 21 $(OUTPUT)/tm-trap: CFLAGS += -O0 -pthread -m64 22 22
+2
tools/testing/selftests/powerpc/tm/tm-trap.c
··· 255 255 256 256 struct sigaction trap_sa; 257 257 258 + SKIP_IF(!have_htm()); 259 + 258 260 trap_sa.sa_flags = SA_SIGINFO; 259 261 trap_sa.sa_sigaction = trap_signal_handler; 260 262 sigaction(SIGTRAP, &trap_sa, NULL);