Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'powerpc-6.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc updates from Michael Ellerman:

- Add initial support to recognise the HeXin C2000 processor.

- Add papr-vpd and papr-sysparm character device drivers for VPD &
sysparm retrieval, so userspace tools can be adapted to avoid doing
raw firmware calls from userspace.

- Sched domains optimisations for shared processor partitions on
P9/P10.

- A series of optimisations for KVM running as a nested HV under
PowerVM.

- Other small features and fixes.

Thanks to Aditya Gupta, Aneesh Kumar K.V, Arnd Bergmann, Christophe
Leroy, Colin Ian King, Dario Binacchi, David Heidelberg, Geoff Levand,
Gustavo A. R. Silva, Haoran Liu, Jordan Niethe, Kajol Jain, Kevin Hao,
Kunwu Chan, Li kunyu, Li zeming, Masahiro Yamada, Michal Suchánek,
Nathan Lynch, Naveen N Rao, Nicholas Piggin, Randy Dunlap, Sathvika
Vasireddy, Srikar Dronamraju, Stephen Rothwell, Vaibhav Jain, and
Zhao Ke.

* tag 'powerpc-6.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (96 commits)
powerpc/ps3_defconfig: Disable PPC64_BIG_ENDIAN_ELF_ABI_V2
powerpc/86xx: Drop unused CONFIG_MPC8610
powerpc/powernv: Add error handling to opal_prd_range_is_valid
selftests/powerpc: Fix spelling mistake "EACCESS" -> "EACCES"
powerpc/hvcall: Reorder Nestedv2 hcall opcodes
powerpc/ps3: Add missing set_freezable() for ps3_probe_thread()
powerpc/mpc83xx: Use wait_event_freezable() for freezable kthread
powerpc/mpc83xx: Add the missing set_freezable() for agent_thread_fn()
powerpc/fsl: Fix fsl,tmu-calibration to match the schema
powerpc/smp: Dynamically build Powerpc topology
powerpc/smp: Avoid asym packing within thread_group of a core
powerpc/smp: Add __ro_after_init attribute
powerpc/smp: Disable MC domain for shared processor
powerpc/smp: Enable Asym packing for cores on shared processor
powerpc/sched: Cleanup vcpu_is_preempted()
powerpc: add cpu_spec.cpu_features to vmcoreinfo
powerpc/imc-pmu: Add a null pointer check in update_events_in_group()
powerpc/powernv: Add a null pointer check in opal_powercap_init()
powerpc/powernv: Add a null pointer check in opal_event_init()
powerpc/powernv: Add a null pointer check to scom_debug_init_one()
...

+2188 -599
+4
Documentation/userspace-api/ioctl/ioctl-number.rst
··· 349 349 <mailto:vgo@ratio.de> 350 350 0xB1 00-1F PPPoX 351 351 <mailto:mostrows@styx.uwaterloo.ca> 352 + 0xB2 00 arch/powerpc/include/uapi/asm/papr-vpd.h powerpc/pseries VPD API 353 + <mailto:linuxppc-dev> 354 + 0xB2 01-02 arch/powerpc/include/uapi/asm/papr-sysparm.h powerpc/pseries system parameter API 355 + <mailto:linuxppc-dev> 352 356 0xB3 00 linux/mmc/ioctl.h 353 357 0xB4 00-0F linux/gpio.h <mailto:linux-gpio@vger.kernel.org> 354 358 0xB5 00-0F uapi/linux/rpmsg.h <mailto:linux-remoteproc@vger.kernel.org>
+3 -3
MAINTAINERS
··· 12252 12252 F: arch/powerpc/platforms/40x/ 12253 12253 F: arch/powerpc/platforms/44x/ 12254 12254 12255 - LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX 12255 + LINUX FOR POWERPC EMBEDDED PPC85XX 12256 12256 M: Scott Wood <oss@buserror.net> 12257 12257 L: linuxppc-dev@lists.ozlabs.org 12258 12258 S: Odd fixes 12259 12259 T: git git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux.git 12260 12260 F: Documentation/devicetree/bindings/cache/freescale-l2cache.txt 12261 12261 F: Documentation/devicetree/bindings/powerpc/fsl/ 12262 - F: arch/powerpc/platforms/83xx/ 12263 12262 F: arch/powerpc/platforms/85xx/ 12264 12263 12265 - LINUX FOR POWERPC EMBEDDED PPC8XX 12264 + LINUX FOR POWERPC EMBEDDED PPC8XX AND PPC83XX 12266 12265 M: Christophe Leroy <christophe.leroy@csgroup.eu> 12267 12266 L: linuxppc-dev@lists.ozlabs.org 12268 12267 S: Maintained 12269 12268 F: arch/powerpc/platforms/8xx/ 12269 + F: arch/powerpc/platforms/83xx/ 12270 12270 12271 12271 LINUX KERNEL DUMP TEST MODULE (LKDTM) 12272 12272 M: Kees Cook <keescook@chromium.org>
+1
arch/powerpc/Kconfig
··· 189 189 select EDAC_ATOMIC_SCRUB 190 190 select EDAC_SUPPORT 191 191 select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if ARCH_USING_PATCHABLE_FUNCTION_ENTRY 192 + select FUNCTION_ALIGNMENT_4B 192 193 select GENERIC_ATOMIC64 if PPC32 193 194 select GENERIC_CLOCKEVENTS_BROADCAST if SMP 194 195 select GENERIC_CMOS_UPDATE
-1
arch/powerpc/Kconfig.debug
··· 271 271 config PPC_EARLY_DEBUG_PS3GELIC 272 272 bool "Early debugging through the PS3 Ethernet port" 273 273 depends on PPC_PS3 274 - select PS3GELIC_UDBG 275 274 help 276 275 Select this to enable early debugging for the PlayStation3 via 277 276 UDP broadcasts sent out through the Ethernet port.
+18 -7
arch/powerpc/Makefile
··· 10 10 # Rewritten by Cort Dougan and Paul Mackerras 11 11 # 12 12 13 + ifdef cross_compiling 14 + ifeq ($(CROSS_COMPILE),) 15 + # Auto detect cross compiler prefix. 16 + # Look for: (powerpc(64(le)?)?)(-unknown)?-linux(-gnu)?- 17 + CC_ARCHES := powerpc powerpc64 powerpc64le 18 + CC_SUFFIXES := linux linux-gnu unknown-linux-gnu 19 + CROSS_COMPILE := $(call cc-cross-prefix, $(foreach a,$(CC_ARCHES), \ 20 + $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-))) 21 + endif 22 + endif 23 + 13 24 HAS_BIARCH := $(call cc-option-yn, -m32) 14 25 15 26 # Set default 32 bits cross compilers for vdso and boot wrapper 16 27 CROSS32_COMPILE ?= 17 28 18 29 # If we're on a ppc/ppc64/ppc64le machine use that defconfig, otherwise just use 19 - # ppc64_defconfig because we have nothing better to go on. 30 + # ppc64le_defconfig because we have nothing better to go on. 20 31 uname := $(shell uname -m) 21 - KBUILD_DEFCONFIG := $(if $(filter ppc%,$(uname)),$(uname),ppc64)_defconfig 32 + KBUILD_DEFCONFIG := $(if $(filter ppc%,$(uname)),$(uname),ppc64le)_defconfig 22 33 23 34 new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi) 24 35 ··· 172 161 173 162 asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1) 174 163 175 - KBUILD_CPPFLAGS += -I $(srctree)/arch/$(ARCH) $(asinstr) 164 + KBUILD_CPPFLAGS += -I $(srctree)/arch/powerpc $(asinstr) 176 165 KBUILD_AFLAGS += $(AFLAGS-y) 177 166 KBUILD_CFLAGS += $(call cc-option,-msoft-float) 178 167 KBUILD_CFLAGS += $(CFLAGS-y) ··· 243 232 244 233 PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2) 245 234 246 - boot := arch/$(ARCH)/boot 235 + boot := arch/powerpc/boot 247 236 248 237 $(BOOT_TARGETS1): vmlinux 249 238 $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@) ··· 347 336 348 337 define archhelp 349 338 echo '* zImage - Build default images selected by kernel config' 350 - echo ' zImage.* - Compressed kernel image (arch/$(ARCH)/boot/zImage.*)' 339 + echo ' zImage.* - Compressed kernel image (arch/powerpc/boot/zImage.*)' 351 340 echo ' uImage - U-Boot native image format' 352 341 echo ' cuImage.<dt> - Backwards compatible U-Boot image for older' 353 342 echo ' versions which do not support device trees' ··· 358 347 echo ' (your) ~/bin/$(INSTALLKERNEL) or' 359 348 echo ' (distribution) /sbin/$(INSTALLKERNEL) or' 360 349 echo ' install to $$(INSTALL_PATH) and run lilo' 361 - echo ' *_defconfig - Select default config from arch/$(ARCH)/configs' 350 + echo ' *_defconfig - Select default config from arch/powerpc/configs' 362 351 echo '' 363 352 echo ' Targets with <dt> embed a device tree blob inside the image' 364 353 echo ' These targets support board with firmware that does not' 365 354 echo ' support passing a device tree directly. Replace <dt> with the' 366 - echo ' name of a dts file from the arch/$(ARCH)/boot/dts/ directory' 355 + echo ' name of a dts file from the arch/powerpc/boot/dts/ directory' 367 356 echo ' (minus the .dts extension).' 368 357 echo 369 358 $(foreach cfg,$(generated_configs),
+37 -36
arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
··· 367 367 reg = <0xf0000 0x1000>; 368 368 interrupts = <18 2 0 0>; 369 369 fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x30061>; 370 - fsl,tmu-calibration = <0x00000000 0x0000000f 371 - 0x00000001 0x00000017 372 - 0x00000002 0x0000001e 373 - 0x00000003 0x00000026 374 - 0x00000004 0x0000002e 375 - 0x00000005 0x00000035 376 - 0x00000006 0x0000003d 377 - 0x00000007 0x00000044 378 - 0x00000008 0x0000004c 379 - 0x00000009 0x00000053 380 - 0x0000000a 0x0000005b 381 - 0x0000000b 0x00000064 370 + fsl,tmu-calibration = 371 + <0x00000000 0x0000000f>, 372 + <0x00000001 0x00000017>, 373 + <0x00000002 0x0000001e>, 374 + <0x00000003 0x00000026>, 375 + <0x00000004 0x0000002e>, 376 + <0x00000005 0x00000035>, 377 + <0x00000006 0x0000003d>, 378 + <0x00000007 0x00000044>, 379 + <0x00000008 0x0000004c>, 380 + <0x00000009 0x00000053>, 381 + <0x0000000a 0x0000005b>, 382 + <0x0000000b 0x00000064>, 382 383 383 - 0x00010000 0x00000011 384 - 0x00010001 0x0000001c 385 - 0x00010002 0x00000024 386 - 0x00010003 0x0000002b 387 - 0x00010004 0x00000034 388 - 0x00010005 0x00000039 389 - 0x00010006 0x00000042 390 - 0x00010007 0x0000004c 391 - 0x00010008 0x00000051 392 - 0x00010009 0x0000005a 393 - 0x0001000a 0x00000063 384 + <0x00010000 0x00000011>, 385 + <0x00010001 0x0000001c>, 386 + <0x00010002 0x00000024>, 387 + <0x00010003 0x0000002b>, 388 + <0x00010004 0x00000034>, 389 + <0x00010005 0x00000039>, 390 + <0x00010006 0x00000042>, 391 + <0x00010007 0x0000004c>, 392 + <0x00010008 0x00000051>, 393 + <0x00010009 0x0000005a>, 394 + <0x0001000a 0x00000063>, 394 395 395 - 0x00020000 0x00000013 396 - 0x00020001 0x00000019 397 - 0x00020002 0x00000024 398 - 0x00020003 0x0000002c 399 - 0x00020004 0x00000035 400 - 0x00020005 0x0000003d 401 - 0x00020006 0x00000046 402 - 0x00020007 0x00000050 403 - 0x00020008 0x00000059 396 + <0x00020000 0x00000013>, 397 + <0x00020001 0x00000019>, 398 + <0x00020002 0x00000024>, 399 + <0x00020003 0x0000002c>, 400 + <0x00020004 0x00000035>, 401 + <0x00020005 0x0000003d>, 402 + <0x00020006 0x00000046>, 403 + <0x00020007 0x00000050>, 404 + <0x00020008 0x00000059>, 404 405 405 - 0x00030000 0x00000002 406 - 0x00030001 0x0000000d 407 - 0x00030002 0x00000019 408 - 0x00030003 0x00000024>; 406 + <0x00030000 0x00000002>, 407 + <0x00030001 0x0000000d>, 408 + <0x00030002 0x00000019>, 409 + <0x00030003 0x00000024>; 409 410 #thermal-sensor-cells = <1>; 410 411 }; 411 412
+33 -32
arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
··· 447 447 reg = <0xf0000 0x1000>; 448 448 interrupts = <18 2 0 0>; 449 449 fsl,tmu-range = <0xa0000 0x90026 0x8004a 0x1006a>; 450 - fsl,tmu-calibration = <0x00000000 0x00000025 451 - 0x00000001 0x00000028 452 - 0x00000002 0x0000002d 453 - 0x00000003 0x00000031 454 - 0x00000004 0x00000036 455 - 0x00000005 0x0000003a 456 - 0x00000006 0x00000040 457 - 0x00000007 0x00000044 458 - 0x00000008 0x0000004a 459 - 0x00000009 0x0000004f 460 - 0x0000000a 0x00000054 450 + fsl,tmu-calibration = 451 + <0x00000000 0x00000025>, 452 + <0x00000001 0x00000028>, 453 + <0x00000002 0x0000002d>, 454 + <0x00000003 0x00000031>, 455 + <0x00000004 0x00000036>, 456 + <0x00000005 0x0000003a>, 457 + <0x00000006 0x00000040>, 458 + <0x00000007 0x00000044>, 459 + <0x00000008 0x0000004a>, 460 + <0x00000009 0x0000004f>, 461 + <0x0000000a 0x00000054>, 461 462 462 - 0x00010000 0x0000000d 463 - 0x00010001 0x00000013 464 - 0x00010002 0x00000019 465 - 0x00010003 0x0000001f 466 - 0x00010004 0x00000025 467 - 0x00010005 0x0000002d 468 - 0x00010006 0x00000033 469 - 0x00010007 0x00000043 470 - 0x00010008 0x0000004b 471 - 0x00010009 0x00000053 463 + <0x00010000 0x0000000d>, 464 + <0x00010001 0x00000013>, 465 + <0x00010002 0x00000019>, 466 + <0x00010003 0x0000001f>, 467 + <0x00010004 0x00000025>, 468 + <0x00010005 0x0000002d>, 469 + <0x00010006 0x00000033>, 470 + <0x00010007 0x00000043>, 471 + <0x00010008 0x0000004b>, 472 + <0x00010009 0x00000053>, 472 473 473 - 0x00020000 0x00000010 474 - 0x00020001 0x00000017 475 - 0x00020002 0x0000001f 476 - 0x00020003 0x00000029 477 - 0x00020004 0x00000031 478 - 0x00020005 0x0000003c 479 - 0x00020006 0x00000042 480 - 0x00020007 0x0000004d 481 - 0x00020008 0x00000056 474 + <0x00020000 0x00000010>, 475 + <0x00020001 0x00000017>, 476 + <0x00020002 0x0000001f>, 477 + <0x00020003 0x00000029>, 478 + <0x00020004 0x00000031>, 479 + <0x00020005 0x0000003c>, 480 + <0x00020006 0x00000042>, 481 + <0x00020007 0x0000004d>, 482 + <0x00020008 0x00000056>, 482 483 483 - 0x00030000 0x00000012 484 - 0x00030001 0x0000001d>; 484 + <0x00030000 0x00000012>, 485 + <0x00030001 0x0000001d>; 485 486 #thermal-sensor-cells = <1>; 486 487 }; 487 488
+1
arch/powerpc/configs/ppc64_defconfig
··· 92 92 CONFIG_MEMORY_HOTREMOVE=y 93 93 CONFIG_KSM=y 94 94 CONFIG_TRANSPARENT_HUGEPAGE=y 95 + CONFIG_MEM_SOFT_DIRTY=y 95 96 CONFIG_ZONE_DEVICE=y 96 97 CONFIG_NET=y 97 98 CONFIG_PACKET=y
+1
arch/powerpc/configs/ps3_defconfig
··· 24 24 CONFIG_PS3_LPM=m 25 25 # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set 26 26 CONFIG_KEXEC=y 27 + # CONFIG_PPC64_BIG_ENDIAN_ELF_ABI_V2 is not set 27 28 CONFIG_PPC_4K_PAGES=y 28 29 CONFIG_SCHED_SMT=y 29 30 CONFIG_PM=y
+2 -8
arch/powerpc/include/asm/book3s/64/pgtable.h
··· 17 17 #define _PAGE_EXEC 0x00001 /* execute permission */ 18 18 #define _PAGE_WRITE 0x00002 /* write access allowed */ 19 19 #define _PAGE_READ 0x00004 /* read access allowed */ 20 - #define _PAGE_NA _PAGE_PRIVILEGED 21 - #define _PAGE_NAX _PAGE_EXEC 22 - #define _PAGE_RO _PAGE_READ 23 - #define _PAGE_ROX (_PAGE_READ | _PAGE_EXEC) 24 - #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE) 25 - #define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC) 26 20 #define _PAGE_PRIVILEGED 0x00008 /* kernel access only */ 27 21 #define _PAGE_SAO 0x00010 /* Strong access order */ 28 22 #define _PAGE_NON_IDEMPOTENT 0x00020 /* non idempotent memory */ ··· 526 532 static inline bool pte_access_permitted(pte_t pte, bool write) 527 533 { 528 534 /* 529 - * _PAGE_READ is needed for any access and will be 530 - * cleared for PROT_NONE 535 + * _PAGE_READ is needed for any access and will be cleared for 536 + * PROT_NONE. Execute-only mapping via PROT_EXEC also returns false. 531 537 */ 532 538 if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte)) 533 539 return false;
+2 -7
arch/powerpc/include/asm/book3s/64/tlbflush.h
··· 158 158 */ 159 159 } 160 160 161 - static inline bool __pte_protnone(unsigned long pte) 162 - { 163 - return (pte & (pgprot_val(PAGE_NONE) | _PAGE_RWX)) == pgprot_val(PAGE_NONE); 164 - } 165 - 166 161 static inline bool __pte_flags_need_flush(unsigned long oldval, 167 162 unsigned long newval) 168 163 { ··· 174 179 /* 175 180 * We do not expect kernel mappings or non-PTEs or not-present PTEs. 176 181 */ 177 - VM_WARN_ON_ONCE(!__pte_protnone(oldval) && oldval & _PAGE_PRIVILEGED); 178 - VM_WARN_ON_ONCE(!__pte_protnone(newval) && newval & _PAGE_PRIVILEGED); 182 + VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED); 183 + VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED); 179 184 VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE)); 180 185 VM_WARN_ON_ONCE(!(newval & _PAGE_PTE)); 181 186 VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
+1 -1
arch/powerpc/include/asm/ftrace.h
··· 25 25 if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) 26 26 addr += MCOUNT_INSN_SIZE; 27 27 28 - return addr; 28 + return addr; 29 29 } 30 30 31 31 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
+10 -10
arch/powerpc/include/asm/hvcall.h
··· 349 349 #define H_GET_ENERGY_SCALE_INFO 0x450 350 350 #define H_PKS_SIGNED_UPDATE 0x454 351 351 #define H_WATCHDOG 0x45C 352 - #define MAX_HCALL_OPCODE H_WATCHDOG 352 + #define H_GUEST_GET_CAPABILITIES 0x460 353 + #define H_GUEST_SET_CAPABILITIES 0x464 354 + #define H_GUEST_CREATE 0x470 355 + #define H_GUEST_CREATE_VCPU 0x474 356 + #define H_GUEST_GET_STATE 0x478 357 + #define H_GUEST_SET_STATE 0x47C 358 + #define H_GUEST_RUN_VCPU 0x480 359 + #define H_GUEST_COPY_MEMORY 0x484 360 + #define H_GUEST_DELETE 0x488 361 + #define MAX_HCALL_OPCODE H_GUEST_DELETE 353 362 354 363 /* Scope args for H_SCM_UNBIND_ALL */ 355 364 #define H_UNBIND_SCOPE_ALL (0x1) ··· 402 393 #define H_ENTER_NESTED 0xF804 403 394 #define H_TLB_INVALIDATE 0xF808 404 395 #define H_COPY_TOFROM_GUEST 0xF80C 405 - #define H_GUEST_GET_CAPABILITIES 0x460 406 - #define H_GUEST_SET_CAPABILITIES 0x464 407 - #define H_GUEST_CREATE 0x470 408 - #define H_GUEST_CREATE_VCPU 0x474 409 - #define H_GUEST_GET_STATE 0x478 410 - #define H_GUEST_SET_STATE 0x47C 411 - #define H_GUEST_RUN_VCPU 0x480 412 - #define H_GUEST_COPY_MEMORY 0x484 413 - #define H_GUEST_DELETE 0x488 414 396 415 397 /* Flags for H_SVM_PAGE_IN */ 416 398 #define H_PAGE_IN_SHARED 0x1
+7 -3
arch/powerpc/include/asm/kvm_book3s.h
··· 302 302 void kvmhv_vm_nested_init(struct kvm *kvm); 303 303 long kvmhv_set_partition_table(struct kvm_vcpu *vcpu); 304 304 long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu); 305 + void kvmhv_flush_lpid(u64 lpid); 305 306 void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1); 306 307 void kvmhv_release_all_nested(struct kvm *kvm); 307 308 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu); ··· 594 593 595 594 596 595 KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB) 597 - KVMPPC_BOOK3S_VCORE_ACCESSOR(tb_offset, 64, KVMPPC_GSID_TB_OFFSET) 598 596 KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR) 599 597 KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR) 598 + KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(tb_offset, 64, KVMPPC_GSID_TB_OFFSET) 599 + 600 + static inline u64 kvmppc_get_tb_offset(struct kvm_vcpu *vcpu) 601 + { 602 + return vcpu->arch.vcore->tb_offset; 603 + } 600 604 601 605 static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu) 602 606 { 603 - WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0); 604 607 WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0); 605 608 return vcpu->arch.dec_expires; 606 609 } ··· 612 607 static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val) 613 608 { 614 609 vcpu->arch.dec_expires = val; 615 - WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0); 616 610 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB); 617 611 } 618 612
+1
arch/powerpc/include/asm/kvm_book3s_64.h
··· 682 682 int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit); 683 683 int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1); 684 684 int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu); 685 + int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa); 685 686 686 687 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ 687 688
-3
arch/powerpc/include/asm/linkage.h
··· 4 4 5 5 #include <asm/types.h> 6 6 7 - #define __ALIGN .align 2 8 - #define __ALIGN_STR ".align 2" 9 - 10 7 #ifdef CONFIG_PPC64_ELF_ABI_V1 11 8 #define cond_syscall(x) \ 12 9 asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \
+4
arch/powerpc/include/asm/mmu.h
··· 412 412 #include <asm/nohash/mmu.h> 413 413 #endif 414 414 415 + #if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP) 416 + #define __HAVE_ARCH_RESERVED_KERNEL_PAGES 417 + #endif 418 + 415 419 #endif /* __KERNEL__ */ 416 420 #endif /* _ASM_POWERPC_MMU_H_ */
-8
arch/powerpc/include/asm/mmzone.h
··· 42 42 #else 43 43 #define memory_hotplug_max() memblock_end_of_DRAM() 44 44 #endif /* CONFIG_NUMA */ 45 - #ifdef CONFIG_FA_DUMP 46 - #define __HAVE_ARCH_RESERVED_KERNEL_PAGES 47 - #endif 48 - 49 - #ifdef CONFIG_MEMORY_HOTPLUG 50 - extern int create_section_mapping(unsigned long start, unsigned long end, 51 - int nid, pgprot_t prot); 52 - #endif 53 45 54 46 #endif /* __KERNEL__ */ 55 47 #endif /* _ASM_MMZONE_H_ */
+11 -6
arch/powerpc/include/asm/papr-sysparm.h
··· 2 2 #ifndef _ASM_POWERPC_PAPR_SYSPARM_H 3 3 #define _ASM_POWERPC_PAPR_SYSPARM_H 4 4 5 + #include <uapi/asm/papr-sysparm.h> 6 + 5 7 typedef struct { 6 - const u32 token; 8 + u32 token; 7 9 } papr_sysparm_t; 8 10 9 11 #define mk_papr_sysparm(x_) ((papr_sysparm_t){ .token = x_, }) ··· 22 20 #define PAPR_SYSPARM_TLB_BLOCK_INVALIDATE_ATTRS mk_papr_sysparm(50) 23 21 #define PAPR_SYSPARM_LPAR_NAME mk_papr_sysparm(55) 24 22 25 - enum { 26 - PAPR_SYSPARM_MAX_INPUT = 1024, 27 - PAPR_SYSPARM_MAX_OUTPUT = 4000, 28 - }; 29 - 23 + /** 24 + * struct papr_sysparm_buf - RTAS work area layout for system parameter functions. 25 + * 26 + * This is the memory layout of the buffers passed to/from 27 + * ibm,get-system-parameter and ibm,set-system-parameter. It is 28 + * distinct from the papr_sysparm_io_block structure that is passed 29 + * between user space and the kernel. 30 + */ 30 31 struct papr_sysparm_buf { 31 32 __be16 len; 32 33 char val[PAPR_SYSPARM_MAX_OUTPUT];
+25 -8
arch/powerpc/include/asm/paravirt.h
··· 76 76 { 77 77 return lppaca_of(vcpu).idle; 78 78 } 79 + 80 + static inline bool vcpu_is_dispatched(int vcpu) 81 + { 82 + /* 83 + * This is the yield_count. An "odd" value (low bit on) means that 84 + * the processor is yielded (either because of an OS yield or a 85 + * hypervisor preempt). An even value implies that the processor is 86 + * currently executing. 87 + */ 88 + return (!(yield_count_of(vcpu) & 1)); 89 + } 79 90 #else 80 91 static inline bool is_shared_processor(void) 81 92 { ··· 120 109 { 121 110 return false; 122 111 } 112 + static inline bool vcpu_is_dispatched(int vcpu) 113 + { 114 + return true; 115 + } 123 116 #endif 124 117 125 118 #define vcpu_is_preempted vcpu_is_preempted ··· 149 134 * If the hypervisor has dispatched the target CPU on a physical 150 135 * processor, then the target CPU is definitely not preempted. 151 136 */ 152 - if (!(yield_count_of(cpu) & 1)) 137 + if (vcpu_is_dispatched(cpu)) 153 138 return false; 154 139 155 140 /* 156 - * If the target CPU has yielded to Hypervisor but OS has not 157 - * requested idle then the target CPU is definitely preempted. 141 + * if the target CPU is not dispatched and the guest OS 142 + * has not marked the CPU idle, then it is hypervisor preempted. 158 143 */ 159 144 if (!is_vcpu_idle(cpu)) 160 145 return true; ··· 181 166 182 167 /* 183 168 * The PowerVM hypervisor dispatches VMs on a whole core 184 - * basis. So we know that a thread sibling of the local CPU 169 + * basis. So we know that a thread sibling of the executing CPU 185 170 * cannot have been preempted by the hypervisor, even if it 186 171 * has called H_CONFER, which will set the yield bit. 187 172 */ ··· 189 174 return false; 190 175 191 176 /* 192 - * If any of the threads of the target CPU's core are not 193 - * preempted or ceded, then consider target CPU to be 194 - * non-preempted. 177 + * The specific target CPU was marked by guest OS as idle, but 178 + * then also check all other cpus in the core for PowerVM 179 + * because it does core scheduling and one of the vcpu 180 + * of the core getting preempted by hypervisor implies 181 + * other vcpus can also be considered preempted. 195 182 */ 196 183 first_cpu = cpu_first_thread_sibling(cpu); 197 184 for (i = first_cpu; i < first_cpu + threads_per_core; i++) { 198 185 if (i == cpu) 199 186 continue; 200 - if (!(yield_count_of(i) & 1)) 187 + if (vcpu_is_dispatched(i)) 201 188 return false; 202 189 if (!is_vcpu_idle(i)) 203 190 return true;
+3 -2
arch/powerpc/include/asm/ppc-pci.h
··· 35 35 extern unsigned long get_phb_buid (struct device_node *); 36 36 extern int rtas_setup_phb(struct pci_controller *phb); 37 37 38 + int rtas_pci_dn_read_config(struct pci_dn *pdn, int where, int size, u32 *val); 39 + int rtas_pci_dn_write_config(struct pci_dn *pdn, int where, int size, u32 val); 40 + 38 41 #ifdef CONFIG_EEH 39 42 40 43 void eeh_addr_cache_insert_dev(struct pci_dev *dev); ··· 47 44 int eeh_pci_enable(struct eeh_pe *pe, int function); 48 45 int eeh_pe_reset_full(struct eeh_pe *pe, bool include_passed); 49 46 void eeh_save_bars(struct eeh_dev *edev); 50 - int rtas_write_config(struct pci_dn *, int where, int size, u32 val); 51 - int rtas_read_config(struct pci_dn *, int where, int size, u32 *val); 52 47 void eeh_pe_state_mark(struct eeh_pe *pe, int state); 53 48 void eeh_pe_mark_isolated(struct eeh_pe *pe); 54 49 void eeh_pe_state_clear(struct eeh_pe *pe, int state, bool include_passed);
+6
arch/powerpc/include/asm/ps3.h
··· 514 514 515 515 void ps3_early_mm_init(void); 516 516 517 + #ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC 518 + void udbg_shutdown_ps3gelic(void); 519 + #else 520 + static inline void udbg_shutdown_ps3gelic(void) {} 521 + #endif 522 + 517 523 #endif
+1
arch/powerpc/include/asm/reg.h
··· 1361 1361 #define PVR_POWER8E 0x004B 1362 1362 #define PVR_POWER8NVL 0x004C 1363 1363 #define PVR_POWER8 0x004D 1364 + #define PVR_HX_C2000 0x0066 1364 1365 #define PVR_POWER9 0x004E 1365 1366 #define PVR_POWER10 0x0080 1366 1367 #define PVR_BE 0x0070
-154
arch/powerpc/include/asm/reg_a2.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 - /* 3 - * Register definitions specific to the A2 core 4 - * 5 - * Copyright (C) 2008 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp. 6 - */ 7 - 8 - #ifndef __ASM_POWERPC_REG_A2_H__ 9 - #define __ASM_POWERPC_REG_A2_H__ 10 - 11 - #include <asm/asm-const.h> 12 - 13 - #define SPRN_TENSR 0x1b5 14 - #define SPRN_TENS 0x1b6 /* Thread ENable Set */ 15 - #define SPRN_TENC 0x1b7 /* Thread ENable Clear */ 16 - 17 - #define SPRN_A2_CCR0 0x3f0 /* Core Configuration Register 0 */ 18 - #define SPRN_A2_CCR1 0x3f1 /* Core Configuration Register 1 */ 19 - #define SPRN_A2_CCR2 0x3f2 /* Core Configuration Register 2 */ 20 - #define SPRN_MMUCR0 0x3fc /* MMU Control Register 0 */ 21 - #define SPRN_MMUCR1 0x3fd /* MMU Control Register 1 */ 22 - #define SPRN_MMUCR2 0x3fe /* MMU Control Register 2 */ 23 - #define SPRN_MMUCR3 0x3ff /* MMU Control Register 3 */ 24 - 25 - #define SPRN_IAR 0x372 26 - 27 - #define SPRN_IUCR0 0x3f3 28 - #define IUCR0_ICBI_ACK 0x1000 29 - 30 - #define SPRN_XUCR0 0x3f6 /* Execution Unit Config Register 0 */ 31 - 32 - #define A2_IERAT_SIZE 16 33 - #define A2_DERAT_SIZE 32 34 - 35 - /* A2 MMUCR0 bits */ 36 - #define MMUCR0_ECL 0x80000000 /* Extended Class for TLB fills */ 37 - #define MMUCR0_TID_NZ 0x40000000 /* TID is non-zero */ 38 - #define MMUCR0_TS 0x10000000 /* Translation space for TLB fills */ 39 - #define MMUCR0_TGS 0x20000000 /* Guest space for TLB fills */ 40 - #define MMUCR0_TLBSEL 0x0c000000 /* TLB or ERAT target for TLB fills */ 41 - #define MMUCR0_TLBSEL_U 0x00000000 /* TLBSEL = UTLB */ 42 - #define MMUCR0_TLBSEL_I 0x08000000 /* TLBSEL = I-ERAT */ 43 - #define MMUCR0_TLBSEL_D 0x0c000000 /* TLBSEL = D-ERAT */ 44 - #define MMUCR0_LOCKSRSH 0x02000000 /* Use TLB lock on tlbsx. 
*/ 45 - #define MMUCR0_TID_MASK 0x000000ff /* TID field */ 46 - 47 - /* A2 MMUCR1 bits */ 48 - #define MMUCR1_IRRE 0x80000000 /* I-ERAT round robin enable */ 49 - #define MMUCR1_DRRE 0x40000000 /* D-ERAT round robin enable */ 50 - #define MMUCR1_REE 0x20000000 /* Reference Exception Enable*/ 51 - #define MMUCR1_CEE 0x10000000 /* Change exception enable */ 52 - #define MMUCR1_CSINV_ALL 0x00000000 /* Inval ERAT on all CS evts */ 53 - #define MMUCR1_CSINV_NISYNC 0x04000000 /* Inval ERAT on all ex isync*/ 54 - #define MMUCR1_CSINV_NEVER 0x0c000000 /* Don't inval ERAT on CS */ 55 - #define MMUCR1_ICTID 0x00080000 /* IERAT class field as TID */ 56 - #define MMUCR1_ITTID 0x00040000 /* IERAT thdid field as TID */ 57 - #define MMUCR1_DCTID 0x00020000 /* DERAT class field as TID */ 58 - #define MMUCR1_DTTID 0x00010000 /* DERAT thdid field as TID */ 59 - #define MMUCR1_DCCD 0x00008000 /* DERAT class ignore */ 60 - #define MMUCR1_TLBWE_BINV 0x00004000 /* back invalidate on tlbwe */ 61 - 62 - /* A2 MMUCR2 bits */ 63 - #define MMUCR2_PSSEL_SHIFT 4 64 - 65 - /* A2 MMUCR3 bits */ 66 - #define MMUCR3_THID 0x0000000f /* Thread ID */ 67 - 68 - /* *** ERAT TLB bits definitions */ 69 - #define TLB0_EPN_MASK ASM_CONST(0xfffffffffffff000) 70 - #define TLB0_CLASS_MASK ASM_CONST(0x0000000000000c00) 71 - #define TLB0_CLASS_00 ASM_CONST(0x0000000000000000) 72 - #define TLB0_CLASS_01 ASM_CONST(0x0000000000000400) 73 - #define TLB0_CLASS_10 ASM_CONST(0x0000000000000800) 74 - #define TLB0_CLASS_11 ASM_CONST(0x0000000000000c00) 75 - #define TLB0_V ASM_CONST(0x0000000000000200) 76 - #define TLB0_X ASM_CONST(0x0000000000000100) 77 - #define TLB0_SIZE_MASK ASM_CONST(0x00000000000000f0) 78 - #define TLB0_SIZE_4K ASM_CONST(0x0000000000000010) 79 - #define TLB0_SIZE_64K ASM_CONST(0x0000000000000030) 80 - #define TLB0_SIZE_1M ASM_CONST(0x0000000000000050) 81 - #define TLB0_SIZE_16M ASM_CONST(0x0000000000000070) 82 - #define TLB0_SIZE_1G ASM_CONST(0x00000000000000a0) 83 - #define TLB0_THDID_MASK 
ASM_CONST(0x000000000000000f) 84 - #define TLB0_THDID_0 ASM_CONST(0x0000000000000001) 85 - #define TLB0_THDID_1 ASM_CONST(0x0000000000000002) 86 - #define TLB0_THDID_2 ASM_CONST(0x0000000000000004) 87 - #define TLB0_THDID_3 ASM_CONST(0x0000000000000008) 88 - #define TLB0_THDID_ALL ASM_CONST(0x000000000000000f) 89 - 90 - #define TLB1_RESVATTR ASM_CONST(0x00f0000000000000) 91 - #define TLB1_U0 ASM_CONST(0x0008000000000000) 92 - #define TLB1_U1 ASM_CONST(0x0004000000000000) 93 - #define TLB1_U2 ASM_CONST(0x0002000000000000) 94 - #define TLB1_U3 ASM_CONST(0x0001000000000000) 95 - #define TLB1_R ASM_CONST(0x0000800000000000) 96 - #define TLB1_C ASM_CONST(0x0000400000000000) 97 - #define TLB1_RPN_MASK ASM_CONST(0x000003fffffff000) 98 - #define TLB1_W ASM_CONST(0x0000000000000800) 99 - #define TLB1_I ASM_CONST(0x0000000000000400) 100 - #define TLB1_M ASM_CONST(0x0000000000000200) 101 - #define TLB1_G ASM_CONST(0x0000000000000100) 102 - #define TLB1_E ASM_CONST(0x0000000000000080) 103 - #define TLB1_VF ASM_CONST(0x0000000000000040) 104 - #define TLB1_UX ASM_CONST(0x0000000000000020) 105 - #define TLB1_SX ASM_CONST(0x0000000000000010) 106 - #define TLB1_UW ASM_CONST(0x0000000000000008) 107 - #define TLB1_SW ASM_CONST(0x0000000000000004) 108 - #define TLB1_UR ASM_CONST(0x0000000000000002) 109 - #define TLB1_SR ASM_CONST(0x0000000000000001) 110 - 111 - /* A2 erativax attributes definitions */ 112 - #define ERATIVAX_RS_IS_ALL 0x000 113 - #define ERATIVAX_RS_IS_TID 0x040 114 - #define ERATIVAX_RS_IS_CLASS 0x080 115 - #define ERATIVAX_RS_IS_FULLMATCH 0x0c0 116 - #define ERATIVAX_CLASS_00 0x000 117 - #define ERATIVAX_CLASS_01 0x010 118 - #define ERATIVAX_CLASS_10 0x020 119 - #define ERATIVAX_CLASS_11 0x030 120 - #define ERATIVAX_PSIZE_4K (TLB_PSIZE_4K >> 1) 121 - #define ERATIVAX_PSIZE_64K (TLB_PSIZE_64K >> 1) 122 - #define ERATIVAX_PSIZE_1M (TLB_PSIZE_1M >> 1) 123 - #define ERATIVAX_PSIZE_16M (TLB_PSIZE_16M >> 1) 124 - #define ERATIVAX_PSIZE_1G (TLB_PSIZE_1G >> 1) 125 - 126 - /* 
A2 eratilx attributes definitions */ 127 - #define ERATILX_T_ALL 0 128 - #define ERATILX_T_TID 1 129 - #define ERATILX_T_TGS 2 130 - #define ERATILX_T_FULLMATCH 3 131 - #define ERATILX_T_CLASS0 4 132 - #define ERATILX_T_CLASS1 5 133 - #define ERATILX_T_CLASS2 6 134 - #define ERATILX_T_CLASS3 7 135 - 136 - /* XUCR0 bits */ 137 - #define XUCR0_TRACE_UM_T0 0x40000000 /* Thread 0 */ 138 - #define XUCR0_TRACE_UM_T1 0x20000000 /* Thread 1 */ 139 - #define XUCR0_TRACE_UM_T2 0x10000000 /* Thread 2 */ 140 - #define XUCR0_TRACE_UM_T3 0x08000000 /* Thread 3 */ 141 - 142 - /* A2 CCR0 register */ 143 - #define A2_CCR0_PME_DISABLED 0x00000000 144 - #define A2_CCR0_PME_SLEEP 0x40000000 145 - #define A2_CCR0_PME_RVW 0x80000000 146 - #define A2_CCR0_PME_DISABLED2 0xc0000000 147 - 148 - /* A2 CCR2 register */ 149 - #define A2_CCR2_ERAT_ONLY_MODE 0x00000001 150 - #define A2_CCR2_ENABLE_ICSWX 0x00000002 151 - #define A2_CCR2_ENABLE_PC 0x20000000 152 - #define A2_CCR2_ENABLE_TRACE 0x40000000 153 - 154 - #endif /* __ASM_POWERPC_REG_A2_H__ */
+51 -40
arch/powerpc/include/asm/rtas.h
··· 3 3 #define _POWERPC_RTAS_H 4 4 #ifdef __KERNEL__ 5 5 6 + #include <linux/mutex.h> 6 7 #include <linux/spinlock.h> 7 8 #include <asm/page.h> 8 9 #include <asm/rtas-types.h> ··· 202 201 /* Memory set aside for sys_rtas to use with calls that need a work area. */ 203 202 #define RTAS_USER_REGION_SIZE (64 * 1024) 204 203 205 - /* RTAS return status codes */ 206 - #define RTAS_HARDWARE_ERROR -1 /* Hardware Error */ 207 - #define RTAS_BUSY -2 /* RTAS Busy */ 208 - #define RTAS_INVALID_PARAMETER -3 /* Invalid indicator/domain/sensor etc. */ 209 - #define RTAS_EXTENDED_DELAY_MIN 9900 210 - #define RTAS_EXTENDED_DELAY_MAX 9905 204 + /* 205 + * Common RTAS function return values, derived from the table "RTAS 206 + * Status Word Values" in PAPR+ v2.13 7.2.8: "Return Codes". If a 207 + * function can return a value in this table then generally it has the 208 + * meaning listed here. More extended commentary in the documentation 209 + * for rtas_call(). 210 + * 211 + * RTAS functions may use negative and positive numbers not in this 212 + * set for function-specific error and success conditions, 213 + * respectively. 214 + */ 215 + #define RTAS_SUCCESS 0 /* Success. */ 216 + #define RTAS_HARDWARE_ERROR -1 /* Hardware or other unspecified error. */ 217 + #define RTAS_BUSY -2 /* Retry immediately. */ 218 + #define RTAS_INVALID_PARAMETER -3 /* Invalid indicator/domain/sensor etc. */ 219 + #define RTAS_UNEXPECTED_STATE_CHANGE -7 /* Seems limited to EEH and slot reset. */ 220 + #define RTAS_EXTENDED_DELAY_MIN 9900 /* Retry after delaying for ~1ms. */ 221 + #define RTAS_EXTENDED_DELAY_MAX 9905 /* Retry after delaying for ~100s. */ 222 + #define RTAS_ML_ISOLATION_ERROR -9000 /* Multi-level isolation error. 
*/ 211 223 212 224 /* statuses specific to ibm,suspend-me */ 213 225 #define RTAS_SUSPEND_ABORTED 9000 /* Suspension aborted */ ··· 282 268 #define RTAS_TYPE_DEALLOC 0xE3 283 269 #define RTAS_TYPE_DUMP 0xE4 284 270 #define RTAS_TYPE_HOTPLUG 0xE5 285 - /* I don't add PowerMGM events right now, this is a different topic */ 271 + /* I don't add PowerMGM events right now, this is a different topic */ 286 272 #define RTAS_TYPE_PMGM_POWER_SW_ON 0x60 287 273 #define RTAS_TYPE_PMGM_POWER_SW_OFF 0x61 288 274 #define RTAS_TYPE_PMGM_LID_OPEN 0x62 ··· 422 408 { 423 409 return rtas_function_token(handle) != RTAS_UNKNOWN_SERVICE; 424 410 } 425 - extern int rtas_token(const char *service); 426 - extern int rtas_service_present(const char *service); 427 - extern int rtas_call(int token, int, int, int *, ...); 411 + int rtas_token(const char *service); 412 + int rtas_call(int token, int nargs, int nret, int *outputs, ...); 428 413 void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, 429 414 int nret, ...); 430 - extern void __noreturn rtas_restart(char *cmd); 431 - extern void rtas_power_off(void); 432 - extern void __noreturn rtas_halt(void); 433 - extern void rtas_os_term(char *str); 415 + void __noreturn rtas_restart(char *cmd); 416 + void rtas_power_off(void); 417 + void __noreturn rtas_halt(void); 418 + void rtas_os_term(char *str); 434 419 void rtas_activate_firmware(void); 435 - extern int rtas_get_sensor(int sensor, int index, int *state); 436 - extern int rtas_get_sensor_fast(int sensor, int index, int *state); 437 - extern int rtas_get_power_level(int powerdomain, int *level); 438 - extern int rtas_set_power_level(int powerdomain, int level, int *setlevel); 439 - extern bool rtas_indicator_present(int token, int *maxindex); 440 - extern int rtas_set_indicator(int indicator, int index, int new_value); 441 - extern int rtas_set_indicator_fast(int indicator, int index, int new_value); 442 - extern void rtas_progress(char *s, unsigned short hex); 420 + int 
rtas_get_sensor(int sensor, int index, int *state); 421 + int rtas_get_sensor_fast(int sensor, int index, int *state); 422 + int rtas_get_power_level(int powerdomain, int *level); 423 + int rtas_set_power_level(int powerdomain, int level, int *setlevel); 424 + bool rtas_indicator_present(int token, int *maxindex); 425 + int rtas_set_indicator(int indicator, int index, int new_value); 426 + int rtas_set_indicator_fast(int indicator, int index, int new_value); 427 + void rtas_progress(char *s, unsigned short hex); 443 428 int rtas_ibm_suspend_me(int *fw_status); 444 429 int rtas_error_rc(int rtas_rc); 445 430 446 431 struct rtc_time; 447 - extern time64_t rtas_get_boot_time(void); 448 - extern void rtas_get_rtc_time(struct rtc_time *rtc_time); 449 - extern int rtas_set_rtc_time(struct rtc_time *rtc_time); 432 + time64_t rtas_get_boot_time(void); 433 + void rtas_get_rtc_time(struct rtc_time *rtc_time); 434 + int rtas_set_rtc_time(struct rtc_time *rtc_time); 450 435 451 - extern unsigned int rtas_busy_delay_time(int status); 436 + unsigned int rtas_busy_delay_time(int status); 452 437 bool rtas_busy_delay(int status); 453 438 454 - extern int early_init_dt_scan_rtas(unsigned long node, 455 - const char *uname, int depth, void *data); 439 + int early_init_dt_scan_rtas(unsigned long node, const char *uname, int depth, void *data); 456 440 457 - extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal); 441 + void pSeries_log_error(char *buf, unsigned int err_type, int fatal); 458 442 459 443 #ifdef CONFIG_PPC_PSERIES 460 444 extern time64_t last_rtas_event; 461 - extern int clobbering_unread_rtas_event(void); 462 - extern void post_mobility_fixup(void); 445 + int clobbering_unread_rtas_event(void); 463 446 int rtas_syscall_dispatch_ibm_suspend_me(u64 handle); 464 447 #else 465 448 static inline int clobbering_unread_rtas_event(void) { return 0; } ··· 467 456 #endif 468 457 469 458 #ifdef CONFIG_PPC_RTAS_DAEMON 470 - extern void 
rtas_cancel_event_scan(void); 459 + void rtas_cancel_event_scan(void); 471 460 #else 472 461 static inline void rtas_cancel_event_scan(void) { } 473 462 #endif 474 463 475 464 /* Error types logged. */ 476 465 #define ERR_FLAG_ALREADY_LOGGED 0x0 477 - #define ERR_FLAG_BOOT 0x1 /* log was pulled from NVRAM on boot */ 466 + #define ERR_FLAG_BOOT 0x1 /* log was pulled from NVRAM on boot */ 478 467 #define ERR_TYPE_RTAS_LOG 0x2 /* from rtas event-scan */ 479 468 #define ERR_TYPE_KERNEL_PANIC 0x4 /* from die()/panic() */ 480 469 #define ERR_TYPE_KERNEL_PANIC_GZ 0x8 /* ditto, compressed */ ··· 484 473 (ERR_TYPE_RTAS_LOG | ERR_TYPE_KERNEL_PANIC | ERR_TYPE_KERNEL_PANIC_GZ) 485 474 486 475 #define RTAS_DEBUG KERN_DEBUG "RTAS: " 487 - 476 + 488 477 #define RTAS_ERROR_LOG_MAX 2048 489 478 490 479 /* ··· 492 481 * for all rtas calls that require an error buffer argument. 493 482 * This includes 'check-exception' and 'rtas-last-error'. 494 483 */ 495 - extern int rtas_get_error_log_max(void); 484 + int rtas_get_error_log_max(void); 496 485 497 486 /* Event Scan Parameters */ 498 487 #define EVENT_SCAN_ALL_EVENTS 0xf0000000 ··· 513 502 /* RMO buffer reserved for user-space RTAS use */ 514 503 extern unsigned long rtas_rmo_buf; 515 504 505 + extern struct mutex rtas_ibm_get_vpd_lock; 506 + 516 507 #define GLOBAL_INTERRUPT_QUEUE 9005 517 508 518 509 /** ··· 533 520 (devfn << 8) | (reg & 0xff); 534 521 } 535 522 536 - extern void rtas_give_timebase(void); 537 - extern void rtas_take_timebase(void); 523 + void rtas_give_timebase(void); 524 + void rtas_take_timebase(void); 538 525 539 526 #ifdef CONFIG_PPC_RTAS 540 527 static inline int page_is_rtas_user_buf(unsigned long pfn) ··· 547 534 548 535 /* Not the best place to put pSeries_coalesce_init, will be fixed when we 549 536 * move some of the rtas suspend-me stuff to pseries */ 550 - extern void pSeries_coalesce_init(void); 537 + void pSeries_coalesce_init(void); 551 538 void rtas_initialize(void); 552 539 #else 553 540 static 
inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;} 554 541 static inline void pSeries_coalesce_init(void) { } 555 542 static inline void rtas_initialize(void) { } 556 543 #endif 557 - 558 - extern int call_rtas(const char *, int, int, unsigned long *, ...); 559 544 560 545 #ifdef CONFIG_HV_PERF_CTRS 561 546 void read_24x7_sys_info(void);
+9
arch/powerpc/include/uapi/asm/papr-miscdev.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 + #ifndef _UAPI_PAPR_MISCDEV_H_ 3 + #define _UAPI_PAPR_MISCDEV_H_ 4 + 5 + enum { 6 + PAPR_MISCDEV_IOC_ID = 0xb2, 7 + }; 8 + 9 + #endif /* _UAPI_PAPR_MISCDEV_H_ */
+58
arch/powerpc/include/uapi/asm/papr-sysparm.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 + #ifndef _UAPI_PAPR_SYSPARM_H_ 3 + #define _UAPI_PAPR_SYSPARM_H_ 4 + 5 + #include <linux/types.h> 6 + #include <asm/ioctl.h> 7 + #include <asm/papr-miscdev.h> 8 + 9 + enum { 10 + PAPR_SYSPARM_MAX_INPUT = 1024, 11 + PAPR_SYSPARM_MAX_OUTPUT = 4000, 12 + }; 13 + 14 + struct papr_sysparm_io_block { 15 + __u32 parameter; 16 + __u16 length; 17 + char data[PAPR_SYSPARM_MAX_OUTPUT]; 18 + }; 19 + 20 + /** 21 + * PAPR_SYSPARM_IOC_GET - Retrieve the value of a PAPR system parameter. 22 + * 23 + * Uses _IOWR because of one corner case: Retrieving the value of the 24 + * "OS Service Entitlement Status" parameter (60) requires the caller 25 + * to supply input data (a date string) in the buffer passed to 26 + * firmware. So the @length and @data of the incoming 27 + * papr_sysparm_io_block are always used to initialize the work area 28 + * supplied to ibm,get-system-parameter. No other parameters are known 29 + * to parameterize the result this way, and callers are encouraged 30 + * (but not required) to zero-initialize @length and @data in the 31 + * common case. 32 + * 33 + * On error the contents of the ioblock are indeterminate. 34 + * 35 + * Return: 36 + * 0: Success; @length is the length of valid data in @data, not to exceed @PAPR_SYSPARM_MAX_OUTPUT. 37 + * -EIO: Platform error. (-1) 38 + * -EINVAL: Incorrect data length or format. (-9999) 39 + * -EPERM: The calling partition is not allowed to access this parameter. (-9002) 40 + * -EOPNOTSUPP: Parameter not supported on this platform (-3) 41 + */ 42 + #define PAPR_SYSPARM_IOC_GET _IOWR(PAPR_MISCDEV_IOC_ID, 1, struct papr_sysparm_io_block) 43 + 44 + /** 45 + * PAPR_SYSPARM_IOC_SET - Update the value of a PAPR system parameter. 46 + * 47 + * The contents of the ioblock are unchanged regardless of success. 48 + * 49 + * Return: 50 + * 0: Success; the parameter has been updated. 51 + * -EIO: Platform error. 
(-1) 52 + * -EINVAL: Incorrect data length or format. (-9999) 53 + * -EPERM: The calling partition is not allowed to access this parameter. (-9002) 54 + * -EOPNOTSUPP: Parameter not supported on this platform (-3) 55 + */ 56 + #define PAPR_SYSPARM_IOC_SET _IOW(PAPR_MISCDEV_IOC_ID, 2, struct papr_sysparm_io_block) 57 + 58 + #endif /* _UAPI_PAPR_SYSPARM_H_ */
+22
arch/powerpc/include/uapi/asm/papr-vpd.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 + #ifndef _UAPI_PAPR_VPD_H_ 3 + #define _UAPI_PAPR_VPD_H_ 4 + 5 + #include <asm/ioctl.h> 6 + #include <asm/papr-miscdev.h> 7 + 8 + struct papr_location_code { 9 + /* 10 + * PAPR+ v2.13 12.3.2.4 Converged Location Code Rules - Length 11 + * Restrictions. 79 characters plus nul. 12 + */ 13 + char str[80]; 14 + }; 15 + 16 + /* 17 + * ioctl for /dev/papr-vpd. Returns a VPD handle fd corresponding to 18 + * the location code. 19 + */ 20 + #define PAPR_VPD_IOC_CREATE_HANDLE _IOW(PAPR_MISCDEV_IOC_ID, 0, struct papr_location_code) 21 + 22 + #endif /* _UAPI_PAPR_VPD_H_ */
+15
arch/powerpc/kernel/cpu_specs_book3s_64.h
··· 238 238 .machine_check_early = __machine_check_early_realmode_p8, 239 239 .platform = "power8", 240 240 }, 241 + { /* 2.07-compliant processor, HeXin C2000 processor */ 242 + .pvr_mask = 0xffff0000, 243 + .pvr_value = 0x00660000, 244 + .cpu_name = "HX-C2000", 245 + .cpu_features = CPU_FTRS_POWER8, 246 + .cpu_user_features = COMMON_USER_POWER8, 247 + .cpu_user_features2 = COMMON_USER2_POWER8, 248 + .mmu_features = MMU_FTRS_POWER8, 249 + .icache_bsize = 128, 250 + .dcache_bsize = 128, 251 + .cpu_setup = __setup_cpu_power8, 252 + .cpu_restore = __restore_cpu_power8, 253 + .machine_check_early = __machine_check_early_realmode_p8, 254 + .platform = "power8", 255 + }, 241 256 { /* 3.00-compliant processor, i.e. Power9 "architected" mode */ 242 257 .pvr_mask = 0xffffffff, 243 258 .pvr_value = 0x0f000005,
+2 -2
arch/powerpc/kernel/cputable.c
··· 20 20 #include <asm/setup.h> 21 21 #include <asm/cpu_setup.h> 22 22 23 - static struct cpu_spec the_cpu_spec __read_mostly; 23 + static struct cpu_spec the_cpu_spec __ro_after_init; 24 24 25 - struct cpu_spec* cur_cpu_spec __read_mostly = NULL; 25 + struct cpu_spec *cur_cpu_spec __ro_after_init = NULL; 26 26 EXPORT_SYMBOL(cur_cpu_spec); 27 27 28 28 /* The platform string corresponding to the real PVR */
-1
arch/powerpc/kernel/exceptions-64e.S
··· 14 14 #include <asm/cputable.h> 15 15 #include <asm/setup.h> 16 16 #include <asm/thread_info.h> 17 - #include <asm/reg_a2.h> 18 17 #include <asm/exception-64e.h> 19 18 #include <asm/bug.h> 20 19 #include <asm/irqflags.h>
+159 -50
arch/powerpc/kernel/rtas.c
··· 18 18 #include <linux/kernel.h> 19 19 #include <linux/lockdep.h> 20 20 #include <linux/memblock.h> 21 + #include <linux/mutex.h> 21 22 #include <linux/of.h> 22 23 #include <linux/of_fdt.h> 23 24 #include <linux/reboot.h> ··· 71 70 * ppc64le, and we want to keep it that way. It does 72 71 * not make sense for this to be set when @filter 73 72 * is NULL. 73 + * @lock: Pointer to an optional dedicated per-function mutex. This 74 + * should be set for functions that require multiple calls in 75 + * sequence to complete a single operation, and such sequences 76 + * will disrupt each other if allowed to interleave. Users of 77 + * this function are required to hold the associated lock for 78 + * the duration of the call sequence. Add an explanatory 79 + * comment to the function table entry if setting this member. 74 80 */ 75 81 struct rtas_function { 76 82 s32 token; 77 83 const bool banned_for_syscall_on_le:1; 78 84 const char * const name; 79 85 const struct rtas_filter *filter; 86 + struct mutex *lock; 80 87 }; 88 + 89 + /* 90 + * Per-function locks for sequence-based RTAS functions. 91 + */ 92 + static DEFINE_MUTEX(rtas_ibm_activate_firmware_lock); 93 + static DEFINE_MUTEX(rtas_ibm_get_dynamic_sensor_state_lock); 94 + static DEFINE_MUTEX(rtas_ibm_get_indices_lock); 95 + static DEFINE_MUTEX(rtas_ibm_lpar_perftools_lock); 96 + static DEFINE_MUTEX(rtas_ibm_physical_attestation_lock); 97 + static DEFINE_MUTEX(rtas_ibm_set_dynamic_indicator_lock); 98 + DEFINE_MUTEX(rtas_ibm_get_vpd_lock); 81 99 82 100 static struct rtas_function rtas_function_table[] __ro_after_init = { 83 101 [RTAS_FNIDX__CHECK_EXCEPTION] = { ··· 145 125 .buf_idx1 = -1, .size_idx1 = -1, 146 126 .buf_idx2 = -1, .size_idx2 = -1, 147 127 }, 128 + /* 129 + * PAPR+ as of v2.13 doesn't explicitly impose any 130 + * restriction, but this typically requires multiple 131 + * calls before success, and there's no reason to 132 + * allow sequences to interleave. 
133 + */ 134 + .lock = &rtas_ibm_activate_firmware_lock, 148 135 }, 149 136 [RTAS_FNIDX__IBM_CBE_START_PTCAL] = { 150 137 .name = "ibm,cbe-start-ptcal", ··· 223 196 .buf_idx1 = 1, .size_idx1 = -1, 224 197 .buf_idx2 = -1, .size_idx2 = -1, 225 198 }, 199 + /* 200 + * PAPR+ v2.13 R1–7.3.19–3 is explicit that the OS 201 + * must not call ibm,get-dynamic-sensor-state with 202 + * different inputs until a non-retry status has been 203 + * returned. 204 + */ 205 + .lock = &rtas_ibm_get_dynamic_sensor_state_lock, 226 206 }, 227 207 [RTAS_FNIDX__IBM_GET_INDICES] = { 228 208 .name = "ibm,get-indices", ··· 237 203 .buf_idx1 = 2, .size_idx1 = 3, 238 204 .buf_idx2 = -1, .size_idx2 = -1, 239 205 }, 206 + /* 207 + * PAPR+ v2.13 R1–7.3.17–2 says that the OS must not 208 + * interleave ibm,get-indices call sequences with 209 + * different inputs. 210 + */ 211 + .lock = &rtas_ibm_get_indices_lock, 240 212 }, 241 213 [RTAS_FNIDX__IBM_GET_RIO_TOPOLOGY] = { 242 214 .name = "ibm,get-rio-topology", ··· 260 220 .buf_idx1 = 0, .size_idx1 = -1, 261 221 .buf_idx2 = 1, .size_idx2 = 2, 262 222 }, 223 + /* 224 + * PAPR+ v2.13 R1–7.3.20–4 indicates that sequences 225 + * should not be allowed to interleave. 226 + */ 227 + .lock = &rtas_ibm_get_vpd_lock, 263 228 }, 264 229 [RTAS_FNIDX__IBM_GET_XIVE] = { 265 230 .name = "ibm,get-xive", ··· 284 239 .buf_idx1 = 2, .size_idx1 = 3, 285 240 .buf_idx2 = -1, .size_idx2 = -1, 286 241 }, 242 + /* 243 + * PAPR+ v2.13 R1–7.3.26–6 says the OS should allow 244 + * only one call sequence in progress at a time. 245 + */ 246 + .lock = &rtas_ibm_lpar_perftools_lock, 287 247 }, 288 248 [RTAS_FNIDX__IBM_MANAGE_FLASH_IMAGE] = { 289 249 .name = "ibm,manage-flash-image", ··· 327 277 .buf_idx1 = 0, .size_idx1 = 1, 328 278 .buf_idx2 = -1, .size_idx2 = -1, 329 279 }, 280 + /* 281 + * This follows a sequence-based pattern similar to 282 + * ibm,get-vpd et al. 
Since PAPR+ restricts 283 + * interleaving call sequences for other functions of 284 + * this style, assume the restriction applies here, 285 + * even though it's not explicit in the spec. 286 + */ 287 + .lock = &rtas_ibm_physical_attestation_lock, 330 288 }, 331 289 [RTAS_FNIDX__IBM_PLATFORM_DUMP] = { 332 290 .name = "ibm,platform-dump", ··· 342 284 .buf_idx1 = 4, .size_idx1 = 5, 343 285 .buf_idx2 = -1, .size_idx2 = -1, 344 286 }, 287 + /* 288 + * PAPR+ v2.13 7.3.3.4.1 indicates that concurrent 289 + * sequences of ibm,platform-dump are allowed if they 290 + * are operating on different dump tags. So leave the 291 + * lock pointer unset for now. This may need 292 + * reconsideration if kernel-internal users appear. 293 + */ 345 294 }, 346 295 [RTAS_FNIDX__IBM_POWER_OFF_UPS] = { 347 296 .name = "ibm,power-off-ups", ··· 391 326 .buf_idx1 = 2, .size_idx1 = -1, 392 327 .buf_idx2 = -1, .size_idx2 = -1, 393 328 }, 329 + /* 330 + * PAPR+ v2.13 R1–7.3.18–3 says the OS must not call 331 + * this function with different inputs until a 332 + * non-retry status has been returned. 333 + */ 334 + .lock = &rtas_ibm_set_dynamic_indicator_lock, 394 335 }, 395 336 [RTAS_FNIDX__IBM_SET_EEH_OPTION] = { 396 337 .name = "ibm,set-eeh-option", ··· 525 454 }, 526 455 }; 527 456 457 + #define for_each_rtas_function(funcp) \ 458 + for (funcp = &rtas_function_table[0]; \ 459 + funcp < &rtas_function_table[ARRAY_SIZE(rtas_function_table)]; \ 460 + ++funcp) 461 + 528 462 /* 529 463 * Nearly all RTAS calls need to be serialized. All uses of the 530 464 * default rtas_args block must hold rtas_lock. 
··· 601 525 602 526 static int __init rtas_token_to_function_xarray_init(void) 603 527 { 528 + const struct rtas_function *func; 604 529 int err = 0; 605 530 606 - for (size_t i = 0; i < ARRAY_SIZE(rtas_function_table); ++i) { 607 - const struct rtas_function *func = &rtas_function_table[i]; 531 + for_each_rtas_function(func) { 608 532 const s32 token = func->token; 609 533 610 534 if (token == RTAS_UNKNOWN_SERVICE) ··· 620 544 } 621 545 arch_initcall(rtas_token_to_function_xarray_init); 622 546 547 + /* 548 + * For use by sys_rtas(), where the token value is provided by user 549 + * space and we don't want to warn on failed lookups. 550 + */ 551 + static const struct rtas_function *rtas_token_to_function_untrusted(s32 token) 552 + { 553 + return xa_load(&rtas_token_to_function_xarray, token); 554 + } 555 + 556 + /* 557 + * Reverse lookup for deriving the function descriptor from a 558 + * known-good token value in contexts where the former is not already 559 + * available. @token must be valid, e.g. derived from the result of a 560 + * prior lookup against the function table. 561 + */ 623 562 static const struct rtas_function *rtas_token_to_function(s32 token) 624 563 { 625 564 const struct rtas_function *func; ··· 642 551 if (WARN_ONCE(token < 0, "invalid token %d", token)) 643 552 return NULL; 644 553 645 - func = xa_load(&rtas_token_to_function_xarray, token); 554 + func = rtas_token_to_function_untrusted(token); 555 + if (func) 556 + return func; 557 + /* 558 + * Fall back to linear scan in case the reverse mapping hasn't 559 + * been initialized yet. 
560 + */ 561 + if (xa_empty(&rtas_token_to_function_xarray)) { 562 + for_each_rtas_function(func) { 563 + if (func->token == token) 564 + return func; 565 + } 566 + } 646 567 647 - if (WARN_ONCE(!func, "unexpected failed lookup for token %d", token)) 648 - return NULL; 649 - 650 - return func; 568 + WARN_ONCE(true, "unexpected failed lookup for token %d", token); 569 + return NULL; 651 570 } 652 571 653 572 /* This is here deliberately so it's only used in this file */ ··· 671 570 672 571 static void __do_enter_rtas_trace(struct rtas_args *args) 673 572 { 674 - const char *name = NULL; 573 + const struct rtas_function *func = rtas_token_to_function(be32_to_cpu(args->token)); 574 + 575 + /* 576 + * If there is a per-function lock, it must be held by the 577 + * caller. 578 + */ 579 + if (func->lock) 580 + lockdep_assert_held(func->lock); 675 581 676 582 if (args == &rtas_args) 677 583 lockdep_assert_held(&rtas_lock); 678 - /* 679 - * If the tracepoints that consume the function name aren't 680 - * active, avoid the lookup. 681 - */ 682 - if ((trace_rtas_input_enabled() || trace_rtas_output_enabled())) { 683 - const s32 token = be32_to_cpu(args->token); 684 - const struct rtas_function *func = rtas_token_to_function(token); 685 584 686 - name = func->name; 687 - } 688 - 689 - trace_rtas_input(args, name); 585 + trace_rtas_input(args, func->name); 690 586 trace_rtas_ll_entry(args); 691 587 692 588 __do_enter_rtas(args); 693 589 694 590 trace_rtas_ll_exit(args); 695 - trace_rtas_output(args, name); 591 + trace_rtas_output(args, func->name); 696 592 } 697 593 698 594 static void do_enter_rtas(struct rtas_args *args) ··· 768 670 static int pending_newline = 0; /* did last write end with unprinted newline? 
*/ 769 671 static int width = 16; 770 672 771 - if (c == '\n') { 673 + if (c == '\n') { 772 674 while (width-- > 0) 773 675 call_rtas_display_status(' '); 774 676 width = 16; ··· 778 680 if (pending_newline) { 779 681 call_rtas_display_status('\r'); 780 682 call_rtas_display_status('\n'); 781 - } 683 + } 782 684 pending_newline = 0; 783 685 if (width--) { 784 686 call_rtas_display_status(c); ··· 918 820 else 919 821 rtas_call(display_character, 1, 1, NULL, '\r'); 920 822 } 921 - 823 + 922 824 if (row_width) 923 825 width = row_width[current_line]; 924 826 else ··· 938 840 spin_unlock(&progress_lock); 939 841 return; 940 842 } 941 - 843 + 942 844 /* RTAS wants CR-LF, not just LF */ 943 - 845 + 944 846 if (*os == '\n') { 945 847 rtas_call(display_character, 1, 1, NULL, '\r'); 946 848 rtas_call(display_character, 1, 1, NULL, '\n'); ··· 950 852 */ 951 853 rtas_call(display_character, 1, 1, NULL, *os); 952 854 } 953 - 855 + 954 856 if (row_width) 955 857 width = row_width[current_line]; 956 858 else ··· 959 861 width--; 960 862 rtas_call(display_character, 1, 1, NULL, *os); 961 863 } 962 - 864 + 963 865 os++; 964 - 866 + 965 867 /* if we overwrite the screen length */ 966 868 if (width <= 0) 967 869 while ((*os != 0) && (*os != '\n') && (*os != '\r')) 968 870 os++; 969 871 } 970 - 872 + 971 873 spin_unlock(&progress_lock); 972 874 } 973 875 EXPORT_SYMBOL_GPL(rtas_progress); /* needed by rtas_flash module */ ··· 997 899 return tokp ? 
be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE; 998 900 } 999 901 EXPORT_SYMBOL_GPL(rtas_token); 1000 - 1001 - int rtas_service_present(const char *service) 1002 - { 1003 - return rtas_token(service) != RTAS_UNKNOWN_SERVICE; 1004 - } 1005 902 1006 903 #ifdef CONFIG_RTAS_ERROR_LOGGING 1007 904 ··· 1731 1638 return; 1732 1639 } 1733 1640 1641 + mutex_lock(&rtas_ibm_activate_firmware_lock); 1642 + 1734 1643 do { 1735 1644 fwrc = rtas_call(token, 0, 1, NULL); 1736 1645 } while (rtas_busy_delay(fwrc)); 1646 + 1647 + mutex_unlock(&rtas_ibm_activate_firmware_lock); 1737 1648 1738 1649 if (fwrc) 1739 1650 pr_err("ibm,activate-firmware failed (%i)\n", fwrc); ··· 1810 1713 end < (rtas_rmo_buf + RTAS_USER_REGION_SIZE); 1811 1714 } 1812 1715 1813 - static bool block_rtas_call(int token, int nargs, 1716 + static bool block_rtas_call(const struct rtas_function *func, int nargs, 1814 1717 struct rtas_args *args) 1815 1718 { 1816 - const struct rtas_function *func; 1817 1719 const struct rtas_filter *f; 1818 - const bool is_platform_dump = token == rtas_function_token(RTAS_FN_IBM_PLATFORM_DUMP); 1819 - const bool is_config_conn = token == rtas_function_token(RTAS_FN_IBM_CONFIGURE_CONNECTOR); 1720 + const bool is_platform_dump = 1721 + func == &rtas_function_table[RTAS_FNIDX__IBM_PLATFORM_DUMP]; 1722 + const bool is_config_conn = 1723 + func == &rtas_function_table[RTAS_FNIDX__IBM_CONFIGURE_CONNECTOR]; 1820 1724 u32 base, size, end; 1821 1725 1822 1726 /* 1823 - * If this token doesn't correspond to a function the kernel 1824 - * understands, you're not allowed to call it. 1825 - */ 1826 - func = rtas_token_to_function(token); 1827 - if (!func) 1828 - goto err; 1829 - /* 1830 - * And only functions with filters attached are allowed. 1727 + * Only functions with filters attached are allowed. 
1831 1728 */ 1832 1729 f = func->filter; 1833 1730 if (!f) ··· 1878 1787 return false; 1879 1788 err: 1880 1789 pr_err_ratelimited("sys_rtas: RTAS call blocked - exploit attempt?\n"); 1881 - pr_err_ratelimited("sys_rtas: token=0x%x, nargs=%d (called by %s)\n", 1882 - token, nargs, current->comm); 1790 + pr_err_ratelimited("sys_rtas: %s nargs=%d (called by %s)\n", 1791 + func->name, nargs, current->comm); 1883 1792 return true; 1884 1793 } 1885 1794 1886 1795 /* We assume to be passed big endian arguments */ 1887 1796 SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) 1888 1797 { 1798 + const struct rtas_function *func; 1889 1799 struct pin_cookie cookie; 1890 1800 struct rtas_args args; 1891 1801 unsigned long flags; ··· 1916 1824 nargs * sizeof(rtas_arg_t)) != 0) 1917 1825 return -EFAULT; 1918 1826 1919 - if (token == RTAS_UNKNOWN_SERVICE) 1827 + /* 1828 + * If this token doesn't correspond to a function the kernel 1829 + * understands, you're not allowed to call it. 1830 + */ 1831 + func = rtas_token_to_function_untrusted(token); 1832 + if (!func) 1920 1833 return -EINVAL; 1921 1834 1922 1835 args.rets = &args.args[nargs]; 1923 1836 memset(args.rets, 0, nret * sizeof(rtas_arg_t)); 1924 1837 1925 - if (block_rtas_call(token, nargs, &args)) 1838 + if (block_rtas_call(func, nargs, &args)) 1926 1839 return -EINVAL; 1927 1840 1928 1841 if (token_is_restricted_errinjct(token)) { ··· 1960 1863 1961 1864 buff_copy = get_errorlog_buffer(); 1962 1865 1866 + /* 1867 + * If this function has a mutex assigned to it, we must 1868 + * acquire it to avoid interleaving with any kernel-based uses 1869 + * of the same function. Kernel-based sequences acquire the 1870 + * appropriate mutex explicitly. 
1871 + */ 1872 + if (func->lock) 1873 + mutex_lock(func->lock); 1874 + 1963 1875 raw_spin_lock_irqsave(&rtas_lock, flags); 1964 1876 cookie = lockdep_pin_lock(&rtas_lock); 1965 1877 ··· 1983 1877 1984 1878 lockdep_unpin_lock(&rtas_lock, cookie); 1985 1879 raw_spin_unlock_irqrestore(&rtas_lock, flags); 1880 + 1881 + if (func->lock) 1882 + mutex_unlock(func->lock); 1986 1883 1987 1884 if (buff_copy) { 1988 1885 if (errbuf)
+4 -4
arch/powerpc/kernel/rtas_pci.c
··· 43 43 return 0; 44 44 } 45 45 46 - int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val) 46 + int rtas_pci_dn_read_config(struct pci_dn *pdn, int where, int size, u32 *val) 47 47 { 48 48 int returnval = -1; 49 49 unsigned long buid, addr; ··· 87 87 pdn = pci_get_pdn_by_devfn(bus, devfn); 88 88 89 89 /* Validity of pdn is checked in here */ 90 - ret = rtas_read_config(pdn, where, size, val); 90 + ret = rtas_pci_dn_read_config(pdn, where, size, val); 91 91 if (*val == EEH_IO_ERROR_VALUE(size) && 92 92 eeh_dev_check_failure(pdn_to_eeh_dev(pdn))) 93 93 return PCIBIOS_DEVICE_NOT_FOUND; ··· 95 95 return ret; 96 96 } 97 97 98 - int rtas_write_config(struct pci_dn *pdn, int where, int size, u32 val) 98 + int rtas_pci_dn_write_config(struct pci_dn *pdn, int where, int size, u32 val) 99 99 { 100 100 unsigned long buid, addr; 101 101 int ret; ··· 134 134 pdn = pci_get_pdn_by_devfn(bus, devfn); 135 135 136 136 /* Validity of pdn is checked in here. */ 137 - return rtas_write_config(pdn, where, size, val); 137 + return rtas_pci_dn_write_config(pdn, where, size, val); 138 138 } 139 139 140 140 static struct pci_ops rtas_pci_ops = {
+71 -55
arch/powerpc/kernel/smp.c
··· 77 77 #endif 78 78 79 79 struct task_struct *secondary_current; 80 - bool has_big_cores; 81 - bool coregroup_enabled; 82 - bool thread_group_shares_l2; 83 - bool thread_group_shares_l3; 80 + bool has_big_cores __ro_after_init; 81 + bool coregroup_enabled __ro_after_init; 82 + bool thread_group_shares_l2 __ro_after_init; 83 + bool thread_group_shares_l3 __ro_after_init; 84 84 85 85 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); 86 86 DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map); ··· 92 92 EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map); 93 93 EXPORT_PER_CPU_SYMBOL(cpu_core_map); 94 94 EXPORT_SYMBOL_GPL(has_big_cores); 95 - 96 - enum { 97 - #ifdef CONFIG_SCHED_SMT 98 - smt_idx, 99 - #endif 100 - cache_idx, 101 - mc_idx, 102 - die_idx, 103 - }; 104 95 105 96 #define MAX_THREAD_LIST_SIZE 8 106 97 #define THREAD_GROUP_SHARE_L1 1 ··· 978 987 return 0; 979 988 } 980 989 981 - static bool shared_caches; 990 + static bool shared_caches __ro_after_init; 982 991 983 992 #ifdef CONFIG_SCHED_SMT 984 993 /* cpumask of CPUs with asymmetric SMT dependency */ ··· 995 1004 #endif 996 1005 997 1006 /* 1007 + * On shared processor LPARs scheduled on a big core (which has two or more 1008 + * independent thread groups per core), prefer lower numbered CPUs, so 1009 + * that workload consolidates to lesser number of cores. 1010 + */ 1011 + static __ro_after_init DEFINE_STATIC_KEY_FALSE(splpar_asym_pack); 1012 + 1013 + /* 998 1014 * P9 has a slightly odd architecture where pairs of cores share an L2 cache. 999 1015 * This topology makes it *much* cheaper to migrate tasks between adjacent cores 1000 1016 * since the migrated task remains cache hot. 
We want to take advantage of this ··· 1009 1011 */ 1010 1012 static int powerpc_shared_cache_flags(void) 1011 1013 { 1014 + if (static_branch_unlikely(&splpar_asym_pack)) 1015 + return SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING; 1016 + 1012 1017 return SD_SHARE_PKG_RESOURCES; 1018 + } 1019 + 1020 + static int powerpc_shared_proc_flags(void) 1021 + { 1022 + if (static_branch_unlikely(&splpar_asym_pack)) 1023 + return SD_ASYM_PACKING; 1024 + 1025 + return 0; 1013 1026 } 1014 1027 1015 1028 /* ··· 1046 1037 1047 1038 static bool has_coregroup_support(void) 1048 1039 { 1040 + /* Coregroup identification not available on shared systems */ 1041 + if (is_shared_processor()) 1042 + return 0; 1043 + 1049 1044 return coregroup_enabled; 1050 1045 } 1051 1046 ··· 1057 1044 { 1058 1045 return cpu_coregroup_mask(cpu); 1059 1046 } 1060 - 1061 - static struct sched_domain_topology_level powerpc_topology[] = { 1062 - #ifdef CONFIG_SCHED_SMT 1063 - { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) }, 1064 - #endif 1065 - { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) }, 1066 - { cpu_mc_mask, SD_INIT_NAME(MC) }, 1067 - { cpu_cpu_mask, SD_INIT_NAME(PKG) }, 1068 - { NULL, }, 1069 - }; 1070 1047 1071 1048 static int __init init_big_cores(void) 1072 1049 { ··· 1685 1682 BUG(); 1686 1683 } 1687 1684 1688 - static void __init fixup_topology(void) 1685 + static struct sched_domain_topology_level powerpc_topology[6]; 1686 + 1687 + static void __init build_sched_topology(void) 1689 1688 { 1690 - int i; 1689 + int i = 0; 1690 + 1691 + if (is_shared_processor() && has_big_cores) 1692 + static_branch_enable(&splpar_asym_pack); 1691 1693 1692 1694 #ifdef CONFIG_SCHED_SMT 1693 1695 if (has_big_cores) { 1694 1696 pr_info("Big cores detected but using small core scheduling\n"); 1695 - powerpc_topology[smt_idx].mask = smallcore_smt_mask; 1697 + powerpc_topology[i++] = (struct sched_domain_topology_level){ 1698 + smallcore_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) 1699 + }; 
1700 + } else { 1701 + powerpc_topology[i++] = (struct sched_domain_topology_level){ 1702 + cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) 1703 + }; 1696 1704 } 1697 1705 #endif 1698 - 1699 - if (!has_coregroup_support()) 1700 - powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask; 1701 - 1702 - /* 1703 - * Try to consolidate topology levels here instead of 1704 - * allowing scheduler to degenerate. 1705 - * - Dont consolidate if masks are different. 1706 - * - Dont consolidate if sd_flags exists and are different. 1707 - */ 1708 - for (i = 1; i <= die_idx; i++) { 1709 - if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask) 1710 - continue; 1711 - 1712 - if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags && 1713 - powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags) 1714 - continue; 1715 - 1716 - if (!powerpc_topology[i - 1].sd_flags) 1717 - powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags; 1718 - 1719 - powerpc_topology[i].mask = powerpc_topology[i + 1].mask; 1720 - powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags; 1721 - #ifdef CONFIG_SCHED_DEBUG 1722 - powerpc_topology[i].name = powerpc_topology[i + 1].name; 1723 - #endif 1706 + if (shared_caches) { 1707 + powerpc_topology[i++] = (struct sched_domain_topology_level){ 1708 + shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) 1709 + }; 1724 1710 } 1711 + if (has_coregroup_support()) { 1712 + powerpc_topology[i++] = (struct sched_domain_topology_level){ 1713 + cpu_mc_mask, powerpc_shared_proc_flags, SD_INIT_NAME(MC) 1714 + }; 1715 + } 1716 + powerpc_topology[i++] = (struct sched_domain_topology_level){ 1717 + cpu_cpu_mask, powerpc_shared_proc_flags, SD_INIT_NAME(PKG) 1718 + }; 1719 + 1720 + /* There must be one trailing NULL entry left. 
*/ 1721 + BUG_ON(i >= ARRAY_SIZE(powerpc_topology) - 1); 1722 + 1723 + set_sched_topology(powerpc_topology); 1725 1724 } 1726 1725 1727 1726 void __init smp_cpus_done(unsigned int max_cpus) ··· 1738 1733 smp_ops->bringup_done(); 1739 1734 1740 1735 dump_numa_cpu_topology(); 1736 + build_sched_topology(); 1737 + } 1741 1738 1742 - fixup_topology(); 1743 - set_sched_topology(powerpc_topology); 1739 + /* 1740 + * For asym packing, by default lower numbered CPU has higher priority. 1741 + * On shared processors, pack to lower numbered core. However avoid moving 1742 + * between thread_groups within the same core. 1743 + */ 1744 + int arch_asym_cpu_priority(int cpu) 1745 + { 1746 + if (static_branch_unlikely(&splpar_asym_pack)) 1747 + return -cpu / threads_per_core; 1748 + 1749 + return -cpu; 1744 1750 } 1745 1751 1746 1752 #ifdef CONFIG_HOTPLUG_CPU
+2
arch/powerpc/kernel/swsusp_64.c
··· 11 11 #include <linux/interrupt.h> 12 12 #include <linux/nmi.h> 13 13 14 + void do_after_copyback(void); 15 + 14 16 void do_after_copyback(void) 15 17 { 16 18 iommu_restore();
-2
arch/powerpc/kernel/trace/ftrace_entry.S
··· 162 162 .globl ftrace_regs_call 163 163 ftrace_regs_call: 164 164 bl ftrace_stub 165 - nop 166 165 ftrace_regs_exit 1 167 166 168 167 _GLOBAL(ftrace_caller) ··· 170 171 .globl ftrace_call 171 172 ftrace_call: 172 173 bl ftrace_stub 173 - nop 174 174 ftrace_regs_exit 0 175 175 176 176 _GLOBAL(ftrace_stub)
+2
arch/powerpc/kernel/traps.c
··· 1439 1439 return -EINVAL; 1440 1440 } 1441 1441 1442 + #ifdef CONFIG_GENERIC_BUG 1442 1443 int is_valid_bugaddr(unsigned long addr) 1443 1444 { 1444 1445 return is_kernel_addr(addr); 1445 1446 } 1447 + #endif 1446 1448 1447 1449 #ifdef CONFIG_MATH_EMULATION 1448 1450 static int emulate_math(struct pt_regs *regs)
-1
arch/powerpc/kernel/udbg_16550.c
··· 7 7 #include <linux/types.h> 8 8 #include <asm/udbg.h> 9 9 #include <asm/io.h> 10 - #include <asm/reg_a2.h> 11 10 #include <asm/early_ioremap.h> 12 11 13 12 extern u8 real_readb(volatile u8 __iomem *addr);
+1 -1
arch/powerpc/kernel/vdso/Makefile
··· 71 71 targets += vdso32.lds 72 72 CPPFLAGS_vdso32.lds += -P -C -Upowerpc 73 73 targets += vdso64.lds 74 - CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) 74 + CPPFLAGS_vdso64.lds += -P -C 75 75 76 76 # link rule for the .so file, .lds has to be first 77 77 $(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday-32.o FORCE
+1
arch/powerpc/kexec/core.c
··· 75 75 VMCOREINFO_OFFSET(mmu_psize_def, shift); 76 76 #endif 77 77 VMCOREINFO_SYMBOL(cur_cpu_spec); 78 + VMCOREINFO_OFFSET(cpu_spec, cpu_features); 78 79 VMCOREINFO_OFFSET(cpu_spec, mmu_features); 79 80 vmcoreinfo_append_str("NUMBER(RADIX_MMU)=%d\n", early_radix_enabled()); 80 81 vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
+2 -2
arch/powerpc/kvm/book3s.c
··· 302 302 303 303 switch (priority) { 304 304 case BOOK3S_IRQPRIO_DECREMENTER: 305 - deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit; 305 + deliver = !kvmhv_is_nestedv2() && (kvmppc_get_msr(vcpu) & MSR_EE) && !crit; 306 306 vec = BOOK3S_INTERRUPT_DECREMENTER; 307 307 break; 308 308 case BOOK3S_IRQPRIO_EXTERNAL: 309 - deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit; 309 + deliver = !kvmhv_is_nestedv2() && (kvmppc_get_msr(vcpu) & MSR_EE) && !crit; 310 310 vec = BOOK3S_INTERRUPT_EXTERNAL; 311 311 break; 312 312 case BOOK3S_IRQPRIO_SYSTEM_RESET:
+6 -1
arch/powerpc/kvm/book3s_64_mmu_radix.c
··· 40 40 unsigned long quadrant, ret = n; 41 41 bool is_load = !!to; 42 42 43 + if (kvmhv_is_nestedv2()) 44 + return H_UNSUPPORTED; 45 + 43 46 /* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */ 44 47 if (kvmhv_on_pseries()) 45 48 return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr, ··· 100 97 void *to, void *from, unsigned long n) 101 98 { 102 99 int lpid = vcpu->kvm->arch.lpid; 103 - int pid = kvmppc_get_pid(vcpu); 100 + int pid; 104 101 105 102 /* This would cause a data segment intr so don't allow the access */ 106 103 if (eaddr & (0x3FFUL << 52)) ··· 113 110 /* If accessing quadrant 3 then pid is expected to be 0 */ 114 111 if (((eaddr >> 62) & 0x3) == 0x3) 115 112 pid = 0; 113 + else 114 + pid = kvmppc_get_pid(vcpu); 116 115 117 116 eaddr &= ~(0xFFFUL << 52); 118 117
+51 -21
arch/powerpc/kvm/book3s_hv.c
··· 650 650 return err; 651 651 } 652 652 653 - static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap) 653 + static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap, 654 + struct kvmppc_vpa *old_vpap) 654 655 { 655 656 struct kvm *kvm = vcpu->kvm; 656 657 void *va; ··· 691 690 kvmppc_unpin_guest_page(kvm, va, gpa, false); 692 691 va = NULL; 693 692 } 694 - if (vpap->pinned_addr) 695 - kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa, 696 - vpap->dirty); 693 + *old_vpap = *vpap; 694 + 697 695 vpap->gpa = gpa; 698 696 vpap->pinned_addr = va; 699 697 vpap->dirty = false; ··· 702 702 703 703 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu) 704 704 { 705 + struct kvm *kvm = vcpu->kvm; 706 + struct kvmppc_vpa old_vpa = { 0 }; 707 + 705 708 if (!(vcpu->arch.vpa.update_pending || 706 709 vcpu->arch.slb_shadow.update_pending || 707 710 vcpu->arch.dtl.update_pending)) ··· 712 709 713 710 spin_lock(&vcpu->arch.vpa_update_lock); 714 711 if (vcpu->arch.vpa.update_pending) { 715 - kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); 716 - if (vcpu->arch.vpa.pinned_addr) 712 + kvmppc_update_vpa(vcpu, &vcpu->arch.vpa, &old_vpa); 713 + if (old_vpa.pinned_addr) { 714 + if (kvmhv_is_nestedv2()) 715 + kvmhv_nestedv2_set_vpa(vcpu, ~0ull); 716 + kvmppc_unpin_guest_page(kvm, old_vpa.pinned_addr, old_vpa.gpa, 717 + old_vpa.dirty); 718 + } 719 + if (vcpu->arch.vpa.pinned_addr) { 717 720 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); 721 + if (kvmhv_is_nestedv2()) 722 + kvmhv_nestedv2_set_vpa(vcpu, __pa(vcpu->arch.vpa.pinned_addr)); 723 + } 718 724 } 719 725 if (vcpu->arch.dtl.update_pending) { 720 - kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); 726 + kvmppc_update_vpa(vcpu, &vcpu->arch.dtl, &old_vpa); 727 + if (old_vpa.pinned_addr) 728 + kvmppc_unpin_guest_page(kvm, old_vpa.pinned_addr, old_vpa.gpa, 729 + old_vpa.dirty); 721 730 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; 722 731 vcpu->arch.dtl_index = 0; 723 732 } 724 - if 
(vcpu->arch.slb_shadow.update_pending) 725 - kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); 733 + if (vcpu->arch.slb_shadow.update_pending) { 734 + kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow, &old_vpa); 735 + if (old_vpa.pinned_addr) 736 + kvmppc_unpin_guest_page(kvm, old_vpa.pinned_addr, old_vpa.gpa, 737 + old_vpa.dirty); 738 + } 739 + 726 740 spin_unlock(&vcpu->arch.vpa_update_lock); 727 741 } 728 742 ··· 1617 1597 * That can happen due to a bug, or due to a machine check 1618 1598 * occurring at just the wrong time. 1619 1599 */ 1620 - if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) { 1600 + if (!kvmhv_is_nestedv2() && (__kvmppc_get_msr_hv(vcpu) & MSR_HV)) { 1621 1601 printk(KERN_EMERG "KVM trap in HV mode!\n"); 1622 1602 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", 1623 1603 vcpu->arch.trap, kvmppc_get_pc(vcpu), ··· 1708 1688 { 1709 1689 int i; 1710 1690 1711 - if (unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) { 1691 + if (!kvmhv_is_nestedv2() && unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) { 1712 1692 /* 1713 1693 * Guest userspace executed sc 1. This can only be 1714 1694 * reached by the P9 path because the old path ··· 4104 4084 if (rc < 0) 4105 4085 return -EINVAL; 4106 4086 4087 + kvmppc_gse_put_u64(io->vcpu_run_input, KVMPPC_GSID_LPCR, lpcr); 4088 + 4107 4089 accumulate_time(vcpu, &vcpu->arch.in_guest); 4108 4090 rc = plpar_guest_run_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id, 4109 4091 &trap, &i); ··· 4758 4736 4759 4737 if (!nested) { 4760 4738 kvmppc_core_prepare_to_enter(vcpu); 4761 - if (__kvmppc_get_msr_hv(vcpu) & MSR_EE) { 4762 - if (xive_interrupt_pending(vcpu)) 4739 + if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, 4740 + &vcpu->arch.pending_exceptions) || 4741 + xive_interrupt_pending(vcpu)) { 4742 + /* 4743 + * For nested HV, don't synthesize but always pass MER, 4744 + * the L0 will be able to optimise that more 4745 + * effectively than manipulating registers directly. 
4746 + */ 4747 + if (!kvmhv_on_pseries() && (__kvmppc_get_msr_hv(vcpu) & MSR_EE)) 4763 4748 kvmppc_inject_interrupt_hv(vcpu, 4764 - BOOK3S_INTERRUPT_EXTERNAL, 0); 4765 - } else if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, 4766 - &vcpu->arch.pending_exceptions)) { 4767 - lpcr |= LPCR_MER; 4749 + BOOK3S_INTERRUPT_EXTERNAL, 0); 4750 + else 4751 + lpcr |= LPCR_MER; 4768 4752 } 4769 4753 } else if (vcpu->arch.pending_exceptions || 4770 4754 vcpu->arch.doorbell_request || ··· 4834 4806 * entering a nested guest in which case the decrementer is now owned 4835 4807 * by L2 and the L1 decrementer is provided in hdec_expires 4836 4808 */ 4837 - if (kvmppc_core_pending_dec(vcpu) && 4809 + if (!kvmhv_is_nestedv2() && kvmppc_core_pending_dec(vcpu) && 4838 4810 ((tb < kvmppc_dec_expires_host_tb(vcpu)) || 4839 4811 (trap == BOOK3S_INTERRUPT_SYSCALL && 4840 4812 kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED))) ··· 4977 4949 if (run->exit_reason == KVM_EXIT_PAPR_HCALL) { 4978 4950 accumulate_time(vcpu, &vcpu->arch.hcall); 4979 4951 4980 - if (WARN_ON_ONCE(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) { 4952 + if (!kvmhv_is_nestedv2() && WARN_ON_ONCE(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) { 4981 4953 /* 4982 4954 * These should have been caught reflected 4983 4955 * into the guest by now. Final sanity check: ··· 5719 5691 kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); 5720 5692 } 5721 5693 5722 - if (kvmhv_is_nestedv2()) 5694 + if (kvmhv_is_nestedv2()) { 5695 + kvmhv_flush_lpid(kvm->arch.lpid); 5723 5696 plpar_guest_delete(0, kvm->arch.lpid); 5724 - else 5697 + } else { 5725 5698 kvmppc_free_lpid(kvm->arch.lpid); 5699 + } 5726 5700 5727 5701 kvmppc_free_pimap(kvm); 5728 5702 }
+1 -1
arch/powerpc/kvm/book3s_hv_nested.c
··· 503 503 } 504 504 } 505 505 506 - static void kvmhv_flush_lpid(u64 lpid) 506 + void kvmhv_flush_lpid(u64 lpid) 507 507 { 508 508 long rc; 509 509
+29
arch/powerpc/kvm/book3s_hv_nestedv2.c
··· 856 856 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_ptbl_entry); 857 857 858 858 /** 859 + * kvmhv_nestedv2_set_vpa() - register L2 VPA with L0 860 + * @vcpu: vcpu 861 + * @vpa: L1 logical real address 862 + */ 863 + int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa) 864 + { 865 + struct kvmhv_nestedv2_io *io; 866 + struct kvmppc_gs_buff *gsb; 867 + int rc = 0; 868 + 869 + io = &vcpu->arch.nestedv2_io; 870 + gsb = io->vcpu_run_input; 871 + 872 + kvmppc_gsb_reset(gsb); 873 + rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_VPA, vpa); 874 + if (rc < 0) 875 + goto out; 876 + 877 + rc = kvmppc_gsb_send(gsb, 0); 878 + if (rc < 0) 879 + pr_err("KVM-NESTEDv2: couldn't register the L2 VPA (rc=%d)\n", rc); 880 + 881 + out: 882 + kvmppc_gsb_reset(gsb); 883 + return rc; 884 + } 885 + EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_vpa); 886 + 887 + /** 859 888 * kvmhv_nestedv2_parse_output() - receive values from H_GUEST_RUN_VCPU output 860 889 * @vcpu: vcpu 861 890 *
+1
arch/powerpc/kvm/book3s_pr.c
··· 604 604 case PVR_POWER8: 605 605 case PVR_POWER8E: 606 606 case PVR_POWER8NVL: 607 + case PVR_HX_C2000: 607 608 case PVR_POWER9: 608 609 vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE | 609 610 BOOK3S_HFLAG_NEW_TLBIE;
+10 -11
arch/powerpc/kvm/emulate_loadstore.c
··· 93 93 94 94 emulated = EMULATE_FAIL; 95 95 vcpu->arch.regs.msr = kvmppc_get_msr(vcpu); 96 - kvmhv_nestedv2_reload_ptregs(vcpu, &vcpu->arch.regs); 97 96 if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) { 98 97 int type = op.type & INSTR_TYPE_MASK; 99 98 int size = GETSIZE(op.type); ··· 111 112 op.reg, size, !instr_byte_swap); 112 113 113 114 if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) 114 - kvmppc_set_gpr(vcpu, op.update_reg, op.ea); 115 + kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed); 115 116 116 117 break; 117 118 } ··· 131 132 KVM_MMIO_REG_FPR|op.reg, size, 1); 132 133 133 134 if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) 134 - kvmppc_set_gpr(vcpu, op.update_reg, op.ea); 135 + kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed); 135 136 136 137 break; 137 138 #endif ··· 223 224 break; 224 225 } 225 226 #endif 226 - case STORE: 227 - /* if need byte reverse, op.val has been reversed by 228 - * analyse_instr(). 229 - */ 230 - emulated = kvmppc_handle_store(vcpu, op.val, size, 1); 227 + case STORE: { 228 + int instr_byte_swap = op.type & BYTEREV; 229 + 230 + emulated = kvmppc_handle_store(vcpu, kvmppc_get_gpr(vcpu, op.reg), 231 + size, !instr_byte_swap); 231 232 232 233 if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) 233 - kvmppc_set_gpr(vcpu, op.update_reg, op.ea); 234 + kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed); 234 235 235 236 break; 237 + } 236 238 #ifdef CONFIG_PPC_FPU 237 239 case STORE_FP: 238 240 if (kvmppc_check_fp_disabled(vcpu)) ··· 254 254 kvmppc_get_fpr(vcpu, op.reg), size, 1); 255 255 256 256 if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) 257 - kvmppc_set_gpr(vcpu, op.update_reg, op.ea); 257 + kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed); 258 258 259 259 break; 260 260 #endif ··· 358 358 } 359 359 360 360 trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated); 361 - kvmhv_nestedv2_mark_dirty_ptregs(vcpu, &vcpu->arch.regs); 362 361 
363 362 /* Advance past emulated instruction. */ 364 363 if (emulated != EMULATE_FAIL)
+1 -1
arch/powerpc/lib/Makefile
··· 45 45 # so it is only needed for modules, and only for older linkers which 46 46 # do not support --save-restore-funcs 47 47 ifndef CONFIG_LD_IS_BFD 48 - extra-$(CONFIG_PPC64) += crtsavres.o 48 + always-$(CONFIG_PPC64) += crtsavres.o 49 49 endif 50 50 51 51 obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
+12 -2
arch/powerpc/lib/sstep.c
··· 586 586 } u; 587 587 588 588 nb = GETSIZE(op->type); 589 + if (nb > sizeof(u)) 590 + return -EINVAL; 589 591 if (!address_ok(regs, ea, nb)) 590 592 return -EFAULT; 591 593 rn = op->reg; ··· 638 636 } u; 639 637 640 638 nb = GETSIZE(op->type); 639 + if (nb > sizeof(u)) 640 + return -EINVAL; 641 641 if (!address_ok(regs, ea, nb)) 642 642 return -EFAULT; 643 643 rn = op->reg; ··· 684 680 u8 b[sizeof(__vector128)]; 685 681 } u = {}; 686 682 683 + if (size > sizeof(u)) 684 + return -EINVAL; 685 + 687 686 if (!address_ok(regs, ea & ~0xfUL, 16)) 688 687 return -EFAULT; 689 688 /* align to multiple of size */ ··· 695 688 if (err) 696 689 return err; 697 690 if (unlikely(cross_endian)) 698 - do_byte_reverse(&u.b[ea & 0xf], size); 691 + do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u))); 699 692 preempt_disable(); 700 693 if (regs->msr & MSR_VEC) 701 694 put_vr(rn, &u.v); ··· 714 707 u8 b[sizeof(__vector128)]; 715 708 } u; 716 709 710 + if (size > sizeof(u)) 711 + return -EINVAL; 712 + 717 713 if (!address_ok(regs, ea & ~0xfUL, 16)) 718 714 return -EFAULT; 719 715 /* align to multiple of size */ ··· 729 719 u.v = current->thread.vr_state.vr[rn]; 730 720 preempt_enable(); 731 721 if (unlikely(cross_endian)) 732 - do_byte_reverse(&u.b[ea & 0xf], size); 722 + do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u))); 733 723 return copy_mem_out(&u.b[ea & 0xf], ea, size, regs); 734 724 } 735 725 #endif /* CONFIG_ALTIVEC */
+7
arch/powerpc/mm/book3s64/hash_utils.c
··· 310 310 else 311 311 rflags |= 0x3; 312 312 } 313 + VM_WARN_ONCE(!(pteflags & _PAGE_RWX), "no-access mapping request"); 313 314 } else { 314 315 if (pteflags & _PAGE_RWX) 315 316 rflags |= 0x2; 317 + /* 318 + * We should never hit this in normal fault handling because 319 + * a permission check (check_pte_access()) will bubble this 320 + * to higher level linux handler even for PAGE_NONE. 321 + */ 322 + VM_WARN_ONCE(!(pteflags & _PAGE_RWX), "no-access mapping request"); 316 323 if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY))) 317 324 rflags |= 0x1; 318 325 }
+2
arch/powerpc/mm/book3s64/pgtable.c
··· 542 542 set_pte_at(vma->vm_mm, addr, ptep, pte); 543 543 } 544 544 545 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 545 546 /* 546 547 * For hash translation mode, we use the deposited table to store hash slot 547 548 * information and they are stored at PTRS_PER_PMD offset from related pmd ··· 564 563 565 564 return true; 566 565 } 566 + #endif 567 567 568 568 /* 569 569 * Does the CPU support tlbie?
+2 -1
arch/powerpc/mm/book3s64/pkeys.c
··· 89 89 unsigned long pvr = mfspr(SPRN_PVR); 90 90 91 91 if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E || 92 - PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9) 92 + PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9 || 93 + PVR_VER(pvr) == PVR_HX_C2000) 93 94 pkeys_total = 32; 94 95 } 95 96 }
+3 -2
arch/powerpc/mm/init-common.c
··· 126 126 * as to leave enough 0 bits in the address to contain it. */ 127 127 unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1, 128 128 HUGEPD_SHIFT_MASK + 1); 129 - struct kmem_cache *new; 129 + struct kmem_cache *new = NULL; 130 130 131 131 /* It would be nice if this was a BUILD_BUG_ON(), but at the 132 132 * moment, gcc doesn't seem to recognize is_power_of_2 as a ··· 139 139 140 140 align = max_t(unsigned long, align, minalign); 141 141 name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); 142 - new = kmem_cache_create(name, table_size, align, 0, ctor(shift)); 142 + if (name) 143 + new = kmem_cache_create(name, table_size, align, 0, ctor(shift)); 143 144 if (!new) 144 145 panic("Could not allocate pgtable cache for order %d", shift); 145 146
+5
arch/powerpc/mm/mmu_decl.h
··· 181 181 { 182 182 return IS_ENABLED(CONFIG_KFENCE) || debug_pagealloc_enabled(); 183 183 } 184 + 185 + #ifdef CONFIG_MEMORY_HOTPLUG 186 + int create_section_mapping(unsigned long start, unsigned long end, 187 + int nid, pgprot_t prot); 188 + #endif
+3
arch/powerpc/perf/hv-gpci.c
··· 534 534 if (!ret) 535 535 goto parse_result; 536 536 537 + if (ret && (ret != H_PARAMETER)) 538 + goto out; 539 + 537 540 /* 538 541 * ret value as 'H_PARAMETER' implies that the current buffer size 539 542 * can't accommodate all the information, and a partial buffer
+6
arch/powerpc/perf/imc-pmu.c
··· 299 299 attr_group->attrs = attrs; 300 300 do { 301 301 ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value); 302 + if (!ev_val_str) 303 + continue; 302 304 dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str); 303 305 if (!dev_str) 304 306 continue; ··· 308 306 attrs[j++] = dev_str; 309 307 if (pmu->events[i].scale) { 310 308 ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name); 309 + if (!ev_scale_str) 310 + continue; 311 311 dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale); 312 312 if (!dev_str) 313 313 continue; ··· 319 315 320 316 if (pmu->events[i].unit) { 321 317 ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name); 318 + if (!ev_unit_str) 319 + continue; 322 320 dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit); 323 321 if (!dev_str) 324 322 continue;
+1
arch/powerpc/platforms/44x/Kconfig
··· 173 173 config CURRITUCK 174 174 bool "IBM Currituck (476fpe) Support" 175 175 depends on PPC_47x 176 + select I2C 176 177 select SWIOTLB 177 178 select 476FPE 178 179 select FORCE_PCI
+1 -1
arch/powerpc/platforms/44x/idle.c
··· 27 27 isync(); 28 28 } 29 29 30 - int __init ppc44x_idle_init(void) 30 + static int __init ppc44x_idle_init(void) 31 31 { 32 32 if (!mode_spin) { 33 33 /* If we are not setting spin mode
+2
arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
··· 17 17 #include <linux/of_address.h> 18 18 #include <linux/of_irq.h> 19 19 20 + #include "mpc5121_ads.h" 21 + 20 22 static struct device_node *cpld_pic_node; 21 23 static struct irq_domain *cpld_pic_host; 22 24
+1 -1
arch/powerpc/platforms/512x/pdm360ng.c
··· 101 101 } 102 102 #endif /* CONFIG_TOUCHSCREEN_ADS7846 */ 103 103 104 - void __init pdm360ng_init(void) 104 + static void __init pdm360ng_init(void) 105 105 { 106 106 mpc512x_init(); 107 107 pdm360ng_touchscreen_init();
+3 -2
arch/powerpc/platforms/83xx/suspend.c
··· 261 261 262 262 static int agent_thread_fn(void *data) 263 263 { 264 + set_freezable(); 265 + 264 266 while (1) { 265 - wait_event_interruptible(agent_wq, pci_pm_state >= 2); 266 - try_to_freeze(); 267 + wait_event_freezable(agent_wq, pci_pm_state >= 2); 267 268 268 269 if (signal_pending(current) || pci_pm_state < 2) 269 270 continue;
+1 -1
arch/powerpc/platforms/85xx/mpc85xx_rdb.c
··· 76 76 /* P1025 has pins muxed for QE and other functions. To 77 77 * enable QE UEC mode, we need to set bit QE0 for UCC1 78 78 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9 79 - * and QE12 for QE MII management singals in PMUXCR 79 + * and QE12 for QE MII management signals in PMUXCR 80 80 * register. 81 81 */ 82 82 setbits32(&guts->pmuxcr, MPC85xx_PMUXCR_QE(0) |
-7
arch/powerpc/platforms/86xx/Kconfig
··· 52 52 select MPIC 53 53 default y if GEF_SBC610 || GEF_SBC310 || GEF_PPC9A \ 54 54 || MVME7100 55 - 56 - config MPC8610 57 - bool 58 - select HAVE_PCI 59 - select FSL_PCI if PCI 60 - select PPC_UDBG_16550 61 - select MPIC
+1 -1
arch/powerpc/platforms/pasemi/setup.c
··· 64 64 } 65 65 66 66 #ifdef CONFIG_PPC_PASEMI_NEMO 67 - void pas_shutdown(void) 67 + static void pas_shutdown(void) 68 68 { 69 69 /* Set the PLD bit that makes the SB600 think the power button is being pressed */ 70 70 void __iomem *pld_map = ioremap(0xf5000000,4096);
+2 -2
arch/powerpc/platforms/powermac/smp.c
··· 413 413 printk(KERN_ERR "Couldn't get primary IPI interrupt"); 414 414 } 415 415 416 - void __init smp_psurge_take_timebase(void) 416 + static void __init smp_psurge_take_timebase(void) 417 417 { 418 418 if (psurge_type != PSURGE_DUAL) 419 419 return; ··· 429 429 set_dec(tb_ticks_per_jiffy/2); 430 430 } 431 431 432 - void __init smp_psurge_give_timebase(void) 432 + static void __init smp_psurge_give_timebase(void) 433 433 { 434 434 /* Nothing to do here */ 435 435 }
+2
arch/powerpc/platforms/powernv/opal-irqchip.c
··· 275 275 else 276 276 name = kasprintf(GFP_KERNEL, "opal"); 277 277 278 + if (!name) 279 + continue; 278 280 /* Install interrupt handler */ 279 281 rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK, 280 282 name, NULL);
+6
arch/powerpc/platforms/powernv/opal-powercap.c
··· 196 196 197 197 j = 0; 198 198 pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node); 199 + if (!pcaps[i].pg.name) { 200 + kfree(pcaps[i].pattrs); 201 + kfree(pcaps[i].pg.attrs); 202 + goto out_pcaps_pattrs; 203 + } 204 + 199 205 if (has_min) { 200 206 powercap_add_attr(min, "powercap-min", 201 207 &pcaps[i].pattrs[j]);
+2
arch/powerpc/platforms/powernv/opal-prd.c
··· 66 66 const char *label; 67 67 68 68 addrp = of_get_address(node, 0, &range_size, NULL); 69 + if (!addrp) 70 + continue; 69 71 70 72 range_addr = of_read_number(addrp, 2); 71 73 range_end = range_addr + range_size;
+5
arch/powerpc/platforms/powernv/opal-xscom.c
··· 165 165 ent->chip = chip; 166 166 snprintf(ent->name, 16, "%08x", chip); 167 167 ent->path.data = (void *)kasprintf(GFP_KERNEL, "%pOF", dn); 168 + if (!ent->path.data) { 169 + kfree(ent); 170 + return -ENOMEM; 171 + } 172 + 168 173 ent->path.size = strlen((char *)ent->path.data); 169 174 170 175 dir = debugfs_create_dir(ent->name, root);
+2 -1
arch/powerpc/platforms/powernv/subcore.c
··· 425 425 426 426 if (pvr_ver != PVR_POWER8 && 427 427 pvr_ver != PVR_POWER8E && 428 - pvr_ver != PVR_POWER8NVL) 428 + pvr_ver != PVR_POWER8NVL && 429 + pvr_ver != PVR_HX_C2000) 429 430 return 0; 430 431 431 432 /*
-12
arch/powerpc/platforms/ps3/Kconfig
··· 167 167 profiling support of the Cell processor with programs like 168 168 perfmon2, then say Y or M, otherwise say N. 169 169 170 - config PS3GELIC_UDBG 171 - bool "PS3 udbg output via UDP broadcasts on Ethernet" 172 - depends on PPC_PS3 173 - help 174 - Enables udbg early debugging output by sending broadcast UDP 175 - via the Ethernet port (UDP port number 18194). 176 - 177 - This driver uses a trivial implementation and is independent 178 - from the main PS3 gelic network driver. 179 - 180 - If in doubt, say N here. 181 - 182 170 endmenu
+1 -1
arch/powerpc/platforms/ps3/Makefile
··· 3 3 obj-y += interrupt.o exports.o os-area.o 4 4 obj-y += system-bus.o 5 5 6 - obj-$(CONFIG_PS3GELIC_UDBG) += gelic_udbg.o 6 + obj-$(CONFIG_PPC_EARLY_DEBUG_PS3GELIC) += gelic_udbg.o 7 7 obj-$(CONFIG_SMP) += smp.o 8 8 obj-$(CONFIG_SPU_BASE) += spu.o 9 9 obj-y += device-init.o
+1
arch/powerpc/platforms/ps3/device-init.c
··· 827 827 if (res) 828 828 goto fail_free_irq; 829 829 830 + set_freezable(); 830 831 /* Loop here processing the requested notification events. */ 831 832 do { 832 833 try_to_freeze();
+1
arch/powerpc/platforms/ps3/gelic_udbg.c
··· 14 14 #include <linux/ip.h> 15 15 #include <linux/udp.h> 16 16 17 + #include <asm/ps3.h> 17 18 #include <asm/io.h> 18 19 #include <asm/udbg.h> 19 20 #include <asm/lv1call.h>
+1
arch/powerpc/platforms/pseries/Makefile
··· 4 4 5 5 obj-y := lpar.o hvCall.o nvram.o reconfig.o \ 6 6 of_helpers.o rtas-work-area.o papr-sysparm.o \ 7 + papr-vpd.o \ 7 8 setup.o iommu.o event_sources.o ras.o \ 8 9 firmware.o power.o dlpar.o mobility.o rng.o \ 9 10 pci.o pci_dlpar.o eeh_pseries.o msi.o \
+9 -9
arch/powerpc/platforms/pseries/eeh_pseries.c
··· 252 252 if (!pdn) 253 253 return 0; 254 254 255 - rtas_read_config(pdn, PCI_STATUS, 2, &status); 255 + rtas_pci_dn_read_config(pdn, PCI_STATUS, 2, &status); 256 256 if (!(status & PCI_STATUS_CAP_LIST)) 257 257 return 0; 258 258 ··· 270 270 return 0; 271 271 272 272 while (cnt--) { 273 - rtas_read_config(pdn, pos, 1, &pos); 273 + rtas_pci_dn_read_config(pdn, pos, 1, &pos); 274 274 if (pos < 0x40) 275 275 break; 276 276 pos &= ~3; 277 - rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id); 277 + rtas_pci_dn_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id); 278 278 if (id == 0xff) 279 279 break; 280 280 if (id == cap) ··· 294 294 295 295 if (!edev || !edev->pcie_cap) 296 296 return 0; 297 - if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL) 297 + if (rtas_pci_dn_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL) 298 298 return 0; 299 299 else if (!header) 300 300 return 0; ··· 307 307 if (pos < 256) 308 308 break; 309 309 310 - if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL) 310 + if (rtas_pci_dn_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL) 311 311 break; 312 312 } 313 313 ··· 412 412 if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) { 413 413 edev->mode |= EEH_DEV_BRIDGE; 414 414 if (edev->pcie_cap) { 415 - rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS, 416 - 2, &pcie_flags); 415 + rtas_pci_dn_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS, 416 + 2, &pcie_flags); 417 417 pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4; 418 418 if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT) 419 419 edev->mode |= EEH_DEV_ROOT_PORT; ··· 676 676 { 677 677 struct pci_dn *pdn = eeh_dev_to_pdn(edev); 678 678 679 - return rtas_read_config(pdn, where, size, val); 679 + return rtas_pci_dn_read_config(pdn, where, size, val); 680 680 } 681 681 682 682 /** ··· 692 692 { 693 693 struct pci_dn *pdn = eeh_dev_to_pdn(edev); 694 694 695 - return rtas_write_config(pdn, where, size, val); 695 + return rtas_pci_dn_write_config(pdn, where, 
size, val); 696 696 } 697 697 698 698 #ifdef CONFIG_PCI_IOV
+11 -5
arch/powerpc/platforms/pseries/hotplug-memory.c
··· 208 208 int rc; 209 209 210 210 mem_block = lmb_to_memblock(lmb); 211 - if (!mem_block) 211 + if (!mem_block) { 212 + pr_err("Failed memory block lookup for LMB 0x%x\n", lmb->drc_index); 212 213 return -EINVAL; 214 + } 213 215 214 216 if (online && mem_block->dev.offline) 215 217 rc = device_online(&mem_block->dev); ··· 438 436 } 439 437 } 440 438 441 - if (!lmb_found) 439 + if (!lmb_found) { 440 + pr_debug("Failed to look up LMB for drc index %x\n", drc_index); 442 441 rc = -EINVAL; 443 - 444 - if (rc) 442 + } else if (rc) { 445 443 pr_debug("Failed to hot-remove memory at %llx\n", 446 444 lmb->base_addr); 447 - else 445 + } else { 448 446 pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr); 447 + } 449 448 450 449 return rc; 451 450 } ··· 578 575 rc = update_lmb_associativity_index(lmb); 579 576 if (rc) { 580 577 dlpar_release_drc(lmb->drc_index); 578 + pr_err("Failed to configure LMB 0x%x\n", lmb->drc_index); 581 579 return rc; 582 580 } 583 581 ··· 592 588 /* Add the memory */ 593 589 rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_MEMMAP_ON_MEMORY); 594 590 if (rc) { 591 + pr_err("Failed to add LMB 0x%x to node %u", lmb->drc_index, nid); 595 592 invalidate_lmb_associativity_index(lmb); 596 593 return rc; 597 594 } 598 595 599 596 rc = dlpar_online_lmb(lmb); 600 597 if (rc) { 598 + pr_err("Failed to online LMB 0x%x on node %u\n", lmb->drc_index, nid); 601 599 __remove_memory(lmb->base_addr, block_sz); 602 600 invalidate_lmb_associativity_index(lmb); 603 601 } else {
+203 -2
arch/powerpc/platforms/pseries/papr-sysparm.c
··· 2 2 3 3 #define pr_fmt(fmt) "papr-sysparm: " fmt 4 4 5 + #include <linux/anon_inodes.h> 5 6 #include <linux/bug.h> 7 + #include <linux/file.h> 8 + #include <linux/fs.h> 6 9 #include <linux/init.h> 7 10 #include <linux/kernel.h> 11 + #include <linux/miscdevice.h> 8 12 #include <linux/printk.h> 9 13 #include <linux/slab.h> 10 - #include <asm/rtas.h> 14 + #include <linux/uaccess.h> 15 + #include <asm/machdep.h> 11 16 #include <asm/papr-sysparm.h> 12 17 #include <asm/rtas-work-area.h> 18 + #include <asm/rtas.h> 13 19 14 20 struct papr_sysparm_buf *papr_sysparm_buf_alloc(void) 15 21 { ··· 27 21 void papr_sysparm_buf_free(struct papr_sysparm_buf *buf) 28 22 { 29 23 kfree(buf); 24 + } 25 + 26 + static size_t papr_sysparm_buf_get_length(const struct papr_sysparm_buf *buf) 27 + { 28 + return be16_to_cpu(buf->len); 29 + } 30 + 31 + static void papr_sysparm_buf_set_length(struct papr_sysparm_buf *buf, size_t length) 32 + { 33 + WARN_ONCE(length > sizeof(buf->val), 34 + "bogus length %zu, clamping to safe value", length); 35 + length = min(sizeof(buf->val), length); 36 + buf->len = cpu_to_be16(length); 37 + } 38 + 39 + /* 40 + * For use on buffers returned from ibm,get-system-parameter before 41 + * returning them to callers. Ensures the encoded length of valid data 42 + * cannot overrun buf->val[]. 43 + */ 44 + static void papr_sysparm_buf_clamp_length(struct papr_sysparm_buf *buf) 45 + { 46 + papr_sysparm_buf_set_length(buf, papr_sysparm_buf_get_length(buf)); 47 + } 48 + 49 + /* 50 + * Perform some basic diligence on the system parameter buffer before 51 + * submitting it to RTAS. 52 + */ 53 + static bool papr_sysparm_buf_can_submit(const struct papr_sysparm_buf *buf) 54 + { 55 + /* 56 + * Firmware ought to reject buffer lengths that exceed the 57 + * maximum specified in PAPR, but there's no reason for the 58 + * kernel to allow them either. 
59 + */ 60 + if (papr_sysparm_buf_get_length(buf) > sizeof(buf->val)) 61 + return false; 62 + 63 + return true; 30 64 } 31 65 32 66 /** ··· 93 47 * 94 48 * Return: 0 on success, -errno otherwise. @buf is unmodified on error. 95 49 */ 96 - 97 50 int papr_sysparm_get(papr_sysparm_t param, struct papr_sysparm_buf *buf) 98 51 { 99 52 const s32 token = rtas_function_token(RTAS_FN_IBM_GET_SYSTEM_PARAMETER); ··· 108 63 if (token == RTAS_UNKNOWN_SERVICE) 109 64 return -ENOENT; 110 65 66 + if (!papr_sysparm_buf_can_submit(buf)) 67 + return -EINVAL; 68 + 111 69 work_area = rtas_work_area_alloc(sizeof(*buf)); 112 70 113 71 memcpy(rtas_work_area_raw_buf(work_area), buf, sizeof(*buf)); ··· 125 77 case 0: 126 78 ret = 0; 127 79 memcpy(buf, rtas_work_area_raw_buf(work_area), sizeof(*buf)); 80 + papr_sysparm_buf_clamp_length(buf); 128 81 break; 129 82 case -3: /* parameter not implemented */ 130 83 ret = -EOPNOTSUPP; ··· 164 115 if (token == RTAS_UNKNOWN_SERVICE) 165 116 return -ENOENT; 166 117 118 + if (!papr_sysparm_buf_can_submit(buf)) 119 + return -EINVAL; 120 + 167 121 work_area = rtas_work_area_alloc(sizeof(*buf)); 168 122 169 123 memcpy(rtas_work_area_raw_buf(work_area), buf, sizeof(*buf)); ··· 201 149 202 150 return ret; 203 151 } 152 + 153 + static struct papr_sysparm_buf * 154 + papr_sysparm_buf_from_user(const struct papr_sysparm_io_block __user *user_iob) 155 + { 156 + struct papr_sysparm_buf *kern_spbuf; 157 + long err; 158 + u16 len; 159 + 160 + /* 161 + * The length of valid data that userspace claims to be in 162 + * user_iob->data[]. 
163 + */ 164 + if (get_user(len, &user_iob->length)) 165 + return ERR_PTR(-EFAULT); 166 + 167 + static_assert(sizeof(user_iob->data) >= PAPR_SYSPARM_MAX_INPUT); 168 + static_assert(sizeof(kern_spbuf->val) >= PAPR_SYSPARM_MAX_INPUT); 169 + 170 + if (len > PAPR_SYSPARM_MAX_INPUT) 171 + return ERR_PTR(-EINVAL); 172 + 173 + kern_spbuf = papr_sysparm_buf_alloc(); 174 + if (!kern_spbuf) 175 + return ERR_PTR(-ENOMEM); 176 + 177 + papr_sysparm_buf_set_length(kern_spbuf, len); 178 + 179 + if (len > 0 && copy_from_user(kern_spbuf->val, user_iob->data, len)) { 180 + err = -EFAULT; 181 + goto free_sysparm_buf; 182 + } 183 + 184 + return kern_spbuf; 185 + 186 + free_sysparm_buf: 187 + papr_sysparm_buf_free(kern_spbuf); 188 + return ERR_PTR(err); 189 + } 190 + 191 + static int papr_sysparm_buf_to_user(const struct papr_sysparm_buf *kern_spbuf, 192 + struct papr_sysparm_io_block __user *user_iob) 193 + { 194 + u16 len_out = papr_sysparm_buf_get_length(kern_spbuf); 195 + 196 + if (put_user(len_out, &user_iob->length)) 197 + return -EFAULT; 198 + 199 + static_assert(sizeof(user_iob->data) >= PAPR_SYSPARM_MAX_OUTPUT); 200 + static_assert(sizeof(kern_spbuf->val) >= PAPR_SYSPARM_MAX_OUTPUT); 201 + 202 + if (copy_to_user(user_iob->data, kern_spbuf->val, PAPR_SYSPARM_MAX_OUTPUT)) 203 + return -EFAULT; 204 + 205 + return 0; 206 + } 207 + 208 + static long papr_sysparm_ioctl_get(struct papr_sysparm_io_block __user *user_iob) 209 + { 210 + struct papr_sysparm_buf *kern_spbuf; 211 + papr_sysparm_t param; 212 + long ret; 213 + 214 + if (get_user(param.token, &user_iob->parameter)) 215 + return -EFAULT; 216 + 217 + kern_spbuf = papr_sysparm_buf_from_user(user_iob); 218 + if (IS_ERR(kern_spbuf)) 219 + return PTR_ERR(kern_spbuf); 220 + 221 + ret = papr_sysparm_get(param, kern_spbuf); 222 + if (ret) 223 + goto free_sysparm_buf; 224 + 225 + ret = papr_sysparm_buf_to_user(kern_spbuf, user_iob); 226 + if (ret) 227 + goto free_sysparm_buf; 228 + 229 + ret = 0; 230 + 231 + free_sysparm_buf: 232 + 
papr_sysparm_buf_free(kern_spbuf); 233 + return ret; 234 + } 235 + 236 + 237 + static long papr_sysparm_ioctl_set(struct papr_sysparm_io_block __user *user_iob) 238 + { 239 + struct papr_sysparm_buf *kern_spbuf; 240 + papr_sysparm_t param; 241 + long ret; 242 + 243 + if (get_user(param.token, &user_iob->parameter)) 244 + return -EFAULT; 245 + 246 + kern_spbuf = papr_sysparm_buf_from_user(user_iob); 247 + if (IS_ERR(kern_spbuf)) 248 + return PTR_ERR(kern_spbuf); 249 + 250 + ret = papr_sysparm_set(param, kern_spbuf); 251 + if (ret) 252 + goto free_sysparm_buf; 253 + 254 + ret = 0; 255 + 256 + free_sysparm_buf: 257 + papr_sysparm_buf_free(kern_spbuf); 258 + return ret; 259 + } 260 + 261 + static long papr_sysparm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) 262 + { 263 + void __user *argp = (__force void __user *)arg; 264 + long ret; 265 + 266 + switch (ioctl) { 267 + case PAPR_SYSPARM_IOC_GET: 268 + ret = papr_sysparm_ioctl_get(argp); 269 + break; 270 + case PAPR_SYSPARM_IOC_SET: 271 + if (filp->f_mode & FMODE_WRITE) 272 + ret = papr_sysparm_ioctl_set(argp); 273 + else 274 + ret = -EBADF; 275 + break; 276 + default: 277 + ret = -ENOIOCTLCMD; 278 + break; 279 + } 280 + return ret; 281 + } 282 + 283 + static const struct file_operations papr_sysparm_ops = { 284 + .unlocked_ioctl = papr_sysparm_ioctl, 285 + }; 286 + 287 + static struct miscdevice papr_sysparm_dev = { 288 + .minor = MISC_DYNAMIC_MINOR, 289 + .name = "papr-sysparm", 290 + .fops = &papr_sysparm_ops, 291 + }; 292 + 293 + static __init int papr_sysparm_init(void) 294 + { 295 + if (!rtas_function_implemented(RTAS_FN_IBM_GET_SYSTEM_PARAMETER)) 296 + return -ENODEV; 297 + 298 + return misc_register(&papr_sysparm_dev); 299 + } 300 + machine_device_initcall(pseries, papr_sysparm_init);
+541
arch/powerpc/platforms/pseries/papr-vpd.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + 3 + #define pr_fmt(fmt) "papr-vpd: " fmt 4 + 5 + #include <linux/anon_inodes.h> 6 + #include <linux/build_bug.h> 7 + #include <linux/file.h> 8 + #include <linux/fs.h> 9 + #include <linux/init.h> 10 + #include <linux/lockdep.h> 11 + #include <linux/kernel.h> 12 + #include <linux/miscdevice.h> 13 + #include <linux/signal.h> 14 + #include <linux/slab.h> 15 + #include <linux/string.h> 16 + #include <linux/string_helpers.h> 17 + #include <linux/uaccess.h> 18 + #include <asm/machdep.h> 19 + #include <asm/papr-vpd.h> 20 + #include <asm/rtas-work-area.h> 21 + #include <asm/rtas.h> 22 + #include <uapi/asm/papr-vpd.h> 23 + 24 + /* 25 + * Function-specific return values for ibm,get-vpd, derived from PAPR+ 26 + * v2.13 7.3.20 "ibm,get-vpd RTAS Call". 27 + */ 28 + #define RTAS_IBM_GET_VPD_COMPLETE 0 /* All VPD has been retrieved. */ 29 + #define RTAS_IBM_GET_VPD_MORE_DATA 1 /* More VPD is available. */ 30 + #define RTAS_IBM_GET_VPD_START_OVER -4 /* VPD changed, restart call sequence. */ 31 + 32 + /** 33 + * struct rtas_ibm_get_vpd_params - Parameters (in and out) for ibm,get-vpd. 34 + * @loc_code: In: Caller-provided location code buffer. Must be RTAS-addressable. 35 + * @work_area: In: Caller-provided work area buffer for results. 36 + * @sequence: In: Sequence number. Out: Next sequence number. 37 + * @written: Out: Bytes written by ibm,get-vpd to @work_area. 38 + * @status: Out: RTAS call status. 39 + */ 40 + struct rtas_ibm_get_vpd_params { 41 + const struct papr_location_code *loc_code; 42 + struct rtas_work_area *work_area; 43 + u32 sequence; 44 + u32 written; 45 + s32 status; 46 + }; 47 + 48 + /** 49 + * rtas_ibm_get_vpd() - Call ibm,get-vpd to fill a work area buffer. 50 + * @params: See &struct rtas_ibm_get_vpd_params. 51 + * 52 + * Calls ibm,get-vpd until it errors or successfully deposits data 53 + * into the supplied work area. Handles RTAS retry statuses. 
Maps RTAS 54 + * error statuses to reasonable errno values. 55 + * 56 + * The caller is expected to invoke rtas_ibm_get_vpd() multiple times 57 + * to retrieve all the VPD for the provided location code. Only one 58 + * sequence should be in progress at any time; starting a new sequence 59 + * will disrupt any sequence already in progress. Serialization of VPD 60 + * retrieval sequences is the responsibility of the caller. 61 + * 62 + * The caller should inspect @params.status to determine whether more 63 + * calls are needed to complete the sequence. 64 + * 65 + * Context: May sleep. 66 + * Return: -ve on error, 0 otherwise. 67 + */ 68 + static int rtas_ibm_get_vpd(struct rtas_ibm_get_vpd_params *params) 69 + { 70 + const struct papr_location_code *loc_code = params->loc_code; 71 + struct rtas_work_area *work_area = params->work_area; 72 + u32 rets[2]; 73 + s32 fwrc; 74 + int ret; 75 + 76 + lockdep_assert_held(&rtas_ibm_get_vpd_lock); 77 + 78 + do { 79 + fwrc = rtas_call(rtas_function_token(RTAS_FN_IBM_GET_VPD), 4, 3, 80 + rets, 81 + __pa(loc_code), 82 + rtas_work_area_phys(work_area), 83 + rtas_work_area_size(work_area), 84 + params->sequence); 85 + } while (rtas_busy_delay(fwrc)); 86 + 87 + switch (fwrc) { 88 + case RTAS_HARDWARE_ERROR: 89 + ret = -EIO; 90 + break; 91 + case RTAS_INVALID_PARAMETER: 92 + ret = -EINVAL; 93 + break; 94 + case RTAS_IBM_GET_VPD_START_OVER: 95 + ret = -EAGAIN; 96 + break; 97 + case RTAS_IBM_GET_VPD_MORE_DATA: 98 + params->sequence = rets[0]; 99 + fallthrough; 100 + case RTAS_IBM_GET_VPD_COMPLETE: 101 + params->written = rets[1]; 102 + /* 103 + * Kernel or firmware bug, do not continue. 
104 + */ 105 + if (WARN(params->written > rtas_work_area_size(work_area), 106 + "possible write beyond end of work area")) 107 + ret = -EFAULT; 108 + else 109 + ret = 0; 110 + break; 111 + default: 112 + ret = -EIO; 113 + pr_err_ratelimited("unexpected ibm,get-vpd status %d\n", fwrc); 114 + break; 115 + } 116 + 117 + params->status = fwrc; 118 + return ret; 119 + } 120 + 121 + /* 122 + * Internal VPD "blob" APIs for accumulating ibm,get-vpd results into 123 + * an immutable buffer to be attached to a file descriptor. 124 + */ 125 + struct vpd_blob { 126 + const char *data; 127 + size_t len; 128 + }; 129 + 130 + static bool vpd_blob_has_data(const struct vpd_blob *blob) 131 + { 132 + return blob->data && blob->len; 133 + } 134 + 135 + static void vpd_blob_free(const struct vpd_blob *blob) 136 + { 137 + if (blob) { 138 + kvfree(blob->data); 139 + kfree(blob); 140 + } 141 + } 142 + 143 + /** 144 + * vpd_blob_extend() - Append data to a &struct vpd_blob. 145 + * @blob: The blob to extend. 146 + * @data: The new data to append to @blob. 147 + * @len: The length of @data. 148 + * 149 + * Context: May sleep. 150 + * Return: -ENOMEM on allocation failure, 0 otherwise. 151 + */ 152 + static int vpd_blob_extend(struct vpd_blob *blob, const char *data, size_t len) 153 + { 154 + const size_t new_len = blob->len + len; 155 + const size_t old_len = blob->len; 156 + const char *old_ptr = blob->data; 157 + char *new_ptr; 158 + 159 + new_ptr = old_ptr ? 160 + kvrealloc(old_ptr, old_len, new_len, GFP_KERNEL_ACCOUNT) : 161 + kvmalloc(len, GFP_KERNEL_ACCOUNT); 162 + 163 + if (!new_ptr) 164 + return -ENOMEM; 165 + 166 + memcpy(&new_ptr[old_len], data, len); 167 + blob->data = new_ptr; 168 + blob->len = new_len; 169 + return 0; 170 + } 171 + 172 + /** 173 + * vpd_blob_generate() - Construct a new &struct vpd_blob. 174 + * @generator: Function that supplies the blob data. 175 + * @arg: Context pointer supplied by caller, passed to @generator. 
176 + * 177 + * The @generator callback is invoked until it returns NULL. @arg is 178 + * passed to @generator in its first argument on each call. When 179 + * @generator returns data, it should store the data length in its 180 + * second argument. 181 + * 182 + * Context: May sleep. 183 + * Return: A completely populated &struct vpd_blob, or NULL on error. 184 + */ 185 + static const struct vpd_blob * 186 + vpd_blob_generate(const char * (*generator)(void *, size_t *), void *arg) 187 + { 188 + struct vpd_blob *blob; 189 + const char *buf; 190 + size_t len; 191 + int err = 0; 192 + 193 + blob = kzalloc(sizeof(*blob), GFP_KERNEL_ACCOUNT); 194 + if (!blob) 195 + return NULL; 196 + 197 + while (err == 0 && (buf = generator(arg, &len))) 198 + err = vpd_blob_extend(blob, buf, len); 199 + 200 + if (err != 0 || !vpd_blob_has_data(blob)) 201 + goto free_blob; 202 + 203 + return blob; 204 + free_blob: 205 + vpd_blob_free(blob); 206 + return NULL; 207 + } 208 + 209 + /* 210 + * Internal VPD sequence APIs. A VPD sequence is a series of calls to 211 + * ibm,get-vpd for a given location code. The sequence ends when an 212 + * error is encountered or all VPD for the location code has been 213 + * returned. 214 + */ 215 + 216 + /** 217 + * struct vpd_sequence - State for managing a VPD sequence. 218 + * @error: Shall be zero as long as the sequence has not encountered an error, 219 + * -ve errno otherwise. Use vpd_sequence_set_err() to update this. 220 + * @params: Parameter block to pass to rtas_ibm_get_vpd(). 221 + */ 222 + struct vpd_sequence { 223 + int error; 224 + struct rtas_ibm_get_vpd_params params; 225 + }; 226 + 227 + /** 228 + * vpd_sequence_begin() - Begin a VPD retrieval sequence. 229 + * @seq: Uninitialized sequence state. 230 + * @loc_code: Location code that defines the scope of the VPD to return. 231 + * 232 + * Initializes @seq with the resources necessary to carry out a VPD 233 + * sequence. 
Callers must pass @seq to vpd_sequence_end() regardless 234 + * of whether the sequence succeeds. 235 + * 236 + * Context: May sleep. 237 + */ 238 + static void vpd_sequence_begin(struct vpd_sequence *seq, 239 + const struct papr_location_code *loc_code) 240 + { 241 + /* 242 + * Use a static data structure for the location code passed to 243 + * RTAS to ensure it's in the RMA and avoid a separate work 244 + * area allocation. Guarded by the function lock. 245 + */ 246 + static struct papr_location_code static_loc_code; 247 + 248 + /* 249 + * We could allocate the work area before acquiring the 250 + * function lock, but that would allow concurrent requests to 251 + * exhaust the limited work area pool for no benefit. So 252 + * allocate the work area under the lock. 253 + */ 254 + mutex_lock(&rtas_ibm_get_vpd_lock); 255 + static_loc_code = *loc_code; 256 + *seq = (struct vpd_sequence) { 257 + .params = { 258 + .work_area = rtas_work_area_alloc(SZ_4K), 259 + .loc_code = &static_loc_code, 260 + .sequence = 1, 261 + }, 262 + }; 263 + } 264 + 265 + /** 266 + * vpd_sequence_end() - Finalize a VPD retrieval sequence. 267 + * @seq: Sequence state. 268 + * 269 + * Releases resources obtained by vpd_sequence_begin(). 270 + */ 271 + static void vpd_sequence_end(struct vpd_sequence *seq) 272 + { 273 + rtas_work_area_free(seq->params.work_area); 274 + mutex_unlock(&rtas_ibm_get_vpd_lock); 275 + } 276 + 277 + /** 278 + * vpd_sequence_should_stop() - Determine whether a VPD retrieval sequence 279 + * should continue. 280 + * @seq: VPD sequence state. 281 + * 282 + * Examines the sequence error state and outputs of the last call to 283 + * ibm,get-vpd to determine whether the sequence in progress should 284 + * continue or stop. 285 + * 286 + * Return: True if the sequence has encountered an error or if all VPD for 287 + * this sequence has been retrieved. False otherwise. 
288 + */ 289 + static bool vpd_sequence_should_stop(const struct vpd_sequence *seq) 290 + { 291 + bool done; 292 + 293 + if (seq->error) 294 + return true; 295 + 296 + switch (seq->params.status) { 297 + case 0: 298 + if (seq->params.written == 0) 299 + done = false; /* Initial state. */ 300 + else 301 + done = true; /* All data consumed. */ 302 + break; 303 + case 1: 304 + done = false; /* More data available. */ 305 + break; 306 + default: 307 + done = true; /* Error encountered. */ 308 + break; 309 + } 310 + 311 + return done; 312 + } 313 + 314 + static int vpd_sequence_set_err(struct vpd_sequence *seq, int err) 315 + { 316 + /* Preserve the first error recorded. */ 317 + if (seq->error == 0) 318 + seq->error = err; 319 + 320 + return seq->error; 321 + } 322 + 323 + /* 324 + * Generator function to be passed to vpd_blob_generate(). 325 + */ 326 + static const char *vpd_sequence_fill_work_area(void *arg, size_t *len) 327 + { 328 + struct vpd_sequence *seq = arg; 329 + struct rtas_ibm_get_vpd_params *p = &seq->params; 330 + 331 + if (vpd_sequence_should_stop(seq)) 332 + return NULL; 333 + if (vpd_sequence_set_err(seq, rtas_ibm_get_vpd(p))) 334 + return NULL; 335 + *len = p->written; 336 + return rtas_work_area_raw_buf(p->work_area); 337 + } 338 + 339 + /* 340 + * Higher-level VPD retrieval code below. These functions use the 341 + * vpd_blob_* and vpd_sequence_* APIs defined above to create fd-based 342 + * VPD handles for consumption by user space. 343 + */ 344 + 345 + /** 346 + * papr_vpd_run_sequence() - Run a single VPD retrieval sequence. 347 + * @loc_code: Location code that defines the scope of VPD to return. 348 + * 349 + * Context: May sleep. Holds a mutex and an RTAS work area for its 350 + * duration. Typically performs multiple sleepable slab 351 + * allocations. 352 + * 353 + * Return: A populated &struct vpd_blob on success. Encoded error 354 + * pointer otherwise. 
355 + */ 356 + static const struct vpd_blob *papr_vpd_run_sequence(const struct papr_location_code *loc_code) 357 + { 358 + const struct vpd_blob *blob; 359 + struct vpd_sequence seq; 360 + 361 + vpd_sequence_begin(&seq, loc_code); 362 + blob = vpd_blob_generate(vpd_sequence_fill_work_area, &seq); 363 + if (!blob) 364 + vpd_sequence_set_err(&seq, -ENOMEM); 365 + vpd_sequence_end(&seq); 366 + 367 + if (seq.error) { 368 + vpd_blob_free(blob); 369 + return ERR_PTR(seq.error); 370 + } 371 + 372 + return blob; 373 + } 374 + 375 + /** 376 + * papr_vpd_retrieve() - Return the VPD for a location code. 377 + * @loc_code: Location code that defines the scope of VPD to return. 378 + * 379 + * Run VPD sequences against @loc_code until a blob is successfully 380 + * instantiated, or a hard error is encountered, or a fatal signal is 381 + * pending. 382 + * 383 + * Context: May sleep. 384 + * Return: A fully populated VPD blob when successful. Encoded error 385 + * pointer otherwise. 386 + */ 387 + static const struct vpd_blob *papr_vpd_retrieve(const struct papr_location_code *loc_code) 388 + { 389 + const struct vpd_blob *blob; 390 + 391 + /* 392 + * EAGAIN means the sequence errored with a -4 (VPD changed) 393 + * status from ibm,get-vpd, and we should attempt a new 394 + * sequence. PAPR+ v2.13 R1–7.3.20–5 indicates that this 395 + * should be a transient condition, not something that happens 396 + * continuously. But we'll stop trying on a fatal signal. 397 + */ 398 + do { 399 + blob = papr_vpd_run_sequence(loc_code); 400 + if (!IS_ERR(blob)) /* Success. */ 401 + break; 402 + if (PTR_ERR(blob) != -EAGAIN) /* Hard error. 
*/ 403 + break; 404 + pr_info_ratelimited("VPD changed during retrieval, retrying\n"); 405 + cond_resched(); 406 + } while (!fatal_signal_pending(current)); 407 + 408 + return blob; 409 + } 410 + 411 + static ssize_t papr_vpd_handle_read(struct file *file, char __user *buf, size_t size, loff_t *off) 412 + { 413 + const struct vpd_blob *blob = file->private_data; 414 + 415 + /* bug: we should not instantiate a handle without any data attached. */ 416 + if (!vpd_blob_has_data(blob)) { 417 + pr_err_once("handle without data\n"); 418 + return -EIO; 419 + } 420 + 421 + return simple_read_from_buffer(buf, size, off, blob->data, blob->len); 422 + } 423 + 424 + static int papr_vpd_handle_release(struct inode *inode, struct file *file) 425 + { 426 + const struct vpd_blob *blob = file->private_data; 427 + 428 + vpd_blob_free(blob); 429 + 430 + return 0; 431 + } 432 + 433 + static loff_t papr_vpd_handle_seek(struct file *file, loff_t off, int whence) 434 + { 435 + const struct vpd_blob *blob = file->private_data; 436 + 437 + return fixed_size_llseek(file, off, whence, blob->len); 438 + } 439 + 440 + 441 + static const struct file_operations papr_vpd_handle_ops = { 442 + .read = papr_vpd_handle_read, 443 + .llseek = papr_vpd_handle_seek, 444 + .release = papr_vpd_handle_release, 445 + }; 446 + 447 + /** 448 + * papr_vpd_create_handle() - Create a fd-based handle for reading VPD. 449 + * @ulc: Location code in user memory; defines the scope of the VPD to 450 + * retrieve. 451 + * 452 + * Handler for PAPR_VPD_IOC_CREATE_HANDLE ioctl command. Validates 453 + * @ulc and instantiates an immutable VPD "blob" for it. The blob is 454 + * attached to a file descriptor for reading by user space. The memory 455 + * backing the blob is freed when the file is released. 456 + * 457 + * The entire requested VPD is retrieved by this call and all 458 + * necessary RTAS interactions are performed before returning the fd 459 + * to user space. 
This keeps the read handler simple and ensures that 460 + * the kernel can prevent interleaving of ibm,get-vpd call sequences. 461 + * 462 + * Return: The installed fd number if successful, -ve errno otherwise. 463 + */ 464 + static long papr_vpd_create_handle(struct papr_location_code __user *ulc) 465 + { 466 + struct papr_location_code klc; 467 + const struct vpd_blob *blob; 468 + struct file *file; 469 + long err; 470 + int fd; 471 + 472 + if (copy_from_user(&klc, ulc, sizeof(klc))) 473 + return -EFAULT; 474 + 475 + if (!string_is_terminated(klc.str, ARRAY_SIZE(klc.str))) 476 + return -EINVAL; 477 + 478 + blob = papr_vpd_retrieve(&klc); 479 + if (IS_ERR(blob)) 480 + return PTR_ERR(blob); 481 + 482 + fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); 483 + if (fd < 0) { 484 + err = fd; 485 + goto free_blob; 486 + } 487 + 488 + file = anon_inode_getfile("[papr-vpd]", &papr_vpd_handle_ops, 489 + (void *)blob, O_RDONLY); 490 + if (IS_ERR(file)) { 491 + err = PTR_ERR(file); 492 + goto put_fd; 493 + } 494 + 495 + file->f_mode |= FMODE_LSEEK | FMODE_PREAD; 496 + fd_install(fd, file); 497 + return fd; 498 + put_fd: 499 + put_unused_fd(fd); 500 + free_blob: 501 + vpd_blob_free(blob); 502 + return err; 503 + } 504 + 505 + /* 506 + * Top-level ioctl handler for /dev/papr-vpd. 
507 + */ 508 + static long papr_vpd_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) 509 + { 510 + void __user *argp = (__force void __user *)arg; 511 + long ret; 512 + 513 + switch (ioctl) { 514 + case PAPR_VPD_IOC_CREATE_HANDLE: 515 + ret = papr_vpd_create_handle(argp); 516 + break; 517 + default: 518 + ret = -ENOIOCTLCMD; 519 + break; 520 + } 521 + return ret; 522 + } 523 + 524 + static const struct file_operations papr_vpd_ops = { 525 + .unlocked_ioctl = papr_vpd_dev_ioctl, 526 + }; 527 + 528 + static struct miscdevice papr_vpd_dev = { 529 + .minor = MISC_DYNAMIC_MINOR, 530 + .name = "papr-vpd", 531 + .fops = &papr_vpd_ops, 532 + }; 533 + 534 + static __init int papr_vpd_init(void) 535 + { 536 + if (!rtas_function_implemented(RTAS_FN_IBM_GET_VPD)) 537 + return -ENODEV; 538 + 539 + return misc_register(&papr_vpd_dev); 540 + } 541 + machine_device_initcall(pseries, papr_vpd_init);
+1
arch/powerpc/platforms/pseries/pseries.h
··· 55 55 extern int dlpar_acquire_drc(u32 drc_index); 56 56 extern int dlpar_release_drc(u32 drc_index); 57 57 extern int dlpar_unisolate_drc(u32 drc_index); 58 + extern void post_mobility_fixup(void); 58 59 59 60 void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog); 60 61 int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_errlog);
+1
arch/powerpc/platforms/pseries/suspend.c
··· 13 13 #include <asm/mmu.h> 14 14 #include <asm/rtas.h> 15 15 #include <asm/topology.h> 16 + #include "pseries.h" 16 17 17 18 static struct device suspend_dev; 18 19
-19
arch/powerpc/sysdev/grackle.c
··· 18 18 #define GRACKLE_CFA(b, d, o) (0x80 | ((b) << 8) | ((d) << 16) \ 19 19 | (((o) & ~3) << 24)) 20 20 21 - #define GRACKLE_PICR1_STG 0x00000040 22 21 #define GRACKLE_PICR1_LOOPSNOOP 0x00000010 23 - 24 - /* N.B. this is called before bridges is initialized, so we can't 25 - use grackle_pcibios_{read,write}_config_dword. */ 26 - static inline void grackle_set_stg(struct pci_controller* bp, int enable) 27 - { 28 - unsigned int val; 29 - 30 - out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); 31 - val = in_le32(bp->cfg_data); 32 - val = enable? (val | GRACKLE_PICR1_STG) : 33 - (val & ~GRACKLE_PICR1_STG); 34 - out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); 35 - out_le32(bp->cfg_data, val); 36 - (void)in_le32(bp->cfg_data); 37 - } 38 22 39 23 static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable) 40 24 { ··· 40 56 pci_add_flags(PCI_REASSIGN_ALL_BUS); 41 57 if (of_machine_is_compatible("AAPL,PowerBook1998")) 42 58 grackle_set_loop_snoop(hose, 1); 43 - #if 0 /* Disabled for now, HW problems ??? */ 44 - grackle_set_stg(hose, 1); 45 - #endif 46 59 }
+2
arch/powerpc/sysdev/xics/icp-native.c
··· 236 236 rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation", 237 237 cpu, hw_id); 238 238 239 + if (!rname) 240 + return -ENOMEM; 239 241 if (!request_mem_region(addr, size, rname)) { 240 242 pr_warn("icp_native: Could not reserve ICP MMIO for CPU %d, interrupt server #0x%x\n", 241 243 cpu, hw_id);
+2 -1
drivers/misc/cxl/cxl.h
··· 836 836 { 837 837 if ((pvr_version_is(PVR_POWER8E)) || 838 838 (pvr_version_is(PVR_POWER8NVL)) || 839 - (pvr_version_is(PVR_POWER8))) 839 + (pvr_version_is(PVR_POWER8)) || 840 + (pvr_version_is(PVR_HX_C2000))) 840 841 return true; 841 842 return false; 842 843 }
+1 -1
drivers/misc/ocxl/afu_irq.c
··· 57 57 58 58 static irqreturn_t afu_irq_handler(int virq, void *data) 59 59 { 60 - struct afu_irq *irq = (struct afu_irq *) data; 60 + struct afu_irq *irq = data; 61 61 62 62 trace_ocxl_afu_irq_receive(virq); 63 63
+1 -1
drivers/misc/ocxl/context.c
··· 55 55 */ 56 56 static void xsl_fault_error(void *data, u64 addr, u64 dsisr) 57 57 { 58 - struct ocxl_context *ctx = (struct ocxl_context *) data; 58 + struct ocxl_context *ctx = data; 59 59 60 60 mutex_lock(&ctx->xsl_error_lock); 61 61 ctx->xsl_error.addr = addr;
+7 -7
drivers/misc/ocxl/link.c
··· 188 188 189 189 static irqreturn_t xsl_fault_handler(int irq, void *data) 190 190 { 191 - struct ocxl_link *link = (struct ocxl_link *) data; 191 + struct ocxl_link *link = data; 192 192 struct spa *spa = link->spa; 193 193 u64 dsisr, dar, pe_handle; 194 194 struct pe_data *pe_data; ··· 483 483 484 484 void ocxl_link_release(struct pci_dev *dev, void *link_handle) 485 485 { 486 - struct ocxl_link *link = (struct ocxl_link *) link_handle; 486 + struct ocxl_link *link = link_handle; 487 487 488 488 mutex_lock(&links_list_lock); 489 489 kref_put(&link->ref, release_xsl); ··· 540 540 void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr), 541 541 void *xsl_err_data) 542 542 { 543 - struct ocxl_link *link = (struct ocxl_link *) link_handle; 543 + struct ocxl_link *link = link_handle; 544 544 struct spa *spa = link->spa; 545 545 struct ocxl_process_element *pe; 546 546 int pe_handle, rc = 0; ··· 630 630 631 631 int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid) 632 632 { 633 - struct ocxl_link *link = (struct ocxl_link *) link_handle; 633 + struct ocxl_link *link = link_handle; 634 634 struct spa *spa = link->spa; 635 635 struct ocxl_process_element *pe; 636 636 int pe_handle, rc; ··· 666 666 667 667 int ocxl_link_remove_pe(void *link_handle, int pasid) 668 668 { 669 - struct ocxl_link *link = (struct ocxl_link *) link_handle; 669 + struct ocxl_link *link = link_handle; 670 670 struct spa *spa = link->spa; 671 671 struct ocxl_process_element *pe; 672 672 struct pe_data *pe_data; ··· 752 752 753 753 int ocxl_link_irq_alloc(void *link_handle, int *hw_irq) 754 754 { 755 - struct ocxl_link *link = (struct ocxl_link *) link_handle; 755 + struct ocxl_link *link = link_handle; 756 756 int irq; 757 757 758 758 if (atomic_dec_if_positive(&link->irq_available) < 0) ··· 771 771 772 772 void ocxl_link_free_irq(void *link_handle, int hw_irq) 773 773 { 774 - struct ocxl_link *link = (struct ocxl_link *) link_handle; 774 + struct ocxl_link *link = link_handle; 775 775 
776 776 xive_native_free_irq(hw_irq); 777 777 atomic_inc(&link->irq_available);
+1 -1
drivers/misc/ocxl/main.c
··· 7 7 8 8 static int __init init_ocxl(void) 9 9 { 10 - int rc = 0; 10 + int rc; 11 11 12 12 if (!tlbie_capable) 13 13 return -EINVAL;
-6
drivers/net/ethernet/toshiba/ps3_gelic_net.h
··· 346 346 return port->priv; 347 347 } 348 348 349 - #ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC 350 - void udbg_shutdown_ps3gelic(void); 351 - #else 352 - static inline void udbg_shutdown_ps3gelic(void) {} 353 - #endif 354 - 355 349 int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask); 356 350 /* shared netdev ops */ 357 351 void gelic_card_up(struct gelic_card *card);
+2
tools/testing/selftests/powerpc/Makefile
··· 32 32 vphn \ 33 33 math \ 34 34 papr_attributes \ 35 + papr_vpd \ 36 + papr_sysparm \ 35 37 ptrace \ 36 38 security \ 37 39 mce
+25
tools/testing/selftests/powerpc/math/fpu.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + /* 3 + * Copyright 2023, Michael Ellerman, IBM Corporation. 4 + */ 5 + 6 + #ifndef _SELFTESTS_POWERPC_FPU_H 7 + #define _SELFTESTS_POWERPC_FPU_H 8 + 9 + static inline void randomise_darray(double *darray, int num) 10 + { 11 + long val; 12 + 13 + for (int i = 0; i < num; i++) { 14 + val = random(); 15 + if (val & 1) 16 + val *= -1; 17 + 18 + if (val & 2) 19 + darray[i] = 1.0 / val; 20 + else 21 + darray[i] = val * val; 22 + } 23 + } 24 + 25 + #endif /* _SELFTESTS_POWERPC_FPU_H */
+41 -7
tools/testing/selftests/powerpc/math/fpu_asm.S
··· 66 66 li r3,0 # Success!!! 67 67 1: blr 68 68 69 + 70 + // int check_all_fprs(double darray[32]) 71 + FUNC_START(check_all_fprs) 72 + PUSH_BASIC_STACK(8) 73 + mr r4, r3 // r4 = darray 74 + li r3, 1 // prepare for failure 75 + 76 + stfd f31, STACK_FRAME_LOCAL(0, 0)(sp) // backup f31 77 + 78 + // Check regs f0-f30, using f31 as scratch 79 + .set i, 0 80 + .rept 31 81 + lfd f31, (8 * i)(r4) // load expected value 82 + fcmpu cr0, i, f31 // compare 83 + bne cr0, 1f // bail if mismatch 84 + .set i, i + 1 85 + .endr 86 + 87 + lfd f31, STACK_FRAME_LOCAL(0, 0)(sp) // reload f31 88 + stfd f30, STACK_FRAME_LOCAL(0, 0)(sp) // backup f30 89 + 90 + lfd f30, (8 * 31)(r4) // load expected value of f31 91 + fcmpu cr0, f30, f31 // compare 92 + bne cr0, 1f // bail if mismatch 93 + 94 + lfd f30, STACK_FRAME_LOCAL(0, 0)(sp) // reload f30 95 + 96 + // Success 97 + li r3, 0 98 + 99 + 1: POP_BASIC_STACK(8) 100 + blr 101 + FUNC_END(check_all_fprs) 102 + 69 103 FUNC_START(test_fpu) 70 104 # r3 holds pointer to where to put the result of fork 71 105 # r4 holds pointer to the pid ··· 109 75 std r3,STACK_FRAME_PARAM(0)(sp) # Address of darray 110 76 std r4,STACK_FRAME_PARAM(1)(sp) # Address of pid 111 77 112 - bl load_fpu 113 - nop 78 + // Load FPRs with expected values 79 + OP_REGS lfd, 8, 0, 31, r3 80 + 114 81 li r0,__NR_fork 115 82 sc 116 83 ··· 120 85 std r3,0(r9) 121 86 122 87 ld r3,STACK_FRAME_PARAM(0)(sp) 123 - bl check_fpu 88 + bl check_all_fprs 124 89 nop 125 90 126 91 POP_FPU(256) ··· 139 104 std r4,STACK_FRAME_PARAM(1)(sp) # int *threads_starting 140 105 std r5,STACK_FRAME_PARAM(2)(sp) # int *running 141 106 142 - bl load_fpu 143 - nop 107 + // Load FPRs with expected values 108 + OP_REGS lfd, 8, 0, 31, r3 144 109 145 110 sync 146 111 # Atomic DEC ··· 151 116 bne- 1b 152 117 153 118 2: ld r3,STACK_FRAME_PARAM(0)(sp) 154 - bl check_fpu 155 - nop 119 + bl check_all_fprs 156 120 cmpdi r3,0 157 121 bne 3f 158 122 ld r4,STACK_FRAME_PARAM(2)(sp)
+13 -17
tools/testing/selftests/powerpc/math/fpu_preempt.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-or-later 2 2 /* 3 3 * Copyright 2015, Cyril Bur, IBM Corp. 4 + * Copyright 2023, Michael Ellerman, IBM Corp. 4 5 * 5 6 * This test attempts to see if the FPU registers change across preemption. 6 - * Two things should be noted here a) The check_fpu function in asm only checks 7 - * the non volatile registers as it is reused from the syscall test b) There is 8 - * no way to be sure preemption happened so this test just uses many threads 9 - * and a long wait. As such, a successful test doesn't mean much but a failure 10 - * is bad. 7 + * There is no way to be sure preemption happened so this test just uses many 8 + * threads and a long wait. As such, a successful test doesn't mean much but 9 + * a failure is bad. 11 10 */ 12 11 13 12 #include <stdio.h> ··· 19 20 #include <pthread.h> 20 21 21 22 #include "utils.h" 23 + #include "fpu.h" 22 24 23 25 /* Time to wait for workers to get preempted (seconds) */ 24 - #define PREEMPT_TIME 20 26 + #define PREEMPT_TIME 60 25 27 /* 26 28 * Factor by which to multiply number of online CPUs for total number of 27 29 * worker threads ··· 30 30 #define THREAD_FACTOR 8 31 31 32 32 33 - __thread double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 34 - 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 35 - 2.1}; 33 + __thread double darray[32]; 36 34 37 35 int threads_starting; 38 36 int running; 39 37 40 - extern void preempt_fpu(double *darray, int *threads_starting, int *running); 38 + extern int preempt_fpu(double *darray, int *threads_starting, int *running); 41 39 42 40 void *preempt_fpu_c(void *p) 43 41 { 44 - int i; 42 + long rc; 43 + 45 44 srand(pthread_self()); 46 - for (i = 0; i < 21; i++) 47 - darray[i] = rand(); 45 + randomise_darray(darray, ARRAY_SIZE(darray)); 46 + rc = preempt_fpu(darray, &threads_starting, &running); 48 47 49 - /* Test failed if it ever returns */ 50 - preempt_fpu(darray, &threads_starting, &running); 51 - 52 - return p; 48 + return (void 
*)rc; 53 49 } 54 50 55 51 int test_preempt_fpu(void)
+5 -3
tools/testing/selftests/powerpc/math/fpu_syscall.c
··· 14 14 #include <stdlib.h> 15 15 16 16 #include "utils.h" 17 + #include "fpu.h" 17 18 18 19 extern int test_fpu(double *darray, pid_t *pid); 19 20 20 - double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 21 - 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 22 - 2.1}; 21 + double darray[32]; 23 22 24 23 int syscall_fpu(void) 25 24 { ··· 26 27 int i; 27 28 int ret; 28 29 int child_ret; 30 + 31 + randomise_darray(darray, ARRAY_SIZE(darray)); 32 + 29 33 for (i = 0; i < 1000; i++) { 30 34 /* test_fpu will fork() */ 31 35 ret = test_fpu(darray, &fork_pid);
+6 -4
tools/testing/selftests/powerpc/math/vmx_preempt.c
··· 37 37 int threads_starting; 38 38 int running; 39 39 40 - extern void preempt_vmx(vector int *varray, int *threads_starting, int *running); 40 + extern int preempt_vmx(vector int *varray, int *threads_starting, int *running); 41 41 42 42 void *preempt_vmx_c(void *p) 43 43 { 44 44 int i, j; 45 + long rc; 46 + 45 47 srand(pthread_self()); 46 48 for (i = 0; i < 12; i++) 47 49 for (j = 0; j < 4; j++) 48 50 varray[i][j] = rand(); 49 51 50 - /* Test fails if it ever returns */ 51 - preempt_vmx(varray, &threads_starting, &running); 52 - return p; 52 + rc = preempt_vmx(varray, &threads_starting, &running); 53 + 54 + return (void *)rc; 53 55 } 54 56 55 57 int test_preempt_vmx(void)
+1
tools/testing/selftests/powerpc/papr_sysparm/.gitignore
··· 1 + /papr_sysparm
+12
tools/testing/selftests/powerpc/papr_sysparm/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + noarg: 3 + $(MAKE) -C ../ 4 + 5 + TEST_GEN_PROGS := papr_sysparm 6 + 7 + top_srcdir = ../../../../.. 8 + include ../../lib.mk 9 + 10 + $(TEST_GEN_PROGS): ../harness.c ../utils.c 11 + 12 + $(OUTPUT)/papr_sysparm: CFLAGS += $(KHDR_INCLUDES)
+196
tools/testing/selftests/powerpc/papr_sysparm/papr_sysparm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + #include <errno.h> 3 + #include <fcntl.h> 4 + #include <stdlib.h> 5 + #include <sys/ioctl.h> 6 + #include <unistd.h> 7 + #include <asm/papr-sysparm.h> 8 + 9 + #include "utils.h" 10 + 11 + #define DEVPATH "/dev/papr-sysparm" 12 + 13 + static int open_close(void) 14 + { 15 + const int devfd = open(DEVPATH, O_RDONLY); 16 + 17 + SKIP_IF_MSG(devfd < 0 && errno == ENOENT, 18 + DEVPATH " not present"); 19 + 20 + FAIL_IF(devfd < 0); 21 + FAIL_IF(close(devfd) != 0); 22 + 23 + return 0; 24 + } 25 + 26 + static int get_splpar(void) 27 + { 28 + struct papr_sysparm_io_block sp = { 29 + .parameter = 20, // SPLPAR characteristics 30 + }; 31 + const int devfd = open(DEVPATH, O_RDONLY); 32 + 33 + SKIP_IF_MSG(devfd < 0 && errno == ENOENT, 34 + DEVPATH " not present"); 35 + 36 + FAIL_IF(devfd < 0); 37 + FAIL_IF(ioctl(devfd, PAPR_SYSPARM_IOC_GET, &sp) != 0); 38 + FAIL_IF(sp.length == 0); 39 + FAIL_IF(sp.length > sizeof(sp.data)); 40 + FAIL_IF(close(devfd) != 0); 41 + 42 + return 0; 43 + } 44 + 45 + static int get_bad_parameter(void) 46 + { 47 + struct papr_sysparm_io_block sp = { 48 + .parameter = UINT32_MAX, // there are only ~60 specified parameters 49 + }; 50 + const int devfd = open(DEVPATH, O_RDONLY); 51 + 52 + SKIP_IF_MSG(devfd < 0 && errno == ENOENT, 53 + DEVPATH " not present"); 54 + 55 + FAIL_IF(devfd < 0); 56 + 57 + // Ensure expected error 58 + FAIL_IF(ioctl(devfd, PAPR_SYSPARM_IOC_GET, &sp) != -1); 59 + FAIL_IF(errno != EOPNOTSUPP); 60 + 61 + // Ensure the buffer is unchanged 62 + FAIL_IF(sp.length != 0); 63 + for (size_t i = 0; i < ARRAY_SIZE(sp.data); ++i) 64 + FAIL_IF(sp.data[i] != 0); 65 + 66 + FAIL_IF(close(devfd) != 0); 67 + 68 + return 0; 69 + } 70 + 71 + static int check_efault_common(unsigned long cmd) 72 + { 73 + const int devfd = open(DEVPATH, O_RDWR); 74 + 75 + SKIP_IF_MSG(devfd < 0 && errno == ENOENT, 76 + DEVPATH " not present"); 77 + 78 + FAIL_IF(devfd < 0); 79 + 80 + // Ensure expected error 81 + 
FAIL_IF(ioctl(devfd, cmd, NULL) != -1); 82 + FAIL_IF(errno != EFAULT); 83 + 84 + FAIL_IF(close(devfd) != 0); 85 + 86 + return 0; 87 + } 88 + 89 + static int check_efault_get(void) 90 + { 91 + return check_efault_common(PAPR_SYSPARM_IOC_GET); 92 + } 93 + 94 + static int check_efault_set(void) 95 + { 96 + return check_efault_common(PAPR_SYSPARM_IOC_SET); 97 + } 98 + 99 + static int set_hmc0(void) 100 + { 101 + struct papr_sysparm_io_block sp = { 102 + .parameter = 0, // HMC0, not a settable parameter 103 + }; 104 + const int devfd = open(DEVPATH, O_RDWR); 105 + 106 + SKIP_IF_MSG(devfd < 0 && errno == ENOENT, 107 + DEVPATH " not present"); 108 + 109 + FAIL_IF(devfd < 0); 110 + 111 + // Ensure expected error 112 + FAIL_IF(ioctl(devfd, PAPR_SYSPARM_IOC_SET, &sp) != -1); 113 + SKIP_IF_MSG(errno == EOPNOTSUPP, "operation not supported"); 114 + FAIL_IF(errno != EPERM); 115 + 116 + FAIL_IF(close(devfd) != 0); 117 + 118 + return 0; 119 + } 120 + 121 + static int set_with_ro_fd(void) 122 + { 123 + struct papr_sysparm_io_block sp = { 124 + .parameter = 0, // HMC0, not a settable parameter. 125 + }; 126 + const int devfd = open(DEVPATH, O_RDONLY); 127 + 128 + SKIP_IF_MSG(devfd < 0 && errno == ENOENT, 129 + DEVPATH " not present"); 130 + 131 + FAIL_IF(devfd < 0); 132 + 133 + // Ensure expected error 134 + FAIL_IF(ioctl(devfd, PAPR_SYSPARM_IOC_SET, &sp) != -1); 135 + SKIP_IF_MSG(errno == EOPNOTSUPP, "operation not supported"); 136 + 137 + // HMC0 isn't a settable parameter and we would normally 138 + // expect to get EPERM on attempts to modify it. However, when 139 + // the file is open read-only, we expect the driver to prevent 140 + // the attempt with a distinct error. 
141 + FAIL_IF(errno != EBADF); 142 + 143 + FAIL_IF(close(devfd) != 0); 144 + 145 + return 0; 146 + } 147 + 148 + struct sysparm_test { 149 + int (*function)(void); 150 + const char *description; 151 + }; 152 + 153 + static const struct sysparm_test sysparm_tests[] = { 154 + { 155 + .function = open_close, 156 + .description = "open and close " DEVPATH " without issuing commands", 157 + }, 158 + { 159 + .function = get_splpar, 160 + .description = "retrieve SPLPAR characteristics", 161 + }, 162 + { 163 + .function = get_bad_parameter, 164 + .description = "verify EOPNOTSUPP for known-bad parameter", 165 + }, 166 + { 167 + .function = check_efault_get, 168 + .description = "PAPR_SYSPARM_IOC_GET returns EFAULT on bad address", 169 + }, 170 + { 171 + .function = check_efault_set, 172 + .description = "PAPR_SYSPARM_IOC_SET returns EFAULT on bad address", 173 + }, 174 + { 175 + .function = set_hmc0, 176 + .description = "ensure EPERM on attempt to update HMC0", 177 + }, 178 + { 179 + .function = set_with_ro_fd, 180 + .description = "PAPR_IOC_SYSPARM_SET returns EACCES on read-only fd", 181 + }, 182 + }; 183 + 184 + int main(void) 185 + { 186 + size_t fails = 0; 187 + 188 + for (size_t i = 0; i < ARRAY_SIZE(sysparm_tests); ++i) { 189 + const struct sysparm_test *t = &sysparm_tests[i]; 190 + 191 + if (test_harness(t->function, t->description)) 192 + ++fails; 193 + } 194 + 195 + return fails == 0 ? EXIT_SUCCESS : EXIT_FAILURE; 196 + }
+1
tools/testing/selftests/powerpc/papr_vpd/.gitignore
··· 1 + /papr_vpd
+12
tools/testing/selftests/powerpc/papr_vpd/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + noarg: 3 + $(MAKE) -C ../ 4 + 5 + TEST_GEN_PROGS := papr_vpd 6 + 7 + top_srcdir = ../../../../.. 8 + include ../../lib.mk 9 + 10 + $(TEST_GEN_PROGS): ../harness.c ../utils.c 11 + 12 + $(OUTPUT)/papr_vpd: CFLAGS += $(KHDR_INCLUDES)
+352
tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + #define _GNU_SOURCE 3 + #include <errno.h> 4 + #include <fcntl.h> 5 + #include <stdlib.h> 6 + #include <string.h> 7 + #include <sys/ioctl.h> 8 + #include <unistd.h> 9 + 10 + #include <asm/papr-vpd.h> 11 + 12 + #include "utils.h" 13 + 14 + #define DEVPATH "/dev/papr-vpd" 15 + 16 + static int dev_papr_vpd_open_close(void) 17 + { 18 + const int devfd = open(DEVPATH, O_RDONLY); 19 + 20 + SKIP_IF_MSG(devfd < 0 && errno == ENOENT, 21 + DEVPATH " not present"); 22 + 23 + FAIL_IF(devfd < 0); 24 + FAIL_IF(close(devfd) != 0); 25 + 26 + return 0; 27 + } 28 + 29 + static int dev_papr_vpd_get_handle_all(void) 30 + { 31 + const int devfd = open(DEVPATH, O_RDONLY); 32 + struct papr_location_code lc = { .str = "", }; 33 + off_t size; 34 + int fd; 35 + 36 + SKIP_IF_MSG(devfd < 0 && errno == ENOENT, 37 + DEVPATH " not present"); 38 + 39 + FAIL_IF(devfd < 0); 40 + 41 + errno = 0; 42 + fd = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, &lc); 43 + FAIL_IF(errno != 0); 44 + FAIL_IF(fd < 0); 45 + 46 + FAIL_IF(close(devfd) != 0); 47 + 48 + size = lseek(fd, 0, SEEK_END); 49 + FAIL_IF(size <= 0); 50 + 51 + void *buf = malloc((size_t)size); 52 + FAIL_IF(!buf); 53 + 54 + ssize_t consumed = pread(fd, buf, size, 0); 55 + FAIL_IF(consumed != size); 56 + 57 + /* Ensure EOF */ 58 + FAIL_IF(read(fd, buf, size) != 0); 59 + FAIL_IF(close(fd)); 60 + 61 + /* Verify that the buffer looks like VPD */ 62 + static const char needle[] = "System VPD"; 63 + FAIL_IF(!memmem(buf, size, needle, strlen(needle))); 64 + 65 + return 0; 66 + } 67 + 68 + static int dev_papr_vpd_get_handle_byte_at_a_time(void) 69 + { 70 + const int devfd = open(DEVPATH, O_RDONLY); 71 + struct papr_location_code lc = { .str = "", }; 72 + int fd; 73 + 74 + SKIP_IF_MSG(devfd < 0 && errno == ENOENT, 75 + DEVPATH " not present"); 76 + 77 + FAIL_IF(devfd < 0); 78 + 79 + errno = 0; 80 + fd = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, &lc); 81 + FAIL_IF(errno != 0); 82 + FAIL_IF(fd < 0); 83 + 84 + 
FAIL_IF(close(devfd) != 0); 85 + 86 + size_t consumed = 0; 87 + while (1) { 88 + ssize_t res; 89 + char c; 90 + 91 + errno = 0; 92 + res = read(fd, &c, sizeof(c)); 93 + FAIL_IF(res > sizeof(c)); 94 + FAIL_IF(res < 0); 95 + FAIL_IF(errno != 0); 96 + consumed += res; 97 + if (res == 0) 98 + break; 99 + } 100 + 101 + FAIL_IF(consumed != lseek(fd, 0, SEEK_END)); 102 + 103 + FAIL_IF(close(fd)); 104 + 105 + return 0; 106 + } 107 + 108 + 109 + static int dev_papr_vpd_unterm_loc_code(void) 110 + { 111 + const int devfd = open(DEVPATH, O_RDONLY); 112 + struct papr_location_code lc = {}; 113 + int fd; 114 + 115 + SKIP_IF_MSG(devfd < 0 && errno == ENOENT, 116 + DEVPATH " not present"); 117 + 118 + FAIL_IF(devfd < 0); 119 + 120 + /* 121 + * Place a non-null byte in every element of loc_code; the 122 + * driver should reject this input. 123 + */ 124 + memset(lc.str, 'x', ARRAY_SIZE(lc.str)); 125 + 126 + errno = 0; 127 + fd = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, &lc); 128 + FAIL_IF(fd != -1); 129 + FAIL_IF(errno != EINVAL); 130 + 131 + FAIL_IF(close(devfd) != 0); 132 + return 0; 133 + } 134 + 135 + static int dev_papr_vpd_null_handle(void) 136 + { 137 + const int devfd = open(DEVPATH, O_RDONLY); 138 + int rc; 139 + 140 + SKIP_IF_MSG(devfd < 0 && errno == ENOENT, 141 + DEVPATH " not present"); 142 + 143 + FAIL_IF(devfd < 0); 144 + 145 + errno = 0; 146 + rc = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, NULL); 147 + FAIL_IF(rc != -1); 148 + FAIL_IF(errno != EFAULT); 149 + 150 + FAIL_IF(close(devfd) != 0); 151 + return 0; 152 + } 153 + 154 + static int papr_vpd_close_handle_without_reading(void) 155 + { 156 + const int devfd = open(DEVPATH, O_RDONLY); 157 + struct papr_location_code lc; 158 + int fd; 159 + 160 + SKIP_IF_MSG(devfd < 0 && errno == ENOENT, 161 + DEVPATH " not present"); 162 + 163 + FAIL_IF(devfd < 0); 164 + 165 + errno = 0; 166 + fd = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, &lc); 167 + FAIL_IF(errno != 0); 168 + FAIL_IF(fd < 0); 169 + 170 + /* close the handle 
without reading it */ 171 + FAIL_IF(close(fd) != 0); 172 + 173 + FAIL_IF(close(devfd) != 0); 174 + return 0; 175 + } 176 + 177 + static int papr_vpd_reread(void) 178 + { 179 + const int devfd = open(DEVPATH, O_RDONLY); 180 + struct papr_location_code lc = { .str = "", }; 181 + int fd; 182 + 183 + SKIP_IF_MSG(devfd < 0 && errno == ENOENT, 184 + DEVPATH " not present"); 185 + 186 + FAIL_IF(devfd < 0); 187 + 188 + errno = 0; 189 + fd = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, &lc); 190 + FAIL_IF(errno != 0); 191 + FAIL_IF(fd < 0); 192 + 193 + FAIL_IF(close(devfd) != 0); 194 + 195 + const off_t size = lseek(fd, 0, SEEK_END); 196 + FAIL_IF(size <= 0); 197 + 198 + char *bufs[2]; 199 + 200 + for (size_t i = 0; i < ARRAY_SIZE(bufs); ++i) { 201 + bufs[i] = malloc(size); 202 + FAIL_IF(!bufs[i]); 203 + ssize_t consumed = pread(fd, bufs[i], size, 0); 204 + FAIL_IF(consumed != size); 205 + } 206 + 207 + FAIL_IF(memcmp(bufs[0], bufs[1], size)); 208 + 209 + FAIL_IF(close(fd) != 0); 210 + 211 + return 0; 212 + } 213 + 214 + static int get_system_loc_code(struct papr_location_code *lc) 215 + { 216 + static const char system_id_path[] = "/sys/firmware/devicetree/base/system-id"; 217 + static const char model_path[] = "/sys/firmware/devicetree/base/model"; 218 + char *system_id; 219 + char *model; 220 + int err = -1; 221 + 222 + if (read_file_alloc(model_path, &model, NULL)) 223 + return err; 224 + 225 + if (read_file_alloc(system_id_path, &system_id, NULL)) 226 + goto free_model; 227 + 228 + char *mtm; 229 + int sscanf_ret = sscanf(model, "IBM,%ms", &mtm); 230 + if (sscanf_ret != 1) 231 + goto free_system_id; 232 + 233 + char *plant_and_seq; 234 + if (sscanf(system_id, "IBM,%*c%*c%ms", &plant_and_seq) != 1) 235 + goto free_mtm; 236 + /* 237 + * Replace - with . to build location code. 
238 + */ 239 + char *sep = strchr(mtm, '-'); 240 + if (!sep) 241 + goto free_mtm; 242 + else 243 + *sep = '.'; 244 + 245 + snprintf(lc->str, sizeof(lc->str), 246 + "U%s.%s", mtm, plant_and_seq); 247 + err = 0; 248 + 249 + free(plant_and_seq); 250 + free_mtm: 251 + free(mtm); 252 + free_system_id: 253 + free(system_id); 254 + free_model: 255 + free(model); 256 + return err; 257 + } 258 + 259 + static int papr_vpd_system_loc_code(void) 260 + { 261 + struct papr_location_code lc; 262 + const int devfd = open(DEVPATH, O_RDONLY); 263 + off_t size; 264 + int fd; 265 + 266 + SKIP_IF_MSG(get_system_loc_code(&lc), 267 + "Cannot determine system location code"); 268 + SKIP_IF_MSG(devfd < 0 && errno == ENOENT, 269 + DEVPATH " not present"); 270 + 271 + FAIL_IF(devfd < 0); 272 + 273 + errno = 0; 274 + fd = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, &lc); 275 + FAIL_IF(errno != 0); 276 + FAIL_IF(fd < 0); 277 + 278 + FAIL_IF(close(devfd) != 0); 279 + 280 + size = lseek(fd, 0, SEEK_END); 281 + FAIL_IF(size <= 0); 282 + 283 + void *buf = malloc((size_t)size); 284 + FAIL_IF(!buf); 285 + 286 + ssize_t consumed = pread(fd, buf, size, 0); 287 + FAIL_IF(consumed != size); 288 + 289 + /* Ensure EOF */ 290 + FAIL_IF(read(fd, buf, size) != 0); 291 + FAIL_IF(close(fd)); 292 + 293 + /* Verify that the buffer looks like VPD */ 294 + static const char needle[] = "System VPD"; 295 + FAIL_IF(!memmem(buf, size, needle, strlen(needle))); 296 + 297 + return 0; 298 + } 299 + 300 + struct vpd_test { 301 + int (*function)(void); 302 + const char *description; 303 + }; 304 + 305 + static const struct vpd_test vpd_tests[] = { 306 + { 307 + .function = dev_papr_vpd_open_close, 308 + .description = "open/close " DEVPATH, 309 + }, 310 + { 311 + .function = dev_papr_vpd_unterm_loc_code, 312 + .description = "ensure EINVAL on unterminated location code", 313 + }, 314 + { 315 + .function = dev_papr_vpd_null_handle, 316 + .description = "ensure EFAULT on bad handle addr", 317 + }, 318 + { 319 + .function = 
dev_papr_vpd_get_handle_all, 320 + .description = "get handle for all VPD" 321 + }, 322 + { 323 + .function = papr_vpd_close_handle_without_reading, 324 + .description = "close handle without consuming VPD" 325 + }, 326 + { 327 + .function = dev_papr_vpd_get_handle_byte_at_a_time, 328 + .description = "read all VPD one byte at a time" 329 + }, 330 + { 331 + .function = papr_vpd_reread, 332 + .description = "ensure re-read yields same results" 333 + }, 334 + { 335 + .function = papr_vpd_system_loc_code, 336 + .description = "get handle for system VPD" 337 + }, 338 + }; 339 + 340 + int main(void) 341 + { 342 + size_t fails = 0; 343 + 344 + for (size_t i = 0; i < ARRAY_SIZE(vpd_tests); ++i) { 345 + const struct vpd_test *t = &vpd_tests[i]; 346 + 347 + if (test_harness(t->function, t->description)) 348 + ++fails; 349 + } 350 + 351 + return fails == 0 ? EXIT_SUCCESS : EXIT_FAILURE; 352 + }