Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'for-next/kpti', 'for-next/missing-proto-warn', 'for-next/iss2-decode', 'for-next/kselftest', 'for-next/misc', 'for-next/feat_mops', 'for-next/module-alloc', 'for-next/sysreg', 'for-next/cpucap', 'for-next/acpi', 'for-next/kdump', 'for-next/acpi-doc', 'for-next/doc' and 'for-next/tpidr2-fix', remote-tracking branch 'arm64/for-next/perf' into for-next/core

* arm64/for-next/perf:
docs: perf: Fix warning from 'make htmldocs' in hisi-pmu.rst
docs: perf: Add new description for HiSilicon UC PMU
drivers/perf: hisi: Add support for HiSilicon UC PMU driver
drivers/perf: hisi: Add support for HiSilicon H60PA and PAv3 PMU driver
perf: arm_cspmu: Add missing MODULE_DEVICE_TABLE
perf/arm-cmn: Add sysfs identifier
perf/arm-cmn: Revamp model detection
perf/arm_dmc620: Add cpumask
dt-bindings: perf: fsl-imx-ddr: Add i.MX93 compatible
drivers/perf: imx_ddr: Add support for NXP i.MX9 SoC DDRC PMU driver
perf/arm_cspmu: Decouple APMT dependency
perf/arm_cspmu: Clean up ACPI dependency
ACPI/APMT: Don't register invalid resource
perf/arm_cspmu: Fix event attribute type
perf: arm_cspmu: Set irq affinity only if overflow interrupt is used
drivers/perf: hisi: Don't migrate perf to the CPU going to teardown
drivers/perf: apple_m1: Force 63bit counters for M2 CPUs
perf/arm-cmn: Fix DTC reset
perf: qcom_l2_pmu: Make l2_cache_pmu_probe_cluster() more robust
perf/arm-cci: Slightly optimize cci_pmu_sync_counters()

* for-next/kpti:
: Simplify KPTI trampoline exit code
arm64: entry: Simplify tramp_alias macro and tramp_exit routine
arm64: entry: Preserve/restore X29 even for compat tasks

* for-next/missing-proto-warn:
: Address -Wmissing-prototype warnings
arm64: add alt_cb_patch_nops prototype
arm64: move early_brk64 prototype to header
arm64: signal: include asm/exception.h
arm64: kaslr: add kaslr_early_init() declaration
arm64: flush: include linux/libnvdimm.h
arm64: module-plts: inline linux/moduleloader.h
arm64: hide unused is_valid_bugaddr()
arm64: efi: add efi_handle_corrupted_x18 prototype
arm64: cpuidle: fix #ifdef for acpi functions
arm64: kvm: add prototypes for functions called in asm
arm64: spectre: provide prototypes for internal functions
arm64: move cpu_suspend_set_dbg_restorer() prototype to header
arm64: avoid prototype warnings for syscalls
arm64: add scs_patch_vmlinux prototype
arm64: xor-neon: mark xor_arm64_neon_*() static

* for-next/iss2-decode:
: Add decode of ISS2 to data abort reports
arm64/esr: Add decode of ISS2 to data abort reporting
arm64/esr: Use GENMASK() for the ISS mask

* for-next/kselftest:
: Various arm64 kselftest improvements
kselftest/arm64: Log signal code and address for unexpected signals
kselftest/arm64: Add a smoke test for ptracing hardware break/watch points

* for-next/misc:
: Miscellaneous patches
arm64: alternatives: make clean_dcache_range_nopatch() noinstr-safe
arm64: hibernate: remove WARN_ON in save_processor_state
arm64/fpsimd: Exit streaming mode when flushing tasks
arm64: mm: fix VA-range sanity check
arm64/mm: remove now-superfluous ISBs from TTBR writes
arm64: consolidate rox page protection logic
arm64: set __exception_irq_entry with __irq_entry as a default
arm64: syscall: unmask DAIF for tracing status
arm64: lockdep: enable checks for held locks when returning to userspace
arm64/cpucaps: increase string width to properly format cpucaps.h
arm64/cpufeature: Use helper for ECV CNTPOFF cpufeature

* for-next/feat_mops:
: Support for ARMv8.8 memcpy instructions in userspace
kselftest/arm64: add MOPS to hwcap test
arm64: mops: allow disabling MOPS from the kernel command line
arm64: mops: detect and enable FEAT_MOPS
arm64: mops: handle single stepping after MOPS exception
arm64: mops: handle MOPS exceptions
KVM: arm64: hide MOPS from guests
arm64: mops: don't disable host MOPS instructions from EL2
arm64: mops: document boot requirements for MOPS
KVM: arm64: switch HCRX_EL2 between host and guest
arm64: cpufeature: detect FEAT_HCX
KVM: arm64: initialize HCRX_EL2

* for-next/module-alloc:
: Make the arm64 module allocation code more robust (clean-up, VA range expansion)
arm64: module: rework module VA range selection
arm64: module: mandate MODULE_PLTS
arm64: module: move module randomization to module.c
arm64: kaslr: split kaslr/module initialization
arm64: kasan: remove !KASAN_VMALLOC remnants
arm64: module: remove old !KASAN_VMALLOC logic

* for-next/sysreg: (21 commits)
: More sysreg conversions to automatic generation
arm64/sysreg: Convert TRBIDR_EL1 register to automatic generation
arm64/sysreg: Convert TRBTRG_EL1 register to automatic generation
arm64/sysreg: Convert TRBMAR_EL1 register to automatic generation
arm64/sysreg: Convert TRBSR_EL1 register to automatic generation
arm64/sysreg: Convert TRBBASER_EL1 register to automatic generation
arm64/sysreg: Convert TRBPTR_EL1 register to automatic generation
arm64/sysreg: Convert TRBLIMITR_EL1 register to automatic generation
arm64/sysreg: Rename TRBIDR_EL1 fields per auto-gen tools format
arm64/sysreg: Rename TRBTRG_EL1 fields per auto-gen tools format
arm64/sysreg: Rename TRBMAR_EL1 fields per auto-gen tools format
arm64/sysreg: Rename TRBSR_EL1 fields per auto-gen tools format
arm64/sysreg: Rename TRBBASER_EL1 fields per auto-gen tools format
arm64/sysreg: Rename TRBPTR_EL1 fields per auto-gen tools format
arm64/sysreg: Rename TRBLIMITR_EL1 fields per auto-gen tools format
arm64/sysreg: Convert OSECCR_EL1 to automatic generation
arm64/sysreg: Convert OSDTRTX_EL1 to automatic generation
arm64/sysreg: Convert OSDTRRX_EL1 to automatic generation
arm64/sysreg: Convert OSLAR_EL1 to automatic generation
arm64/sysreg: Standardise naming of bitfield constants in OSL[AS]R_EL1
arm64/sysreg: Convert MDSCR_EL1 to automatic register generation
...

* for-next/cpucap:
: arm64 cpucap clean-up
arm64: cpufeature: fold cpus_set_cap() into update_cpu_capabilities()
arm64: cpufeature: use cpucap naming
arm64: alternatives: use cpucap naming
arm64: standardise cpucap bitmap names

* for-next/acpi:
: Various arm64-related ACPI patches
ACPI: bus: Consolidate all arm specific initialisation into acpi_arm_init()

* for-next/kdump:
: Simplify the crashkernel reservation behaviour of crashkernel=X,high on arm64
arm64: add kdump.rst into index.rst
Documentation: add kdump.rst to present crashkernel reservation on arm64
arm64: kdump: simplify the reservation behaviour of crashkernel=,high

* for-next/acpi-doc:
: Update ACPI documentation for Arm systems
Documentation/arm64: Update ACPI tables from BBR
Documentation/arm64: Update references in arm-acpi
Documentation/arm64: Update ARM and arch reference

* for-next/doc:
: arm64 documentation updates
Documentation/arm64: Add ptdump documentation

* for-next/tpidr2-fix:
: Fix the TPIDR2_EL0 register restoring on sigreturn
kselftest/arm64: Add a test case for TPIDR2 restore
arm64/signal: Restore TPIDR2 register rather than memory state

+1282 -585
+3
Documentation/admin-guide/kernel-parameters.txt
··· 429 429 arm64.nosme [ARM64] Unconditionally disable Scalable Matrix 430 430 Extension support 431 431 432 + arm64.nomops [ARM64] Unconditionally disable Memory Copy and Memory 433 + Set instructions support 434 + 432 435 ataflop= [HW,M68k] 433 436 434 437 atarimouse= [HW,MOUSE] Atari Mouse
+76 -5
Documentation/arm64/acpi_object_usage.rst
··· 17 17 18 18 - Recommended: BERT, EINJ, ERST, HEST, PCCT, SSDT 19 19 20 - - Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IBFT, 21 - IORT, MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT, 22 - STAO, TCPA, TPM2, UEFI, XENV 20 + - Optional: AGDI, BGRT, CEDT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, 21 + HMAT, IBFT, IORT, MCHI, MPAM, MPST, MSCT, NFIT, PMTT, PPTT, RASF, SBST, 22 + SDEI, SLIT, SPMI, SRAT, STAO, TCPA, TPM2, UEFI, XENV 23 23 24 - - Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IVRS, LPIT, MSDM, OEMx, 25 - PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT 24 + - Not supported: AEST, APMT, BOOT, DBGP, DMAR, ETDT, HPET, IVRS, LPIT, 25 + MSDM, OEMx, PDTT, PSDT, RAS2, RSDT, SLIC, WAET, WDAT, WDRT, WPBT 26 26 27 27 ====== ======================================================================== 28 28 Table Usage for ARMv8 Linux 29 29 ====== ======================================================================== 30 + AEST Signature Reserved (signature == "AEST") 31 + 32 + **Arm Error Source Table** 33 + 34 + This table informs the OS of any error nodes in the system that are 35 + compliant with the Arm RAS architecture. 36 + 37 + AGDI Signature Reserved (signature == "AGDI") 38 + 39 + **Arm Generic diagnostic Dump and Reset Device Interface Table** 40 + 41 + This table describes a non-maskable event, that is used by the platform 42 + firmware, to request the OS to generate a diagnostic dump and reset the device. 43 + 44 + APMT Signature Reserved (signature == "APMT") 45 + 46 + **Arm Performance Monitoring Table** 47 + 48 + This table describes the properties of PMU support implemented by 49 + components in the system. 50 + 30 51 BERT Section 18.3 (signature == "BERT") 31 52 32 53 **Boot Error Record Table** ··· 67 46 68 47 Optional, not currently supported, with no real use-case for an 69 48 ARM server. 
49 + 50 + CEDT Signature Reserved (signature == "CEDT") 51 + 52 + **CXL Early Discovery Table** 53 + 54 + This table allows the OS to discover any CXL Host Bridges and the Host 55 + Bridge registers. 70 56 71 57 CPEP Section 5.2.18 (signature == "CPEP") 72 58 ··· 212 184 Must be supplied if RAS support is provided by the platform. It 213 185 is recommended this table be supplied. 214 186 187 + HMAT Section 5.2.28 (signature == "HMAT") 188 + 189 + **Heterogeneous Memory Attribute Table** 190 + 191 + This table describes the memory attributes, such as memory side cache 192 + attributes and bandwidth and latency details, related to Memory Proximity 193 + Domains. The OS uses this information to optimize the system memory 194 + configuration. 195 + 215 196 HPET Signature Reserved (signature == "HPET") 216 197 217 198 **High Precision Event timer Table** ··· 278 241 279 242 Optional, not currently supported. 280 243 244 + MPAM Signature Reserved (signature == "MPAM") 245 + 246 + **Memory Partitioning And Monitoring table** 247 + 248 + This table allows the OS to discover the MPAM controls implemented by 249 + the subsystems. 250 + 281 251 MPST Section 5.2.21 (signature == "MPST") 282 252 283 253 **Memory Power State Table** ··· 325 281 Recommend for use on arm64; use of PCC is recommended when using CPPC 326 282 to control performance and power for platform processors. 327 283 284 + PDTT Section 5.2.29 (signature == "PDTT") 285 + 286 + **Platform Debug Trigger Table** 287 + 288 + This table describes PCC channels used to gather debug logs of 289 + non-architectural features. 290 + 291 + 328 292 PMTT Section 5.2.21.12 (signature == "PMTT") 329 293 330 294 **Platform Memory Topology Table** 331 295 332 296 Optional, not currently supported. 333 297 298 + PPTT Section 5.2.30 (signature == "PPTT") 299 + 300 + **Processor Properties Topology Table** 301 + 302 + This table provides the processor and cache topology. 
303 + 334 304 PSDT Section 5.2.11.3 (signature == "PSDT") 335 305 336 306 **Persistent System Description Table** 337 307 338 308 Obsolete table, will not be supported. 309 + 310 + RAS2 Section 5.2.21 (signature == "RAS2") 311 + 312 + **RAS Features 2 table** 313 + 314 + This table provides interfaces for the RAS capabilities implemented in 315 + the platform. 339 316 340 317 RASF Section 5.2.20 (signature == "RASF") 341 318 ··· 382 317 **Smart Battery Subsystem Table** 383 318 384 319 Optional, not currently supported. 320 + 321 + SDEI Signature Reserved (signature == "SDEI") 322 + 323 + **Software Delegated Exception Interface table** 324 + 325 + This table advertises the presence of the SDEI interface. 385 326 386 327 SLIC Signature Reserved (signature == "SLIC") 387 328
+106 -59
Documentation/arm64/arm-acpi.rst
··· 1 - ===================== 2 - ACPI on ARMv8 Servers 3 - ===================== 1 + =================== 2 + ACPI on Arm systems 3 + =================== 4 4 5 - ACPI can be used for ARMv8 general purpose servers designed to follow 6 - the ARM SBSA (Server Base System Architecture) [0] and SBBR (Server 7 - Base Boot Requirements) [1] specifications. Please note that the SBBR 8 - can be retrieved simply by visiting [1], but the SBSA is currently only 9 - available to those with an ARM login due to ARM IP licensing concerns. 5 + ACPI can be used for Armv8 and Armv9 systems designed to follow 6 + the BSA (Arm Base System Architecture) [0] and BBR (Arm 7 + Base Boot Requirements) [1] specifications. Both BSA and BBR are publicly 8 + accessible documents. 9 + Arm Servers, in addition to being BSA compliant, comply with a set 10 + of rules defined in SBSA (Server Base System Architecture) [2]. 10 11 11 - The ARMv8 kernel implements the reduced hardware model of ACPI version 12 + The Arm kernel implements the reduced hardware model of ACPI version 12 13 5.1 or later. Links to the specification and all external documents 13 14 it refers to are managed by the UEFI Forum. The specification is 14 15 available at http://www.uefi.org/specifications and documents referenced 15 16 by the specification can be found via http://www.uefi.org/acpi. 16 17 17 - If an ARMv8 system does not meet the requirements of the SBSA and SBBR, 18 + If an Arm system does not meet the requirements of the BSA and BBR, 18 19 or cannot be described using the mechanisms defined in the required ACPI 19 20 specifications, then ACPI may not be a good fit for the hardware. 20 21 21 22 While the documents mentioned above set out the requirements for building 22 - industry-standard ARMv8 servers, they also apply to more than one operating 23 + industry-standard Arm systems, they also apply to more than one operating 23 24 system. 
The purpose of this document is to describe the interaction between 24 - ACPI and Linux only, on an ARMv8 system -- that is, what Linux expects of 25 + ACPI and Linux only, on an Arm system -- that is, what Linux expects of 25 26 ACPI and what ACPI can expect of Linux. 26 27 27 28 28 - Why ACPI on ARM? 29 + Why ACPI on Arm? 29 30 ---------------- 30 31 Before examining the details of the interface between ACPI and Linux, it is 31 32 useful to understand why ACPI is being used. Several technologies already 32 33 exist in Linux for describing non-enumerable hardware, after all. In this 33 - section we summarize a blog post [2] from Grant Likely that outlines the 34 - reasoning behind ACPI on ARMv8 servers. Actually, we snitch a good portion 34 + section we summarize a blog post [3] from Grant Likely that outlines the 35 + reasoning behind ACPI on Arm systems. Actually, we snitch a good portion 35 36 of the summary text almost directly, to be honest. 36 37 37 - The short form of the rationale for ACPI on ARM is: 38 + The short form of the rationale for ACPI on Arm is: 38 39 39 40 - ACPI’s byte code (AML) allows the platform to encode hardware behavior, 40 41 while DT explicitly does not support this. For hardware vendors, being ··· 48 47 49 48 - In the enterprise server environment, ACPI has established bindings (such 50 49 as for RAS) which are currently used in production systems. DT does not. 51 - Such bindings could be defined in DT at some point, but doing so means ARM 50 + Such bindings could be defined in DT at some point, but doing so means Arm 52 51 and x86 would end up using completely different code paths in both firmware 53 52 and the kernel. 54 53 ··· 109 108 110 109 Relationship with Device Tree 111 110 ----------------------------- 112 - ACPI support in drivers and subsystems for ARMv8 should never be mutually 111 + ACPI support in drivers and subsystems for Arm should never be mutually 113 112 exclusive with DT support at compile time. 
114 113 115 114 At boot time the kernel will only use one description method depending on ··· 122 121 123 122 Booting using ACPI tables 124 123 ------------------------- 125 - The only defined method for passing ACPI tables to the kernel on ARMv8 124 + The only defined method for passing ACPI tables to the kernel on Arm 126 125 is via the UEFI system configuration table. Just so it is explicit, this 127 126 means that ACPI is only supported on platforms that boot via UEFI. 128 127 129 - When an ARMv8 system boots, it can either have DT information, ACPI tables, 128 + When an Arm system boots, it can either have DT information, ACPI tables, 130 129 or in some very unusual cases, both. If no command line parameters are used, 131 130 the kernel will try to use DT for device enumeration; if there is no DT 132 131 present, the kernel will try to use ACPI tables, but only if they are present. ··· 170 169 171 170 For the ACPI core to operate properly, and in turn provide the information 172 171 the kernel needs to configure devices, it expects to find the following 173 - tables (all section numbers refer to the ACPI 6.1 specification): 172 + tables (all section numbers refer to the ACPI 6.5 specification): 174 173 175 174 - RSDP (Root System Description Pointer), section 5.2.5 176 175 ··· 185 184 186 185 - GTDT (Generic Timer Description Table), section 5.2.24 187 186 187 + - PPTT (Processor Properties Topology Table), section 5.2.30 188 + 189 + - DBG2 (DeBuG port table 2), section 5.2.6, specifically Table 5-6. 190 + 191 + - APMT (Arm Performance Monitoring unit Table), section 5.2.6, specifically Table 5-6. 192 + 193 + - AGDI (Arm Generic diagnostic Dump and Reset Device Interface Table), section 5.2.6, specifically Table 5-6. 194 + 188 195 - If PCI is supported, the MCFG (Memory mapped ConFiGuration 189 - Table), section 5.2.6, specifically Table 5-31. 196 + Table), section 5.2.6, specifically Table 5-6. 
190 197 191 198 - If booting without a console=<device> kernel parameter is 192 199 supported, the SPCR (Serial Port Console Redirection table), 193 - section 5.2.6, specifically Table 5-31. 200 + section 5.2.6, specifically Table 5-6. 194 201 195 202 - If necessary to describe the I/O topology, SMMUs and GIC ITSs, 196 203 the IORT (Input Output Remapping Table, section 5.2.6, specifically 197 - Table 5-31). 204 + Table 5-6). 198 205 199 - - If NUMA is supported, the SRAT (System Resource Affinity Table) 200 - and SLIT (System Locality distance Information Table), sections 201 - 5.2.16 and 5.2.17, respectively. 206 + - If NUMA is supported, the following tables are required: 207 + 208 + - SRAT (System Resource Affinity Table), section 5.2.16 209 + 210 + - SLIT (System Locality distance Information Table), section 5.2.17 211 + 212 + - If NUMA is supported, and the system contains heterogeneous memory, 213 + the HMAT (Heterogeneous Memory Attribute Table), section 5.2.28. 214 + 215 + - If the ACPI Platform Error Interfaces are required, the following 216 + tables are conditionally required: 217 + 218 + - BERT (Boot Error Record Table, section 18.3.1) 219 + 220 + - EINJ (Error INJection table, section 18.6.1) 221 + 222 + - ERST (Error Record Serialization Table, section 18.5) 223 + 224 + - HEST (Hardware Error Source Table, section 18.3.2) 225 + 226 + - SDEI (Software Delegated Exception Interface table, section 5.2.6, 227 + specifically Table 5-6) 228 + 229 + - AEST (Arm Error Source Table, section 5.2.6, 230 + specifically Table 5-6) 231 + 232 + - RAS2 (ACPI RAS2 feature table, section 5.2.21) 233 + 234 + - If the system contains controllers using PCC channel, the 235 + PCCT (Platform Communications Channel Table), section 14.1 236 + 237 + - If the system contains a controller to capture board-level system state, 238 + and communicates with the host via PCC, the PDTT (Platform Debug Trigger 239 + Table), section 5.2.29. 
240 + 241 + - If NVDIMM is supported, the NFIT (NVDIMM Firmware Interface Table), section 5.2.26 242 + 243 + - If video framebuffer is present, the BGRT (Boot Graphics Resource Table), section 5.2.23 244 + 245 + - If IPMI is implemented, the SPMI (Server Platform Management Interface), 246 + section 5.2.6, specifically Table 5-6. 247 + 248 + - If the system contains a CXL Host Bridge, the CEDT (CXL Early Discovery 249 + Table), section 5.2.6, specifically Table 5-6. 250 + 251 + - If the system supports MPAM, the MPAM (Memory Partitioning And Monitoring table), section 5.2.6, 252 + specifically Table 5-6. 253 + 254 + - If the system lacks persistent storage, the IBFT (ISCSI Boot Firmware 255 + Table), section 5.2.6, specifically Table 5-6. 256 + 202 257 203 258 If the above tables are not all present, the kernel may or may not be 204 259 able to boot properly since it may not be able to configure all of the ··· 326 269 object is described in the ACPI specification section 6.2.5, but this only 327 270 describes how to define the structure of an object returned via _DSD, and 328 271 how specific data structures are defined by specific UUIDs. Linux should 329 - only use the _DSD Device Properties UUID [5]: 272 + only use the _DSD Device Properties UUID [4]: 330 273 331 274 - UUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 332 275 333 - - https://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf 334 - 335 - The UEFI Forum provides a mechanism for registering device properties [4] 336 - so that they may be used across all operating systems supporting ACPI. 337 - Device properties that have not been registered with the UEFI Forum should 338 - not be used. 276 + Common device properties can be registered by creating a pull request to [4] so 277 + that they may be used across all operating systems supporting ACPI. 278 + Device properties that have not been registered with the UEFI Forum can be used 279 + but not as "uefi-" common properties. 
339 280 340 281 Before creating new device properties, check to be sure that they have not 341 282 been defined before and either registered in the Linux kernel documentation ··· 361 306 362 307 Once registration and review have been completed, the kernel provides an 363 308 interface for looking up device properties in a manner independent of 364 - whether DT or ACPI is being used. This API should be used [6]; it can 309 + whether DT or ACPI is being used. This API should be used [5]; it can 365 310 eliminate some duplication of code paths in driver probing functions and 366 311 discourage divergence between DT bindings and ACPI device properties. 367 312 ··· 503 448 ---- 504 449 The ACPI specification changes regularly. During the year 2014, for instance, 505 450 version 5.1 was released and version 6.0 substantially completed, with most of 506 - the changes being driven by ARM-specific requirements. Proposed changes are 451 + the changes being driven by Arm-specific requirements. Proposed changes are 507 452 presented and discussed in the ASWG (ACPI Specification Working Group) which 508 453 is a part of the UEFI Forum. The current version of the ACPI specification 509 - is 6.1 release in January 2016. 454 + is 6.5 release in August 2022. 510 455 511 456 Participation in this group is open to all UEFI members. Please see 512 457 http://www.uefi.org/workinggroup for details on group membership. 513 458 514 - It is the intent of the ARMv8 ACPI kernel code to follow the ACPI specification 459 + It is the intent of the Arm ACPI kernel code to follow the ACPI specification 515 460 as closely as possible, and to only implement functionality that complies with 516 461 the released standards from UEFI ASWG. As a practical matter, there will be 517 462 vendors that provide bad ACPI tables or violate the standards in some way. 
··· 525 470 526 471 Linux Code 527 472 ---------- 528 - Individual items specific to Linux on ARM, contained in the Linux 473 + Individual items specific to Linux on Arm, contained in the Linux 529 474 source code, are in the list that follows: 530 475 531 476 ACPI_OS_NAME 532 477 This macro defines the string to be returned when 533 - an ACPI method invokes the _OS method. On ARM64 478 + an ACPI method invokes the _OS method. On Arm 534 479 systems, this macro will be "Linux" by default. 535 480 The command line parameter acpi_os=<string> 536 481 can be used to set it to some other value. The ··· 545 490 546 491 References 547 492 ---------- 548 - [0] http://silver.arm.com 549 - document ARM-DEN-0029, or newer: 550 - "Server Base System Architecture", version 2.3, dated 27 Mar 2014 493 + [0] https://developer.arm.com/documentation/den0094/latest 494 + document Arm-DEN-0094: "Arm Base System Architecture", version 1.0C, dated 6 Oct 2022 551 495 552 - [1] http://infocenter.arm.com/help/topic/com.arm.doc.den0044a/Server_Base_Boot_Requirements.pdf 553 - Document ARM-DEN-0044A, or newer: "Server Base Boot Requirements, System 554 - Software on ARM Platforms", dated 16 Aug 2014 496 + [1] https://developer.arm.com/documentation/den0044/latest 497 + Document Arm-DEN-0044: "Arm Base Boot Requirements", version 2.0G, dated 15 Apr 2022 555 498 556 - [2] http://www.secretlab.ca/archives/151, 499 + [2] https://developer.arm.com/documentation/den0029/latest 500 + Document Arm-DEN-0029: "Arm Server Base System Architecture", version 7.1, dated 06 Oct 2022 501 + 502 + [3] http://www.secretlab.ca/archives/151, 557 503 10 Jan 2015, Copyright (c) 2015, 558 504 Linaro Ltd., written by Grant Likely. 
559 505 560 - [3] AMD ACPI for Seattle platform documentation 561 - http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2012/10/Seattle_ACPI_Guide.pdf 506 + [4] _DSD (Device Specific Data) Implementation Guide 507 + https://github.com/UEFI/DSD-Guide/blob/main/dsd-guide.pdf 562 508 563 - 564 - [4] http://www.uefi.org/acpi 565 - please see the link for the "ACPI _DSD Device 566 - Property Registry Instructions" 567 - 568 - [5] http://www.uefi.org/acpi 569 - please see the link for the "_DSD (Device 570 - Specific Data) Implementation Guide" 571 - 572 - [6] Kernel code for the unified device 509 + [5] Kernel code for the unified device 573 510 property interface can be found in 574 511 include/linux/property.h and drivers/base/property.c. 575 512
+6
Documentation/arm64/booting.rst
··· 379 379 380 380 - SMCR_EL2.EZT0 (bit 30) must be initialised to 0b1. 381 381 382 + For CPUs with Memory Copy and Memory Set instructions (FEAT_MOPS): 383 + 384 + - If the kernel is entered at EL1 and EL2 is present: 385 + 386 + - HCRX_EL2.MSCEn (bit 11) must be initialised to 0b1. 387 + 382 388 The requirements described above for CPU mode, caches, MMUs, architected 383 389 timers, coherency and system registers apply to all CPUs. All CPUs must 384 390 enter the kernel in the same exception level. Where the values documented
+2
Documentation/arm64/cpu-feature-registers.rst
··· 288 288 +------------------------------+---------+---------+ 289 289 | Name | bits | visible | 290 290 +------------------------------+---------+---------+ 291 + | MOPS | [19-16] | y | 292 + +------------------------------+---------+---------+ 291 293 | RPRES | [7-4] | y | 292 294 +------------------------------+---------+---------+ 293 295 | WFXT | [3-0] | y |
+3
Documentation/arm64/elf_hwcaps.rst
··· 302 302 HWCAP2_SMEF16F16 303 303 Functionality implied by ID_AA64SMFR0_EL1.F16F16 == 0b1 304 304 305 + HWCAP2_MOPS 306 + Functionality implied by ID_AA64ISAR2_EL1.MOPS == 0b0001. 307 + 305 308 4. Unused AT_HWCAP bits 306 309 ----------------------- 307 310
+2
Documentation/arm64/index.rst
··· 15 15 cpu-feature-registers 16 16 elf_hwcaps 17 17 hugetlbpage 18 + kdump 18 19 legacy_instructions 19 20 memory 20 21 memory-tagging-extension 21 22 perf 22 23 pointer-authentication 24 + ptdump 23 25 silicon-errata 24 26 sme 25 27 sve
+92
Documentation/arm64/kdump.rst
··· 1 + ======================================= 2 + crashkernel memory reservation on arm64 3 + ======================================= 4 + 5 + Author: Baoquan He <bhe@redhat.com> 6 + 7 + Kdump mechanism is used to capture a corrupted kernel vmcore so that 8 + it can be subsequently analyzed. In order to do this, a preliminarily 9 + reserved memory is needed to pre-load the kdump kernel and boot such 10 + kernel if corruption happens. 11 + 12 + That reserved memory for kdump is adapted to be able to minimally 13 + accommodate the kdump kernel and the user space programs needed for the 14 + vmcore collection. 15 + 16 + Kernel parameter 17 + ================ 18 + 19 + Through the kernel parameters below, memory can be reserved accordingly 20 + during the early stage of the first kernel booting so that a continuous 21 + large chunk of memory can be found. The low memory reservation needs to 22 + be considered if the crashkernel is reserved from the high memory area. 23 + 24 + - crashkernel=size@offset 25 + - crashkernel=size 26 + - crashkernel=size,high crashkernel=size,low 27 + 28 + Low memory and high memory 29 + ========================== 30 + 31 + For kdump reservations, low memory is the memory area under a specific 32 + limit, usually decided by the accessible address bits of the DMA-capable 33 + devices needed by the kdump kernel to run. Those devices not related to 34 + vmcore dumping can be ignored. On arm64, the low memory upper bound is 35 + not fixed: it is 1G on the RPi4 platform but 4G on most other systems. 36 + On special kernels built with CONFIG_ZONE_(DMA|DMA32) disabled, the 37 + whole system RAM is low memory. Outside of the low memory described 38 + above, the rest of system RAM is considered high memory. 39 + 40 + Implementation 41 + ============== 42 + 43 + 1) crashkernel=size@offset 44 + -------------------------- 45 + 46 + The crashkernel memory must be reserved at the user-specified region or 47 + fail if already occupied. 
48 + 49 + 50 + 2) crashkernel=size 51 + ------------------- 52 + 53 + The crashkernel memory region will be reserved in any available position 54 + according to the search order: 55 + 56 + Firstly, the kernel searches the low memory area for an available region 57 + with the specified size. 58 + 59 + If searching for low memory fails, the kernel falls back to searching 60 + the high memory area for an available region of the specified size. If 61 + the reservation in high memory succeeds, a default size reservation in 62 + the low memory will be done. Currently the default size is 128M, 63 + sufficient for the low memory needs of the kdump kernel. 64 + 65 + Note: crashkernel=size is the recommended option for crashkernel kernel 66 + reservations. The user would not need to know the system memory layout 67 + for a specific platform. 68 + 69 + 3) crashkernel=size,high crashkernel=size,low 70 + --------------------------------------------- 71 + 72 + crashkernel=size,(high|low) are an important supplement to 73 + crashkernel=size. They allow the user to specify how much memory needs 74 + to be allocated from the high memory and low memory respectively. On 75 + many systems the low memory is precious and crashkernel reservations 76 + from this area should be kept to a minimum. 77 + 78 + To reserve memory for crashkernel=size,high, searching is first 79 + attempted from the high memory region. If the reservation succeeds, the 80 + low memory reservation will be done subsequently. 81 + 82 + If reservation from the high memory failed, the kernel falls back to 83 + searching the low memory with the specified size in crashkernel=,high. 84 + If it succeeds, no further reservation for low memory is needed. 85 + 86 + Notes: 87 + 88 + - If crashkernel=,low is not specified, the default low memory 89 + reservation will be done automatically. 90 + 91 + - if crashkernel=0,low is specified, it means that the low memory 92 + reservation is omitted intentionally.
+4 -4
Documentation/arm64/memory.rst
··· 33 33 0000000000000000 0000ffffffffffff 256TB user 34 34 ffff000000000000 ffff7fffffffffff 128TB kernel logical memory map 35 35 [ffff600000000000 ffff7fffffffffff] 32TB [kasan shadow region] 36 - ffff800000000000 ffff800007ffffff 128MB modules 37 - ffff800008000000 fffffbffefffffff 124TB vmalloc 36 + ffff800000000000 ffff80007fffffff 2GB modules 37 + ffff800080000000 fffffbffefffffff 124TB vmalloc 38 38 fffffbfff0000000 fffffbfffdffffff 224MB fixed mappings (top down) 39 39 fffffbfffe000000 fffffbfffe7fffff 8MB [guard region] 40 40 fffffbfffe800000 fffffbffff7fffff 16MB PCI I/O space ··· 50 50 0000000000000000 000fffffffffffff 4PB user 51 51 fff0000000000000 ffff7fffffffffff ~4PB kernel logical memory map 52 52 [fffd800000000000 ffff7fffffffffff] 512TB [kasan shadow region] 53 - ffff800000000000 ffff800007ffffff 128MB modules 54 - ffff800008000000 fffffbffefffffff 124TB vmalloc 53 + ffff800000000000 ffff80007fffffff 2GB modules 54 + ffff800080000000 fffffbffefffffff 124TB vmalloc 55 55 fffffbfff0000000 fffffbfffdffffff 224MB fixed mappings (top down) 56 56 fffffbfffe000000 fffffbfffe7fffff 8MB [guard region] 57 57 fffffbfffe800000 fffffbffff7fffff 16MB PCI I/O space
+96
Documentation/arm64/ptdump.rst
··· 1 + ====================== 2 + Kernel page table dump 3 + ====================== 4 + 5 + ptdump is a debugfs interface that provides a detailed dump of the 6 + kernel page tables. It offers a comprehensive overview of the kernel 7 + virtual memory layout as well as the attributes associated with the 8 + various regions in a human-readable format. It is useful to dump the 9 + kernel page tables to verify permissions and memory types. Examining the 10 + page table entries and permissions helps identify potential security 11 + vulnerabilities such as mappings with overly permissive access rights or 12 + improper memory protections. 13 + 14 + Memory hotplug allows dynamic expansion or contraction of available 15 + memory without requiring a system reboot. To maintain the consistency 16 + and integrity of the memory management data structures, arm64 makes use 17 + of the ``mem_hotplug_lock`` semaphore in write mode. Additionally, in 18 + read mode, ``mem_hotplug_lock`` supports an efficient implementation of 19 + ``get_online_mems()`` and ``put_online_mems()``. These protect the 20 + offlining of memory being accessed by the ptdump code. 21 + 22 + In order to dump the kernel page tables, enable the following 23 + configurations and mount debugfs:: 24 + 25 + CONFIG_GENERIC_PTDUMP=y 26 + CONFIG_PTDUMP_CORE=y 27 + CONFIG_PTDUMP_DEBUGFS=y 28 + 29 + mount -t debugfs nodev /sys/kernel/debug 30 + cat /sys/kernel/debug/kernel_page_tables 31 + 32 + On analysing the output of ``cat /sys/kernel/debug/kernel_page_tables`` 33 + one can derive information about the virtual address range of the entry, 34 + followed by size of the memory region covered by this entry, the 35 + hierarchical structure of the page tables and finally the attributes 36 + associated with each page. 
The page attributes provide information about 37 + access permissions, execution capability, type of mapping such as leaf 38 + level PTE or block level PGD, PMD and PUD, and access status of a page 39 + within the kernel memory. Assessing these attributes can assist in 40 + understanding the memory layout, access patterns and security 41 + characteristics of the kernel pages. 42 + 43 + Kernel virtual memory layout example:: 44 + 45 + start address end address size attributes 46 + +---------------------------------------------------------------------------------------+ 47 + | ---[ Linear Mapping start ]---------------------------------------------------------- | 48 + | .................. | 49 + | 0xfff0000000000000-0xfff0000000210000 2112K PTE RW NX SHD AF UXN MEM/NORMAL-TAGGED | 50 + | 0xfff0000000210000-0xfff0000001c00000 26560K PTE ro NX SHD AF UXN MEM/NORMAL | 51 + | .................. | 52 + | ---[ Linear Mapping end ]------------------------------------------------------------ | 53 + +---------------------------------------------------------------------------------------+ 54 + | ---[ Modules start ]----------------------------------------------------------------- | 55 + | .................. | 56 + | 0xffff800000000000-0xffff800008000000 128M PTE | 57 + | .................. | 58 + | ---[ Modules end ]------------------------------------------------------------------- | 59 + +---------------------------------------------------------------------------------------+ 60 + | ---[ vmalloc() area ]---------------------------------------------------------------- | 61 + | .................. | 62 + | 0xffff800008010000-0xffff800008200000 1984K PTE ro x SHD AF UXN MEM/NORMAL | 63 + | 0xffff800008200000-0xffff800008e00000 12M PTE ro x SHD AF CON UXN MEM/NORMAL | 64 + | .................. 
| 65 + | ---[ vmalloc() end ]----------------------------------------------------------------- | 66 + +---------------------------------------------------------------------------------------+ 67 + | ---[ Fixmap start ]------------------------------------------------------------------ | 68 + | .................. | 69 + | 0xfffffbfffdb80000-0xfffffbfffdb90000 64K PTE ro x SHD AF UXN MEM/NORMAL | 70 + | 0xfffffbfffdb90000-0xfffffbfffdba0000 64K PTE ro NX SHD AF UXN MEM/NORMAL | 71 + | .................. | 72 + | ---[ Fixmap end ]-------------------------------------------------------------------- | 73 + +---------------------------------------------------------------------------------------+ 74 + | ---[ PCI I/O start ]----------------------------------------------------------------- | 75 + | .................. | 76 + | 0xfffffbfffe800000-0xfffffbffff800000 16M PTE | 77 + | .................. | 78 + | ---[ PCI I/O end ]------------------------------------------------------------------- | 79 + +---------------------------------------------------------------------------------------+ 80 + | ---[ vmemmap start ]----------------------------------------------------------------- | 81 + | .................. | 82 + | 0xfffffc0002000000-0xfffffc0002200000 2M PTE RW NX SHD AF UXN MEM/NORMAL | 83 + | 0xfffffc0002200000-0xfffffc0020000000 478M PTE | 84 + | .................. 
| 85 + | ---[ vmemmap end ]------------------------------------------------------------------- | 86 + +---------------------------------------------------------------------------------------+ 87 + 88 + ``cat /sys/kernel/debug/kernel_page_tables`` output:: 89 + 90 + 0xfff0000001c00000-0xfff0000080000000 2020M PTE RW NX SHD AF UXN MEM/NORMAL-TAGGED 91 + 0xfff0000080000000-0xfff0000800000000 30G PMD 92 + 0xfff0000800000000-0xfff0000800700000 7M PTE RW NX SHD AF UXN MEM/NORMAL-TAGGED 93 + 0xfff0000800700000-0xfff0000800710000 64K PTE ro NX SHD AF UXN MEM/NORMAL-TAGGED 94 + 0xfff0000800710000-0xfff0000880000000 2089920K PTE RW NX SHD AF UXN MEM/NORMAL-TAGGED 95 + 0xfff0000880000000-0xfff0040000000000 4062G PMD 96 + 0xfff0040000000000-0xffff800000000000 3964T PGD
+3 -25
arch/arm64/Kconfig
··· 207 207 select HAVE_IOREMAP_PROT 208 208 select HAVE_IRQ_TIME_ACCOUNTING 209 209 select HAVE_KVM 210 + select HAVE_MOD_ARCH_SPECIFIC 210 211 select HAVE_NMI 211 212 select HAVE_PERF_EVENTS 212 213 select HAVE_PERF_REGS ··· 578 577 config ARM64_ERRATUM_843419 579 578 bool "Cortex-A53: 843419: A load or store might access an incorrect address" 580 579 default y 581 - select ARM64_MODULE_PLTS if MODULES 582 580 help 583 581 This option links the kernel with '--fix-cortex-a53-843419' and 584 582 enables PLT support to replace certain ADRP instructions, which can ··· 2107 2107 register state capable of holding two dimensional matrix tiles to 2108 2108 enable various matrix operations. 2109 2109 2110 - config ARM64_MODULE_PLTS 2111 - bool "Use PLTs to allow module memory to spill over into vmalloc area" 2112 - depends on MODULES 2113 - select HAVE_MOD_ARCH_SPECIFIC 2114 - help 2115 - Allocate PLTs when loading modules so that jumps and calls whose 2116 - targets are too far away for their relative offsets to be encoded 2117 - in the instructions themselves can be bounced via veneers in the 2118 - module's PLT. This allows modules to be allocated in the generic 2119 - vmalloc area after the dedicated module memory area has been 2120 - exhausted. 2121 - 2122 - When running with address space randomization (KASLR), the module 2123 - region itself may be too far away for ordinary relative jumps and 2124 - calls, and so in that case, module PLTs are required and cannot be 2125 - disabled. 2126 - 2127 - Specific errata workaround(s) might also force module PLTs to be 2128 - enabled (ARM64_ERRATUM_843419). 
2129 - 2130 2110 config ARM64_PSEUDO_NMI 2131 2111 bool "Support for NMI-like interrupts" 2132 2112 select ARM_GIC_V3 ··· 2147 2167 2148 2168 config RANDOMIZE_BASE 2149 2169 bool "Randomize the address of the kernel image" 2150 - select ARM64_MODULE_PLTS if MODULES 2151 2170 select RELOCATABLE 2152 2171 help 2153 2172 Randomizes the virtual address at which the kernel image is ··· 2177 2198 When this option is not set, the module region will be randomized over 2178 2199 a limited range that contains the [_stext, _etext] interval of the 2179 2200 core kernel, so branch relocations are almost always in range unless 2180 - ARM64_MODULE_PLTS is enabled and the region is exhausted. In this 2181 - particular case of region exhaustion, modules might be able to fall 2182 - back to a larger 2GB area. 2201 + the region is exhausted. In this particular case of region 2202 + exhaustion, modules might be able to fall back to a larger 2GB area. 2183 2203 2184 2204 config CC_HAVE_STACKPROTECTOR_SYSREG 2185 2205 def_bool $(cc-option,-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=0)
+27 -27
arch/arm64/include/asm/alternative-macros.h
··· 23 23 24 24 #include <linux/stringify.h> 25 25 26 - #define ALTINSTR_ENTRY(feature) \ 26 + #define ALTINSTR_ENTRY(cpucap) \ 27 27 " .word 661b - .\n" /* label */ \ 28 28 " .word 663f - .\n" /* new instruction */ \ 29 - " .hword " __stringify(feature) "\n" /* feature bit */ \ 29 + " .hword " __stringify(cpucap) "\n" /* cpucap */ \ 30 30 " .byte 662b-661b\n" /* source len */ \ 31 31 " .byte 664f-663f\n" /* replacement len */ 32 32 33 - #define ALTINSTR_ENTRY_CB(feature, cb) \ 33 + #define ALTINSTR_ENTRY_CB(cpucap, cb) \ 34 34 " .word 661b - .\n" /* label */ \ 35 - " .word " __stringify(cb) "- .\n" /* callback */ \ 36 - " .hword " __stringify(feature) "\n" /* feature bit */ \ 35 + " .word " __stringify(cb) "- .\n" /* callback */ \ 36 + " .hword " __stringify(cpucap) "\n" /* cpucap */ \ 37 37 " .byte 662b-661b\n" /* source len */ \ 38 38 " .byte 664f-663f\n" /* replacement len */ 39 39 ··· 53 53 * 54 54 * Alternatives with callbacks do not generate replacement instructions. 55 55 */ 56 - #define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \ 56 + #define __ALTERNATIVE_CFG(oldinstr, newinstr, cpucap, cfg_enabled) \ 57 57 ".if "__stringify(cfg_enabled)" == 1\n" \ 58 58 "661:\n\t" \ 59 59 oldinstr "\n" \ 60 60 "662:\n" \ 61 61 ".pushsection .altinstructions,\"a\"\n" \ 62 - ALTINSTR_ENTRY(feature) \ 62 + ALTINSTR_ENTRY(cpucap) \ 63 63 ".popsection\n" \ 64 64 ".subsection 1\n" \ 65 65 "663:\n\t" \ ··· 70 70 ".previous\n" \ 71 71 ".endif\n" 72 72 73 - #define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \ 73 + #define __ALTERNATIVE_CFG_CB(oldinstr, cpucap, cfg_enabled, cb) \ 74 74 ".if "__stringify(cfg_enabled)" == 1\n" \ 75 75 "661:\n\t" \ 76 76 oldinstr "\n" \ 77 77 "662:\n" \ 78 78 ".pushsection .altinstructions,\"a\"\n" \ 79 - ALTINSTR_ENTRY_CB(feature, cb) \ 79 + ALTINSTR_ENTRY_CB(cpucap, cb) \ 80 80 ".popsection\n" \ 81 81 "663:\n\t" \ 82 82 "664:\n\t" \ 83 83 ".endif\n" 84 84 85 - #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, 
cfg, ...) \ 86 - __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg)) 85 + #define _ALTERNATIVE_CFG(oldinstr, newinstr, cpucap, cfg, ...) \ 86 + __ALTERNATIVE_CFG(oldinstr, newinstr, cpucap, IS_ENABLED(cfg)) 87 87 88 - #define ALTERNATIVE_CB(oldinstr, feature, cb) \ 89 - __ALTERNATIVE_CFG_CB(oldinstr, (1 << ARM64_CB_SHIFT) | (feature), 1, cb) 88 + #define ALTERNATIVE_CB(oldinstr, cpucap, cb) \ 89 + __ALTERNATIVE_CFG_CB(oldinstr, (1 << ARM64_CB_SHIFT) | (cpucap), 1, cb) 90 90 #else 91 91 92 92 #include <asm/assembler.h> 93 93 94 - .macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len 94 + .macro altinstruction_entry orig_offset alt_offset cpucap orig_len alt_len 95 95 .word \orig_offset - . 96 96 .word \alt_offset - . 97 - .hword (\feature) 97 + .hword (\cpucap) 98 98 .byte \orig_len 99 99 .byte \alt_len 100 100 .endm ··· 210 210 #endif /* __ASSEMBLY__ */ 211 211 212 212 /* 213 - * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature)); 213 + * Usage: asm(ALTERNATIVE(oldinstr, newinstr, cpucap)); 214 214 * 215 - * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO)); 215 + * Usage: asm(ALTERNATIVE(oldinstr, newinstr, cpucap, CONFIG_FOO)); 216 216 * N.B. If CONFIG_FOO is specified, but not selected, the whole block 217 217 * will be omitted, including oldinstr. 
218 218 */ ··· 224 224 #include <linux/types.h> 225 225 226 226 static __always_inline bool 227 - alternative_has_feature_likely(const unsigned long feature) 227 + alternative_has_cap_likely(const unsigned long cpucap) 228 228 { 229 - compiletime_assert(feature < ARM64_NCAPS, 230 - "feature must be < ARM64_NCAPS"); 229 + compiletime_assert(cpucap < ARM64_NCAPS, 230 + "cpucap must be < ARM64_NCAPS"); 231 231 232 232 asm_volatile_goto( 233 - ALTERNATIVE_CB("b %l[l_no]", %[feature], alt_cb_patch_nops) 233 + ALTERNATIVE_CB("b %l[l_no]", %[cpucap], alt_cb_patch_nops) 234 234 : 235 - : [feature] "i" (feature) 235 + : [cpucap] "i" (cpucap) 236 236 : 237 237 : l_no); 238 238 ··· 242 242 } 243 243 244 244 static __always_inline bool 245 - alternative_has_feature_unlikely(const unsigned long feature) 245 + alternative_has_cap_unlikely(const unsigned long cpucap) 246 246 { 247 - compiletime_assert(feature < ARM64_NCAPS, 248 - "feature must be < ARM64_NCAPS"); 247 + compiletime_assert(cpucap < ARM64_NCAPS, 248 + "cpucap must be < ARM64_NCAPS"); 249 249 250 250 asm_volatile_goto( 251 - ALTERNATIVE("nop", "b %l[l_yes]", %[feature]) 251 + ALTERNATIVE("nop", "b %l[l_yes]", %[cpucap]) 252 252 : 253 - : [feature] "i" (feature) 253 + : [cpucap] "i" (cpucap) 254 254 : 255 255 : l_yes); 256 256
+5 -2
arch/arm64/include/asm/alternative.h
··· 13 13 struct alt_instr { 14 14 s32 orig_offset; /* offset to original instruction */ 15 15 s32 alt_offset; /* offset to replacement instruction */ 16 - u16 cpufeature; /* cpufeature bit set for replacement */ 16 + u16 cpucap; /* cpucap bit set for replacement */ 17 17 u8 orig_len; /* size of original instruction(s) */ 18 18 u8 alt_len; /* size of new instruction(s), <= orig_len */ 19 19 }; ··· 23 23 24 24 void __init apply_boot_alternatives(void); 25 25 void __init apply_alternatives_all(void); 26 - bool alternative_is_applied(u16 cpufeature); 26 + bool alternative_is_applied(u16 cpucap); 27 27 28 28 #ifdef CONFIG_MODULES 29 29 void apply_alternatives_module(void *start, size_t length); 30 30 #else 31 31 static inline void apply_alternatives_module(void *start, size_t length) { } 32 32 #endif 33 + 34 + void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr, 35 + __le32 *updptr, int nr_inst); 33 36 34 37 #endif /* __ASSEMBLY__ */ 35 38 #endif /* __ASM_ALTERNATIVE_H */
+2
arch/arm64/include/asm/archrandom.h
··· 129 129 return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf; 130 130 } 131 131 132 + u64 kaslr_early_init(void *fdt); 133 + 132 134 #endif /* _ASM_ARCHRANDOM_H */
-2
arch/arm64/include/asm/asm-uaccess.h
··· 18 18 bic \tmp1, \tmp1, #TTBR_ASID_MASK 19 19 sub \tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET // reserved_pg_dir 20 20 msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 21 - isb 22 21 add \tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET 23 22 msr ttbr1_el1, \tmp1 // set reserved ASID 24 23 isb ··· 30 31 extr \tmp2, \tmp2, \tmp1, #48 31 32 ror \tmp2, \tmp2, #16 32 33 msr ttbr1_el1, \tmp2 // set the active ASID 33 - isb 34 34 msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1 35 35 isb 36 36 .endm
+2
arch/arm64/include/asm/compat.h
··· 96 96 return test_ti_thread_flag(thread, TIF_32BIT); 97 97 } 98 98 99 + long compat_arm_syscall(struct pt_regs *regs, int scno); 100 + 99 101 #else /* !CONFIG_COMPAT */ 100 102 101 103 static inline int is_compat_thread(struct thread_info *thread)
+7 -17
arch/arm64/include/asm/cpufeature.h
··· 107 107 * CPU capabilities: 108 108 * 109 109 * We use arm64_cpu_capabilities to represent system features, errata work 110 - * arounds (both used internally by kernel and tracked in cpu_hwcaps) and 110 + * arounds (both used internally by kernel and tracked in system_cpucaps) and 111 111 * ELF HWCAPs (which are exposed to user). 112 112 * 113 113 * To support systems with heterogeneous CPUs, we need to make sure that we ··· 419 419 return is_vhe_hyp_code() || is_nvhe_hyp_code(); 420 420 } 421 421 422 - extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); 422 + extern DECLARE_BITMAP(system_cpucaps, ARM64_NCAPS); 423 423 424 - extern DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS); 424 + extern DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS); 425 425 426 426 #define for_each_available_cap(cap) \ 427 - for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS) 427 + for_each_set_bit(cap, system_cpucaps, ARM64_NCAPS) 428 428 429 429 bool this_cpu_has_cap(unsigned int cap); 430 430 void cpu_set_feature(unsigned int num); ··· 437 437 438 438 static __always_inline bool system_capabilities_finalized(void) 439 439 { 440 - return alternative_has_feature_likely(ARM64_ALWAYS_SYSTEM); 440 + return alternative_has_cap_likely(ARM64_ALWAYS_SYSTEM); 441 441 } 442 442 443 443 /* ··· 449 449 { 450 450 if (num >= ARM64_NCAPS) 451 451 return false; 452 - return arch_test_bit(num, cpu_hwcaps); 452 + return arch_test_bit(num, system_cpucaps); 453 453 } 454 454 455 455 /* ··· 464 464 { 465 465 if (num >= ARM64_NCAPS) 466 466 return false; 467 - return alternative_has_feature_unlikely(num); 467 + return alternative_has_cap_unlikely(num); 468 468 } 469 469 470 470 /* ··· 502 502 return __cpus_have_const_cap(num); 503 503 else 504 504 return cpus_have_cap(num); 505 - } 506 - 507 - static inline void cpus_set_cap(unsigned int num) 508 - { 509 - if (num >= ARM64_NCAPS) { 510 - pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n", 511 - num, ARM64_NCAPS); 512 - } else { 513 - __set_bit(num, 
cpu_hwcaps); 514 - } 515 505 } 516 506 517 507 static inline int __attribute_const__
+2
arch/arm64/include/asm/efi.h
··· 166 166 dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size); 167 167 } 168 168 169 + efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f); 170 + 169 171 #endif /* _ASM_EFI_H */
+11 -9
arch/arm64/include/asm/el2_setup.h
··· 22 22 isb 23 23 .endm 24 24 25 + .macro __init_el2_hcrx 26 + mrs x0, id_aa64mmfr1_el1 27 + ubfx x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4 28 + cbz x0, .Lskip_hcrx_\@ 29 + mov_q x0, HCRX_HOST_FLAGS 30 + msr_s SYS_HCRX_EL2, x0 31 + .Lskip_hcrx_\@: 32 + .endm 33 + 25 34 /* 26 35 * Allow Non-secure EL1 and EL0 to access physical timer and counter. 27 36 * This is not necessary for VHE, since the host kernel runs in EL2, ··· 78 69 cbz x0, .Lskip_trace_\@ // Skip if TraceBuffer is not present 79 70 80 71 mrs_s x0, SYS_TRBIDR_EL1 81 - and x0, x0, TRBIDR_PROG 72 + and x0, x0, TRBIDR_EL1_P 82 73 cbnz x0, .Lskip_trace_\@ // If TRBE is available at EL2 83 74 84 75 mov x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT) ··· 193 184 */ 194 185 .macro init_el2_state 195 186 __init_el2_sctlr 187 + __init_el2_hcrx 196 188 __init_el2_timers 197 189 __init_el2_debug 198 190 __init_el2_lor ··· 294 284 cbz x1, .Lskip_sme_\@ 295 285 296 286 msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal 297 - 298 - mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present? 299 - ubfx x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4 300 - cbz x1, .Lskip_sme_\@ 301 - 302 - mrs_s x1, SYS_HCRX_EL2 303 - orr x1, x1, #HCRX_EL2_SMPME_MASK // Enable priority mapping 304 - msr_s SYS_HCRX_EL2, x1 305 287 .Lskip_sme_\@: 306 288 .endm 307 289
+28 -2
arch/arm64/include/asm/esr.h
··· 47 47 #define ESR_ELx_EC_DABT_LOW (0x24) 48 48 #define ESR_ELx_EC_DABT_CUR (0x25) 49 49 #define ESR_ELx_EC_SP_ALIGN (0x26) 50 - /* Unallocated EC: 0x27 */ 50 + #define ESR_ELx_EC_MOPS (0x27) 51 51 #define ESR_ELx_EC_FP_EXC32 (0x28) 52 52 /* Unallocated EC: 0x29 - 0x2B */ 53 53 #define ESR_ELx_EC_FP_EXC64 (0x2C) ··· 75 75 76 76 #define ESR_ELx_IL_SHIFT (25) 77 77 #define ESR_ELx_IL (UL(1) << ESR_ELx_IL_SHIFT) 78 - #define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1) 78 + #define ESR_ELx_ISS_MASK (GENMASK(24, 0)) 79 79 #define ESR_ELx_ISS(esr) ((esr) & ESR_ELx_ISS_MASK) 80 + #define ESR_ELx_ISS2_SHIFT (32) 81 + #define ESR_ELx_ISS2_MASK (GENMASK_ULL(55, 32)) 82 + #define ESR_ELx_ISS2(esr) (((esr) & ESR_ELx_ISS2_MASK) >> ESR_ELx_ISS2_SHIFT) 80 83 81 84 /* ISS field definitions shared by different classes */ 82 85 #define ESR_ELx_WNR_SHIFT (6) ··· 142 139 #define ESR_ELx_AR (UL(1) << ESR_ELx_AR_SHIFT) 143 140 #define ESR_ELx_CM_SHIFT (8) 144 141 #define ESR_ELx_CM (UL(1) << ESR_ELx_CM_SHIFT) 142 + 143 + /* ISS2 field definitions for Data Aborts */ 144 + #define ESR_ELx_TnD_SHIFT (10) 145 + #define ESR_ELx_TnD (UL(1) << ESR_ELx_TnD_SHIFT) 146 + #define ESR_ELx_TagAccess_SHIFT (9) 147 + #define ESR_ELx_TagAccess (UL(1) << ESR_ELx_TagAccess_SHIFT) 148 + #define ESR_ELx_GCS_SHIFT (8) 149 + #define ESR_ELx_GCS (UL(1) << ESR_ELx_GCS_SHIFT) 150 + #define ESR_ELx_Overlay_SHIFT (6) 151 + #define ESR_ELx_Overlay (UL(1) << ESR_ELx_Overlay_SHIFT) 152 + #define ESR_ELx_DirtyBit_SHIFT (5) 153 + #define ESR_ELx_DirtyBit (UL(1) << ESR_ELx_DirtyBit_SHIFT) 154 + #define ESR_ELx_Xs_SHIFT (0) 155 + #define ESR_ELx_Xs_MASK (GENMASK_ULL(4, 0)) 145 156 146 157 /* ISS field definitions for exceptions taken in to Hyp */ 147 158 #define ESR_ELx_CV (UL(1) << 24) ··· 372 355 #define ESR_ELx_SME_ISS_SM_DISABLED 2 373 356 #define ESR_ELx_SME_ISS_ZA_DISABLED 3 374 357 #define ESR_ELx_SME_ISS_ZT_DISABLED 4 358 + 359 + /* ISS field definitions for MOPS exceptions */ 360 + #define ESR_ELx_MOPS_ISS_MEM_INST 
(UL(1) << 24) 361 + #define ESR_ELx_MOPS_ISS_FROM_EPILOGUE (UL(1) << 18) 362 + #define ESR_ELx_MOPS_ISS_WRONG_OPTION (UL(1) << 17) 363 + #define ESR_ELx_MOPS_ISS_OPTION_A (UL(1) << 16) 364 + #define ESR_ELx_MOPS_ISS_DESTREG(esr) (((esr) & (UL(0x1f) << 10)) >> 10) 365 + #define ESR_ELx_MOPS_ISS_SRCREG(esr) (((esr) & (UL(0x1f) << 5)) >> 5) 366 + #define ESR_ELx_MOPS_ISS_SIZEREG(esr) (((esr) & (UL(0x1f) << 0)) >> 0) 375 367 376 368 #ifndef __ASSEMBLY__ 377 369 #include <asm/types.h>
+1 -5
arch/arm64/include/asm/exception.h
··· 8 8 #define __ASM_EXCEPTION_H 9 9 10 10 #include <asm/esr.h> 11 - #include <asm/kprobes.h> 12 11 #include <asm/ptrace.h> 13 12 14 13 #include <linux/interrupt.h> 15 14 16 - #ifdef CONFIG_FUNCTION_GRAPH_TRACER 17 15 #define __exception_irq_entry __irq_entry 18 - #else 19 - #define __exception_irq_entry __kprobes 20 - #endif 21 16 22 17 static inline unsigned long disr_to_esr(u64 disr) 23 18 { ··· 72 77 void do_el0_svc_compat(struct pt_regs *regs); 73 78 void do_el0_fpac(struct pt_regs *regs, unsigned long esr); 74 79 void do_el1_fpac(struct pt_regs *regs, unsigned long esr); 80 + void do_el0_mops(struct pt_regs *regs, unsigned long esr); 75 81 void do_serror(struct pt_regs *regs, unsigned long esr); 76 82 void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags); 77 83
+8
arch/arm64/include/asm/hw_breakpoint.h
··· 154 154 ID_AA64DFR0_EL1_WRPs_SHIFT); 155 155 } 156 156 157 + #ifdef CONFIG_CPU_PM 158 + extern void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int)); 159 + #else 160 + static inline void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int)) 161 + { 162 + } 163 + #endif 164 + 157 165 #endif /* __ASM_BREAKPOINT_H */
+1
arch/arm64/include/asm/hwcap.h
··· 137 137 #define KERNEL_HWCAP_SME_BI32I32 __khwcap2_feature(SME_BI32I32) 138 138 #define KERNEL_HWCAP_SME_B16B16 __khwcap2_feature(SME_B16B16) 139 139 #define KERNEL_HWCAP_SME_F16F16 __khwcap2_feature(SME_F16F16) 140 + #define KERNEL_HWCAP_MOPS __khwcap2_feature(MOPS) 140 141 141 142 /* 142 143 * This yields a mask that user programs can use to figure out what
+1 -1
arch/arm64/include/asm/irqflags.h
··· 24 24 static __always_inline bool __irqflags_uses_pmr(void) 25 25 { 26 26 return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && 27 - alternative_has_feature_unlikely(ARM64_HAS_GIC_PRIO_MASKING); 27 + alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING); 28 28 } 29 29 30 30 static __always_inline void __daif_local_irq_enable(void)
+4
arch/arm64/include/asm/kvm_arm.h
··· 9 9 10 10 #include <asm/esr.h> 11 11 #include <asm/memory.h> 12 + #include <asm/sysreg.h> 12 13 #include <asm/types.h> 13 14 14 15 /* Hyp Configuration Register (HCR) bits */ ··· 92 91 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA) 93 92 #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC) 94 93 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) 94 + 95 + #define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME) 96 + #define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn) 95 97 96 98 /* TCR_EL2 Registers bits */ 97 99 #define TCR_EL2_RES1 ((1U << 31) | (1 << 23))
+18
arch/arm64/include/asm/kvm_asm.h
··· 267 267 __kvm_at_err; \ 268 268 } ) 269 269 270 + void __noreturn hyp_panic(void); 271 + asmlinkage void kvm_unexpected_el2_exception(void); 272 + asmlinkage void __noreturn hyp_panic(void); 273 + asmlinkage void __noreturn hyp_panic_bad_stack(void); 274 + asmlinkage void kvm_unexpected_el2_exception(void); 275 + struct kvm_cpu_context; 276 + void handle_trap(struct kvm_cpu_context *host_ctxt); 277 + asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on); 278 + void __noreturn __pkvm_init_finalise(void); 279 + void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc); 280 + void kvm_patch_vector_branch(struct alt_instr *alt, 281 + __le32 *origptr, __le32 *updptr, int nr_inst); 282 + void kvm_get_kimage_voffset(struct alt_instr *alt, 283 + __le32 *origptr, __le32 *updptr, int nr_inst); 284 + void kvm_compute_final_ctr_el0(struct alt_instr *alt, 285 + __le32 *origptr, __le32 *updptr, int nr_inst); 286 + void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt, 287 + u64 elr_phys, u64 par, uintptr_t vcpu, u64 far, u64 hpfar); 270 288 271 289 #else /* __ASSEMBLY__ */ 272 290
+1 -1
arch/arm64/include/asm/kvm_host.h
··· 1031 1031 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu); 1032 1032 1033 1033 #define kvm_vcpu_os_lock_enabled(vcpu) \ 1034 - (!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & SYS_OSLSR_OSLK)) 1034 + (!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK)) 1035 1035 1036 1036 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, 1037 1037 struct kvm_device_attr *attr);
+1 -1
arch/arm64/include/asm/lse.h
··· 18 18 19 19 static __always_inline bool system_uses_lse_atomics(void) 20 20 { 21 - return alternative_has_feature_likely(ARM64_HAS_LSE_ATOMICS); 21 + return alternative_has_cap_likely(ARM64_HAS_LSE_ATOMICS); 22 22 } 23 23 24 24 #define __lse_ll_sc_body(op, ...) \
+9 -7
arch/arm64/include/asm/memory.h
··· 46 46 #define KIMAGE_VADDR (MODULES_END) 47 47 #define MODULES_END (MODULES_VADDR + MODULES_VSIZE) 48 48 #define MODULES_VADDR (_PAGE_END(VA_BITS_MIN)) 49 - #define MODULES_VSIZE (SZ_128M) 49 + #define MODULES_VSIZE (SZ_2G) 50 50 #define VMEMMAP_START (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT))) 51 51 #define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE) 52 52 #define PCI_IO_END (VMEMMAP_START - SZ_8M) ··· 204 204 return kimage_vaddr - KIMAGE_VADDR; 205 205 } 206 206 207 + #ifdef CONFIG_RANDOMIZE_BASE 208 + void kaslr_init(void); 207 209 static inline bool kaslr_enabled(void) 208 210 { 209 - /* 210 - * The KASLR offset modulo MIN_KIMG_ALIGN is taken from the physical 211 - * placement of the image rather than from the seed, so a displacement 212 - * of less than MIN_KIMG_ALIGN means that no seed was provided. 213 - */ 214 - return kaslr_offset() >= MIN_KIMG_ALIGN; 211 + extern bool __kaslr_is_enabled; 212 + return __kaslr_is_enabled; 215 213 } 214 + #else 215 + static inline void kaslr_init(void) { } 216 + static inline bool kaslr_enabled(void) { return false; } 217 + #endif 216 218 217 219 /* 218 220 * Allow all memory at the discovery stage. We will clip it later.
+7 -3
arch/arm64/include/asm/mmu_context.h
··· 39 39 /* 40 40 * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0. 41 41 */ 42 - static inline void cpu_set_reserved_ttbr0(void) 42 + static inline void cpu_set_reserved_ttbr0_nosync(void) 43 43 { 44 44 unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir)); 45 45 46 46 write_sysreg(ttbr, ttbr0_el1); 47 + } 48 + 49 + static inline void cpu_set_reserved_ttbr0(void) 50 + { 51 + cpu_set_reserved_ttbr0_nosync(); 47 52 isb(); 48 53 } 49 54 ··· 57 52 static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm) 58 53 { 59 54 BUG_ON(pgd == swapper_pg_dir); 60 - cpu_set_reserved_ttbr0(); 61 55 cpu_do_switch_mm(virt_to_phys(pgd),mm); 62 56 } 63 57 ··· 168 164 * up (i.e. cpufeature framework is not up yet) and 169 165 * latter only when we enable CNP via cpufeature's 170 166 * enable() callback. 171 - * Also we rely on the cpu_hwcap bit being set before 167 + * Also we rely on the system_cpucaps bit being set before 172 168 * calling the enable() function. 173 169 */ 174 170 ttbr1 |= TTBR_CNP_BIT;
-8
arch/arm64/include/asm/module.h
··· 7 7 8 8 #include <asm-generic/module.h> 9 9 10 - #ifdef CONFIG_ARM64_MODULE_PLTS 11 10 struct mod_plt_sec { 12 11 int plt_shndx; 13 12 int plt_num_entries; ··· 20 21 /* for CONFIG_DYNAMIC_FTRACE */ 21 22 struct plt_entry *ftrace_trampolines; 22 23 }; 23 - #endif 24 24 25 25 u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs, 26 26 void *loc, const Elf64_Rela *rela, ··· 27 29 28 30 u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs, 29 31 void *loc, u64 val); 30 - 31 - #ifdef CONFIG_RANDOMIZE_BASE 32 - extern u64 module_alloc_base; 33 - #else 34 - #define module_alloc_base ((u64)_etext - MODULES_VSIZE) 35 - #endif 36 32 37 33 struct plt_entry { 38 34 /*
-2
arch/arm64/include/asm/module.lds.h
··· 1 1 SECTIONS { 2 - #ifdef CONFIG_ARM64_MODULE_PLTS 3 2 .plt 0 : { BYTE(0) } 4 3 .init.plt 0 : { BYTE(0) } 5 4 .text.ftrace_trampoline 0 : { BYTE(0) } 6 - #endif 7 5 8 6 #ifdef CONFIG_KASAN_SW_TAGS 9 7 /*
+1
arch/arm64/include/asm/scs.h
··· 73 73 #endif 74 74 75 75 int scs_patch(const u8 eh_frame[], int size); 76 + asmlinkage void scs_patch_vmlinux(void); 76 77 77 78 #endif /* __ASSEMBLY __ */ 78 79
+16
arch/arm64/include/asm/spectre.h
··· 100 100 u8 spectre_bhb_loop_affected(int scope); 101 101 void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused); 102 102 bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr); 103 + 104 + void spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt, __le32 *origptr, 105 + __le32 *updptr, int nr_inst); 106 + void smccc_patch_fw_mitigation_conduit(struct alt_instr *alt, __le32 *origptr, 107 + __le32 *updptr, int nr_inst); 108 + void spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt, __le32 *origptr, 109 + __le32 *updptr, int nr_inst); 110 + void spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt, __le32 *origptr, 111 + __le32 *updptr, int nr_inst); 112 + void spectre_bhb_patch_loop_iter(struct alt_instr *alt, 113 + __le32 *origptr, __le32 *updptr, int nr_inst); 114 + void spectre_bhb_patch_wa3(struct alt_instr *alt, 115 + __le32 *origptr, __le32 *updptr, int nr_inst); 116 + void spectre_bhb_patch_clearbhb(struct alt_instr *alt, 117 + __le32 *origptr, __le32 *updptr, int nr_inst); 118 + 103 119 #endif /* __ASSEMBLY__ */ 104 120 #endif /* __ASM_SPECTRE_H */
+4
arch/arm64/include/asm/syscall_wrapper.h
··· 38 38 asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused) 39 39 40 40 #define COND_SYSCALL_COMPAT(name) \ 41 + asmlinkage long __arm64_compat_sys_##name(const struct pt_regs *regs); \ 41 42 asmlinkage long __weak __arm64_compat_sys_##name(const struct pt_regs *regs) \ 42 43 { \ 43 44 return sys_ni_syscall(); \ ··· 54 53 ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \ 55 54 static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ 56 55 static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ 56 + asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \ 57 57 asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \ 58 58 { \ 59 59 return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \ ··· 75 73 asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused) 76 74 77 75 #define COND_SYSCALL(name) \ 76 + asmlinkage long __arm64_sys_##name(const struct pt_regs *regs); \ 78 77 asmlinkage long __weak __arm64_sys_##name(const struct pt_regs *regs) \ 79 78 { \ 80 79 return sys_ni_syscall(); \ 81 80 } 82 81 82 + asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused); 83 83 #define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers); 84 84 85 85 #endif /* __ASM_SYSCALL_WRAPPER_H */
+6 -60
arch/arm64/include/asm/sysreg.h
··· 134 134 #define SYS_SVCR_SMSTART_SM_EL0 sys_reg(0, 3, 4, 3, 3) 135 135 #define SYS_SVCR_SMSTOP_SMZA_EL0 sys_reg(0, 3, 4, 6, 3) 136 136 137 - #define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2) 138 - #define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0) 139 - #define SYS_MDSCR_EL1 sys_reg(2, 0, 0, 2, 2) 140 - #define SYS_OSDTRTX_EL1 sys_reg(2, 0, 0, 3, 2) 141 - #define SYS_OSECCR_EL1 sys_reg(2, 0, 0, 6, 2) 142 137 #define SYS_DBGBVRn_EL1(n) sys_reg(2, 0, 0, n, 4) 143 138 #define SYS_DBGBCRn_EL1(n) sys_reg(2, 0, 0, n, 5) 144 139 #define SYS_DBGWVRn_EL1(n) sys_reg(2, 0, 0, n, 6) 145 140 #define SYS_DBGWCRn_EL1(n) sys_reg(2, 0, 0, n, 7) 146 141 #define SYS_MDRAR_EL1 sys_reg(2, 0, 1, 0, 0) 147 142 148 - #define SYS_OSLAR_EL1 sys_reg(2, 0, 1, 0, 4) 149 - #define SYS_OSLAR_OSLK BIT(0) 150 - 151 143 #define SYS_OSLSR_EL1 sys_reg(2, 0, 1, 1, 4) 152 - #define SYS_OSLSR_OSLM_MASK (BIT(3) | BIT(0)) 153 - #define SYS_OSLSR_OSLM_NI 0 154 - #define SYS_OSLSR_OSLM_IMPLEMENTED BIT(3) 155 - #define SYS_OSLSR_OSLK BIT(1) 144 + #define OSLSR_EL1_OSLM_MASK (BIT(3) | BIT(0)) 145 + #define OSLSR_EL1_OSLM_NI 0 146 + #define OSLSR_EL1_OSLM_IMPLEMENTED BIT(3) 147 + #define OSLSR_EL1_OSLK BIT(1) 156 148 157 149 #define SYS_OSDLR_EL1 sys_reg(2, 0, 1, 3, 4) 158 150 #define SYS_DBGPRCR_EL1 sys_reg(2, 0, 1, 4, 4) ··· 227 235 228 236 /*** End of Statistical Profiling Extension ***/ 229 237 230 - /* 231 - * TRBE Registers 232 - */ 233 - #define SYS_TRBLIMITR_EL1 sys_reg(3, 0, 9, 11, 0) 234 - #define SYS_TRBPTR_EL1 sys_reg(3, 0, 9, 11, 1) 235 - #define SYS_TRBBASER_EL1 sys_reg(3, 0, 9, 11, 2) 236 - #define SYS_TRBSR_EL1 sys_reg(3, 0, 9, 11, 3) 237 - #define SYS_TRBMAR_EL1 sys_reg(3, 0, 9, 11, 4) 238 - #define SYS_TRBTRG_EL1 sys_reg(3, 0, 9, 11, 6) 239 - #define SYS_TRBIDR_EL1 sys_reg(3, 0, 9, 11, 7) 240 - 241 - #define TRBLIMITR_LIMIT_MASK GENMASK_ULL(51, 0) 242 - #define TRBLIMITR_LIMIT_SHIFT 12 243 - #define TRBLIMITR_NVM BIT(5) 244 - #define TRBLIMITR_TRIG_MODE_MASK GENMASK(1, 0) 245 - #define 
TRBLIMITR_TRIG_MODE_SHIFT 3 246 - #define TRBLIMITR_FILL_MODE_MASK GENMASK(1, 0) 247 - #define TRBLIMITR_FILL_MODE_SHIFT 1 248 - #define TRBLIMITR_ENABLE BIT(0) 249 - #define TRBPTR_PTR_MASK GENMASK_ULL(63, 0) 250 - #define TRBPTR_PTR_SHIFT 0 251 - #define TRBBASER_BASE_MASK GENMASK_ULL(51, 0) 252 - #define TRBBASER_BASE_SHIFT 12 253 - #define TRBSR_EC_MASK GENMASK(5, 0) 254 - #define TRBSR_EC_SHIFT 26 255 - #define TRBSR_IRQ BIT(22) 256 - #define TRBSR_TRG BIT(21) 257 - #define TRBSR_WRAP BIT(20) 258 - #define TRBSR_ABORT BIT(18) 259 - #define TRBSR_STOP BIT(17) 260 - #define TRBSR_MSS_MASK GENMASK(15, 0) 261 - #define TRBSR_MSS_SHIFT 0 262 - #define TRBSR_BSC_MASK GENMASK(5, 0) 263 - #define TRBSR_BSC_SHIFT 0 264 - #define TRBSR_FSC_MASK GENMASK(5, 0) 265 - #define TRBSR_FSC_SHIFT 0 266 - #define TRBMAR_SHARE_MASK GENMASK(1, 0) 267 - #define TRBMAR_SHARE_SHIFT 8 268 - #define TRBMAR_OUTER_MASK GENMASK(3, 0) 269 - #define TRBMAR_OUTER_SHIFT 4 270 - #define TRBMAR_INNER_MASK GENMASK(3, 0) 271 - #define TRBMAR_INNER_SHIFT 0 272 - #define TRBTRG_TRG_MASK GENMASK(31, 0) 273 - #define TRBTRG_TRG_SHIFT 0 274 - #define TRBIDR_FLAG BIT(5) 275 - #define TRBIDR_PROG BIT(4) 276 - #define TRBIDR_ALIGN_MASK GENMASK(3, 0) 277 - #define TRBIDR_ALIGN_SHIFT 0 238 + #define TRBSR_EL1_BSC_MASK GENMASK(5, 0) 239 + #define TRBSR_EL1_BSC_SHIFT 0 278 240 279 241 #define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1) 280 242 #define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)
+2
arch/arm64/include/asm/traps.h
··· 29 29 void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str); 30 30 void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, const char *str); 31 31 32 + int early_brk64(unsigned long addr, unsigned long esr, struct pt_regs *regs); 33 + 32 34 /* 33 35 * Move regs->pc to next instruction and do necessary setup before it 34 36 * is executed.
-2
arch/arm64/include/asm/uaccess.h
··· 65 65 ttbr &= ~TTBR_ASID_MASK; 66 66 /* reserved_pg_dir placed before swapper_pg_dir */ 67 67 write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1); 68 - isb(); 69 68 /* Set reserved ASID */ 70 69 write_sysreg(ttbr, ttbr1_el1); 71 70 isb(); ··· 88 89 ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */ 89 90 ttbr1 |= ttbr0 & TTBR_ASID_MASK; 90 91 write_sysreg(ttbr1, ttbr1_el1); 91 - isb(); 92 92 93 93 /* Restore user page table */ 94 94 write_sysreg(ttbr0, ttbr0_el1);
+1
arch/arm64/include/uapi/asm/hwcap.h
··· 102 102 #define HWCAP2_SME_BI32I32 (1UL << 40) 103 103 #define HWCAP2_SME_B16B16 (1UL << 41) 104 104 #define HWCAP2_SME_F16F16 (1UL << 42) 105 + #define HWCAP2_MOPS (1UL << 43) 105 106 106 107 #endif /* _UAPI__ASM_HWCAP_H */
+1 -2
arch/arm64/kernel/Makefile
··· 42 42 obj-$(CONFIG_COMPAT_ALIGNMENT_FIXUPS) += compat_alignment.o 43 43 obj-$(CONFIG_KUSER_HELPERS) += kuser32.o 44 44 obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o 45 - obj-$(CONFIG_MODULES) += module.o 46 - obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o 45 + obj-$(CONFIG_MODULES) += module.o module-plts.o 47 46 obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o 48 47 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o 49 48 obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
+13 -14
arch/arm64/kernel/alternative.c
··· 24 24 #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) 25 25 #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset) 26 26 27 - #define ALT_CAP(a) ((a)->cpufeature & ~ARM64_CB_BIT) 28 - #define ALT_HAS_CB(a) ((a)->cpufeature & ARM64_CB_BIT) 27 + #define ALT_CAP(a) ((a)->cpucap & ~ARM64_CB_BIT) 28 + #define ALT_HAS_CB(a) ((a)->cpucap & ARM64_CB_BIT) 29 29 30 30 /* Volatile, as we may be patching the guts of READ_ONCE() */ 31 31 static volatile int all_alternatives_applied; ··· 37 37 struct alt_instr *end; 38 38 }; 39 39 40 - bool alternative_is_applied(u16 cpufeature) 40 + bool alternative_is_applied(u16 cpucap) 41 41 { 42 - if (WARN_ON(cpufeature >= ARM64_NCAPS)) 42 + if (WARN_ON(cpucap >= ARM64_NCAPS)) 43 43 return false; 44 44 45 - return test_bit(cpufeature, applied_alternatives); 45 + return test_bit(cpucap, applied_alternatives); 46 46 } 47 47 48 48 /* ··· 121 121 * accidentally call into the cache.S code, which is patched by us at 122 122 * runtime. 123 123 */ 124 - static void clean_dcache_range_nopatch(u64 start, u64 end) 124 + static noinstr void clean_dcache_range_nopatch(u64 start, u64 end) 125 125 { 126 126 u64 cur, d_size, ctr_el0; 127 127 128 - ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0); 128 + ctr_el0 = arm64_ftr_reg_ctrel0.sys_val; 129 129 d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0, 130 130 CTR_EL0_DminLine_SHIFT); 131 131 cur = start & ~(d_size - 1); ··· 141 141 142 142 static void __apply_alternatives(const struct alt_region *region, 143 143 bool is_module, 144 - unsigned long *feature_mask) 144 + unsigned long *cpucap_mask) 145 145 { 146 146 struct alt_instr *alt; 147 147 __le32 *origptr, *updptr; ··· 151 151 int nr_inst; 152 152 int cap = ALT_CAP(alt); 153 153 154 - if (!test_bit(cap, feature_mask)) 154 + if (!test_bit(cap, cpucap_mask)) 155 155 continue; 156 156 157 157 if (!cpus_have_cap(cap)) ··· 188 188 icache_inval_all_pou(); 189 189 isb(); 190 190 191 - /* Ignore ARM64_CB bit from feature mask */ 192 191 
bitmap_or(applied_alternatives, applied_alternatives, 193 - feature_mask, ARM64_NCAPS); 192 + cpucap_mask, ARM64_NCAPS); 194 193 bitmap_and(applied_alternatives, applied_alternatives, 195 - cpu_hwcaps, ARM64_NCAPS); 194 + system_cpucaps, ARM64_NCAPS); 196 195 } 197 196 } 198 197 ··· 238 239 } else { 239 240 DECLARE_BITMAP(remaining_capabilities, ARM64_NCAPS); 240 241 241 - bitmap_complement(remaining_capabilities, boot_capabilities, 242 + bitmap_complement(remaining_capabilities, boot_cpucaps, 242 243 ARM64_NCAPS); 243 244 244 245 BUG_ON(all_alternatives_applied); ··· 273 274 pr_info("applying boot alternatives\n"); 274 275 275 276 __apply_alternatives(&kernel_alternatives, false, 276 - &boot_capabilities[0]); 277 + &boot_cpucaps[0]); 277 278 } 278 279 279 280 #ifdef CONFIG_MODULES
+51 -31
arch/arm64/kernel/cpufeature.c
··· 105 105 unsigned int compat_elf_hwcap2 __read_mostly; 106 106 #endif 107 107 108 - DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); 109 - EXPORT_SYMBOL(cpu_hwcaps); 110 - static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS]; 108 + DECLARE_BITMAP(system_cpucaps, ARM64_NCAPS); 109 + EXPORT_SYMBOL(system_cpucaps); 110 + static struct arm64_cpu_capabilities const __ro_after_init *cpucap_ptrs[ARM64_NCAPS]; 111 111 112 - DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS); 112 + DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS); 113 113 114 114 bool arm64_use_ng_mappings = false; 115 115 EXPORT_SYMBOL(arm64_use_ng_mappings); ··· 137 137 void dump_cpu_features(void) 138 138 { 139 139 /* file-wide pr_fmt adds "CPU features: " prefix */ 140 - pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps); 140 + pr_emerg("0x%*pb\n", ARM64_NCAPS, &system_cpucaps); 141 141 } 142 142 143 143 #define ARM64_CPUID_FIELDS(reg, field, min_value) \ ··· 223 223 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0), 224 224 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0), 225 225 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0), 226 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_MOPS_SHIFT, 4, 0), 226 227 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), 227 228 FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0), 228 229 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), ··· 365 364 static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { 366 365 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 4, 0), 367 366 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, 0), 367 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HCX_SHIFT, 4, 0), 368 368 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 
ID_AA64MMFR1_EL1_ETS_SHIFT, 4, 0), 369 369 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TWED_SHIFT, 4, 0), 370 370 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_XNX_SHIFT, 4, 0), ··· 956 954 static const struct arm64_cpu_capabilities arm64_features[]; 957 955 958 956 static void __init 959 - init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps) 957 + init_cpucap_indirect_list_from_array(const struct arm64_cpu_capabilities *caps) 960 958 { 961 959 for (; caps->matches; caps++) { 962 960 if (WARN(caps->capability >= ARM64_NCAPS, 963 961 "Invalid capability %d\n", caps->capability)) 964 962 continue; 965 - if (WARN(cpu_hwcaps_ptrs[caps->capability], 963 + if (WARN(cpucap_ptrs[caps->capability], 966 964 "Duplicate entry for capability %d\n", 967 965 caps->capability)) 968 966 continue; 969 - cpu_hwcaps_ptrs[caps->capability] = caps; 967 + cpucap_ptrs[caps->capability] = caps; 970 968 } 971 969 } 972 970 973 - static void __init init_cpu_hwcaps_indirect_list(void) 971 + static void __init init_cpucap_indirect_list(void) 974 972 { 975 - init_cpu_hwcaps_indirect_list_from_array(arm64_features); 976 - init_cpu_hwcaps_indirect_list_from_array(arm64_errata); 973 + init_cpucap_indirect_list_from_array(arm64_features); 974 + init_cpucap_indirect_list_from_array(arm64_errata); 977 975 } 978 976 979 977 static void __init setup_boot_cpu_capabilities(void); ··· 1051 1049 init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid); 1052 1050 1053 1051 /* 1054 - * Initialize the indirect array of CPU hwcaps capabilities pointers 1055 - * before we handle the boot CPU below. 1052 + * Initialize the indirect array of CPU capabilities pointers before we 1053 + * handle the boot CPU below. 
1056 1054 */ 1057 - init_cpu_hwcaps_indirect_list(); 1055 + init_cpucap_indirect_list(); 1058 1056 1059 1057 /* 1060 1058 * Detect and enable early CPU capabilities based on the boot CPU, ··· 2050 2048 static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry, 2051 2049 int scope) 2052 2050 { 2053 - bool api = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope); 2054 - bool apa = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5], scope); 2055 - bool apa3 = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3], scope); 2051 + bool api = has_address_auth_cpucap(cpucap_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope); 2052 + bool apa = has_address_auth_cpucap(cpucap_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5], scope); 2053 + bool apa3 = has_address_auth_cpucap(cpucap_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3], scope); 2056 2054 2057 2055 return apa || apa3 || api; 2058 2056 } ··· 2188 2186 set_pstate_dit(1); 2189 2187 } 2190 2188 2189 + static void cpu_enable_mops(const struct arm64_cpu_capabilities *__unused) 2190 + { 2191 + sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_MSCEn); 2192 + } 2193 + 2191 2194 /* Internal helper functions to match cpu capability type */ 2192 2195 static bool 2193 2196 cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap) ··· 2242 2235 .capability = ARM64_HAS_ECV_CNTPOFF, 2243 2236 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2244 2237 .matches = has_cpuid_feature, 2245 - .sys_reg = SYS_ID_AA64MMFR0_EL1, 2246 - .field_pos = ID_AA64MMFR0_EL1_ECV_SHIFT, 2247 - .field_width = 4, 2248 - .sign = FTR_UNSIGNED, 2249 - .min_field_value = ID_AA64MMFR0_EL1_ECV_CNTPOFF, 2238 + ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, ECV, CNTPOFF) 2250 2239 }, 2251 2240 #ifdef CONFIG_ARM64_PAN 2252 2241 { ··· 2311 2308 .capability = ARM64_KVM_PROTECTED_MODE, 2312 2309 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2313 2310 .matches = is_kvm_protected_mode, 2311 + }, 2312 + { 2313 + .desc = 
"HCRX_EL2 register", 2314 + .capability = ARM64_HAS_HCX, 2315 + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, 2316 + .matches = has_cpuid_feature, 2317 + ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, HCX, IMP) 2314 2318 }, 2315 2319 #endif 2316 2320 { ··· 2651 2641 .cpu_enable = cpu_enable_dit, 2652 2642 ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, DIT, IMP) 2653 2643 }, 2644 + { 2645 + .desc = "Memory Copy and Memory Set instructions", 2646 + .capability = ARM64_HAS_MOPS, 2647 + .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2648 + .matches = has_cpuid_feature, 2649 + .cpu_enable = cpu_enable_mops, 2650 + ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, MOPS, IMP) 2651 + }, 2654 2652 {}, 2655 2653 }; 2656 2654 ··· 2787 2769 HWCAP_CAP(ID_AA64ISAR2_EL1, RPRFM, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRFM), 2788 2770 HWCAP_CAP(ID_AA64ISAR2_EL1, RPRES, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRES), 2789 2771 HWCAP_CAP(ID_AA64ISAR2_EL1, WFxT, IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT), 2772 + HWCAP_CAP(ID_AA64ISAR2_EL1, MOPS, IMP, CAP_HWCAP, KERNEL_HWCAP_MOPS), 2790 2773 #ifdef CONFIG_ARM64_SME 2791 2774 HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME), 2792 2775 HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), ··· 2914 2895 2915 2896 scope_mask &= ARM64_CPUCAP_SCOPE_MASK; 2916 2897 for (i = 0; i < ARM64_NCAPS; i++) { 2917 - caps = cpu_hwcaps_ptrs[i]; 2898 + caps = cpucap_ptrs[i]; 2918 2899 if (!caps || !(caps->type & scope_mask) || 2919 2900 cpus_have_cap(caps->capability) || 2920 2901 !caps->matches(caps, cpucap_default_scope(caps))) ··· 2922 2903 2923 2904 if (caps->desc) 2924 2905 pr_info("detected: %s\n", caps->desc); 2925 - cpus_set_cap(caps->capability); 2906 + 2907 + __set_bit(caps->capability, system_cpucaps); 2926 2908 2927 2909 if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU)) 2928 - set_bit(caps->capability, boot_capabilities); 2910 + set_bit(caps->capability, boot_cpucaps); 2929 2911 } 2930 2912 } 2931 2913 ··· 2940 2920 u16 non_boot_scope = SCOPE_ALL & 
~SCOPE_BOOT_CPU; 2941 2921 2942 2922 for_each_available_cap(i) { 2943 - const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i]; 2923 + const struct arm64_cpu_capabilities *cap = cpucap_ptrs[i]; 2944 2924 2945 2925 if (WARN_ON(!cap)) 2946 2926 continue; ··· 2970 2950 for (i = 0; i < ARM64_NCAPS; i++) { 2971 2951 unsigned int num; 2972 2952 2973 - caps = cpu_hwcaps_ptrs[i]; 2953 + caps = cpucap_ptrs[i]; 2974 2954 if (!caps || !(caps->type & scope_mask)) 2975 2955 continue; 2976 2956 num = caps->capability; ··· 3015 2995 scope_mask &= ARM64_CPUCAP_SCOPE_MASK; 3016 2996 3017 2997 for (i = 0; i < ARM64_NCAPS; i++) { 3018 - caps = cpu_hwcaps_ptrs[i]; 2998 + caps = cpucap_ptrs[i]; 3019 2999 if (!caps || !(caps->type & scope_mask)) 3020 3000 continue; 3021 3001 ··· 3214 3194 bool this_cpu_has_cap(unsigned int n) 3215 3195 { 3216 3196 if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) { 3217 - const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n]; 3197 + const struct arm64_cpu_capabilities *cap = cpucap_ptrs[n]; 3218 3198 3219 3199 if (cap) 3220 3200 return cap->matches(cap, SCOPE_LOCAL_CPU); ··· 3227 3207 /* 3228 3208 * This helper function is used in a narrow window when, 3229 3209 * - The system wide safe registers are set with all the SMP CPUs and, 3230 - * - The SYSTEM_FEATURE cpu_hwcaps may not have been set. 3210 + * - The SYSTEM_FEATURE system_cpucaps may not have been set. 3231 3211 * In all other cases cpus_have_{const_}cap() should be used. 3232 3212 */ 3233 3213 static bool __maybe_unused __system_matches_cap(unsigned int n) 3234 3214 { 3235 3215 if (n < ARM64_NCAPS) { 3236 - const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n]; 3216 + const struct arm64_cpu_capabilities *cap = cpucap_ptrs[n]; 3237 3217 3238 3218 if (cap) 3239 3219 return cap->matches(cap, SCOPE_SYSTEM);
+1 -1
arch/arm64/kernel/cpuidle.c
··· 13 13 #include <linux/of_device.h> 14 14 #include <linux/psci.h> 15 15 16 - #ifdef CONFIG_ACPI 16 + #ifdef CONFIG_ACPI_PROCESSOR_IDLE 17 17 18 18 #include <acpi/processor.h> 19 19
+1
arch/arm64/kernel/cpuinfo.c
··· 125 125 [KERNEL_HWCAP_SME_BI32I32] = "smebi32i32", 126 126 [KERNEL_HWCAP_SME_B16B16] = "smeb16b16", 127 127 [KERNEL_HWCAP_SME_F16F16] = "smef16f16", 128 + [KERNEL_HWCAP_MOPS] = "mops", 128 129 }; 129 130 130 131 #ifdef CONFIG_COMPAT
+15 -2
arch/arm64/kernel/entry-common.c
··· 126 126 lockdep_hardirqs_on(CALLER_ADDR0); 127 127 } 128 128 129 - static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs) 129 + static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs) 130 130 { 131 131 unsigned long flags; 132 132 ··· 135 135 flags = read_thread_flags(); 136 136 if (unlikely(flags & _TIF_WORK_MASK)) 137 137 do_notify_resume(regs, flags); 138 + 139 + lockdep_sys_exit(); 138 140 } 139 141 140 142 static __always_inline void exit_to_user_mode(struct pt_regs *regs) 141 143 { 142 - prepare_exit_to_user_mode(regs); 144 + exit_to_user_mode_prepare(regs); 143 145 mte_check_tfsr_exit(); 144 146 __exit_to_user_mode(); 145 147 } ··· 613 611 exit_to_user_mode(regs); 614 612 } 615 613 614 + static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr) 615 + { 616 + enter_from_user_mode(regs); 617 + local_daif_restore(DAIF_PROCCTX); 618 + do_el0_mops(regs, esr); 619 + exit_to_user_mode(regs); 620 + } 621 + 616 622 static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr) 617 623 { 618 624 enter_from_user_mode(regs); ··· 697 687 break; 698 688 case ESR_ELx_EC_BTI: 699 689 el0_bti(regs); 690 + break; 691 + case ESR_ELx_EC_MOPS: 692 + el0_mops(regs, esr); 700 693 break; 701 694 case ESR_ELx_EC_BREAKPT_LOW: 702 695 case ESR_ELx_EC_SOFTSTP_LOW:
+20 -37
arch/arm64/kernel/entry.S
··· 101 101 .org .Lventry_start\@ + 128 // Did we overflow the ventry slot? 102 102 .endm 103 103 104 - .macro tramp_alias, dst, sym, tmp 105 - mov_q \dst, TRAMP_VALIAS 106 - adr_l \tmp, \sym 107 - add \dst, \dst, \tmp 108 - adr_l \tmp, .entry.tramp.text 109 - sub \dst, \dst, \tmp 104 + .macro tramp_alias, dst, sym 105 + .set .Lalias\@, TRAMP_VALIAS + \sym - .entry.tramp.text 106 + movz \dst, :abs_g2_s:.Lalias\@ 107 + movk \dst, :abs_g1_nc:.Lalias\@ 108 + movk \dst, :abs_g0_nc:.Lalias\@ 110 109 .endm 111 110 112 111 /* ··· 434 435 eret 435 436 alternative_else_nop_endif 436 437 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 437 - bne 4f 438 438 msr far_el1, x29 439 - tramp_alias x30, tramp_exit_native, x29 440 - br x30 441 - 4: 442 - tramp_alias x30, tramp_exit_compat, x29 443 - br x30 439 + 440 + ldr_this_cpu x30, this_cpu_vector, x29 441 + tramp_alias x29, tramp_exit 442 + msr vbar_el1, x30 // install vector table 443 + ldr lr, [sp, #S_LR] // restore x30 444 + add sp, sp, #PT_REGS_SIZE // restore sp 445 + br x29 444 446 #endif 445 447 .else 446 448 ldr lr, [sp, #S_LR] ··· 732 732 .org 1b + 128 // Did we overflow the ventry slot? 
733 733 .endm 734 734 735 - .macro tramp_exit, regsize = 64 736 - tramp_data_read_var x30, this_cpu_vector 737 - get_this_cpu_offset x29 738 - ldr x30, [x30, x29] 739 - 740 - msr vbar_el1, x30 741 - ldr lr, [sp, #S_LR] 742 - tramp_unmap_kernel x29 743 - .if \regsize == 64 744 - mrs x29, far_el1 745 - .endif 746 - add sp, sp, #PT_REGS_SIZE // restore sp 747 - eret 748 - sb 749 - .endm 750 - 751 735 .macro generate_tramp_vector, kpti, bhb 752 736 .Lvector_start\@: 753 737 .space 0x400 ··· 752 768 */ 753 769 .pushsection ".entry.tramp.text", "ax" 754 770 .align 11 755 - SYM_CODE_START_NOALIGN(tramp_vectors) 771 + SYM_CODE_START_LOCAL_NOALIGN(tramp_vectors) 756 772 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY 757 773 generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP 758 774 generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW ··· 761 777 generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE 762 778 SYM_CODE_END(tramp_vectors) 763 779 764 - SYM_CODE_START(tramp_exit_native) 765 - tramp_exit 766 - SYM_CODE_END(tramp_exit_native) 767 - 768 - SYM_CODE_START(tramp_exit_compat) 769 - tramp_exit 32 770 - SYM_CODE_END(tramp_exit_compat) 780 + SYM_CODE_START_LOCAL(tramp_exit) 781 + tramp_unmap_kernel x29 782 + mrs x29, far_el1 // restore x29 783 + eret 784 + sb 785 + SYM_CODE_END(tramp_exit) 771 786 .popsection // .entry.tramp.text 772 787 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ 773 788 ··· 1060 1077 alternative_else_nop_endif 1061 1078 1062 1079 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 1063 - tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3 1080 + tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline 1064 1081 br x5 1065 1082 #endif 1066 1083 SYM_CODE_END(__sdei_asm_handler)
+1
arch/arm64/kernel/fpsimd.c
··· 1649 1649 1650 1650 fpsimd_flush_thread_vl(ARM64_VEC_SME); 1651 1651 current->thread.svcr = 0; 1652 + sme_smstop(); 1652 1653 } 1653 1654 1654 1655 current->thread.fp_type = FP_STATE_FPSIMD;
+3 -5
arch/arm64/kernel/ftrace.c
··· 197 197 198 198 static struct plt_entry *get_ftrace_plt(struct module *mod) 199 199 { 200 - #ifdef CONFIG_ARM64_MODULE_PLTS 200 + #ifdef CONFIG_MODULES 201 201 struct plt_entry *plt = mod->arch.ftrace_trampolines; 202 202 203 203 return &plt[FTRACE_PLT_IDX]; ··· 249 249 * must use a PLT to reach it. We can only place PLTs for modules, and 250 250 * only when module PLT support is built-in. 251 251 */ 252 - if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) 252 + if (!IS_ENABLED(CONFIG_MODULES)) 253 253 return false; 254 254 255 255 /* ··· 431 431 * 432 432 * Note: 'mod' is only set at module load time. 433 433 */ 434 - if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) && 435 - IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) { 434 + if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) && mod) 436 435 return aarch64_insn_patch_text_nosync((void *)pc, new); 437 - } 438 436 439 437 if (!ftrace_find_callable_addr(rec, mod, &addr)) 440 438 return -EINVAL;
-1
arch/arm64/kernel/hibernate.c
··· 99 99 100 100 void notrace save_processor_state(void) 101 101 { 102 - WARN_ON(num_online_cpus() != 1); 103 102 } 104 103 105 104 void notrace restore_processor_state(void)
-8
arch/arm64/kernel/hw_breakpoint.c
··· 973 973 return 0; 974 974 } 975 975 976 - #ifdef CONFIG_CPU_PM 977 - extern void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int)); 978 - #else 979 - static inline void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int)) 980 - { 981 - } 982 - #endif 983 - 984 976 /* 985 977 * One-time initialisation. 986 978 */
+2
arch/arm64/kernel/idreg-override.c
··· 123 123 .fields = { 124 124 FIELD("gpa3", ID_AA64ISAR2_EL1_GPA3_SHIFT, NULL), 125 125 FIELD("apa3", ID_AA64ISAR2_EL1_APA3_SHIFT, NULL), 126 + FIELD("mops", ID_AA64ISAR2_EL1_MOPS_SHIFT, NULL), 126 127 {} 127 128 }, 128 129 }; ··· 175 174 "id_aa64isar1.gpi=0 id_aa64isar1.gpa=0 " 176 175 "id_aa64isar1.api=0 id_aa64isar1.apa=0 " 177 176 "id_aa64isar2.gpa3=0 id_aa64isar2.apa3=0" }, 177 + { "arm64.nomops", "id_aa64isar2.mops=0" }, 178 178 { "arm64.nomte", "id_aa64pfr1.mte=0" }, 179 179 { "nokaslr", "kaslr.disabled=1" }, 180 180 };
+14 -69
arch/arm64/kernel/kaslr.c
··· 4 4 */ 5 5 6 6 #include <linux/cache.h> 7 - #include <linux/crc32.h> 8 7 #include <linux/init.h> 9 - #include <linux/libfdt.h> 10 - #include <linux/mm_types.h> 11 - #include <linux/sched.h> 12 - #include <linux/types.h> 13 - #include <linux/pgtable.h> 14 - #include <linux/random.h> 8 + #include <linux/printk.h> 15 9 16 - #include <asm/fixmap.h> 17 - #include <asm/kernel-pgtable.h> 10 + #include <asm/cpufeature.h> 18 11 #include <asm/memory.h> 19 - #include <asm/mmu.h> 20 - #include <asm/sections.h> 21 - #include <asm/setup.h> 22 12 23 - u64 __ro_after_init module_alloc_base; 24 13 u16 __initdata memstart_offset_seed; 25 14 26 15 struct arm64_ftr_override kaslr_feature_override __initdata; 27 16 28 - static int __init kaslr_init(void) 17 + bool __ro_after_init __kaslr_is_enabled = false; 18 + 19 + void __init kaslr_init(void) 29 20 { 30 - u64 module_range; 31 - u32 seed; 32 - 33 - /* 34 - * Set a reasonable default for module_alloc_base in case 35 - * we end up running with module randomization disabled. 36 - */ 37 - module_alloc_base = (u64)_etext - MODULES_VSIZE; 38 - 39 21 if (kaslr_feature_override.val & kaslr_feature_override.mask & 0xf) { 40 22 pr_info("KASLR disabled on command line\n"); 41 - return 0; 23 + return; 42 24 } 43 25 44 - if (!kaslr_enabled()) { 26 + /* 27 + * The KASLR offset modulo MIN_KIMG_ALIGN is taken from the physical 28 + * placement of the image rather than from the seed, so a displacement 29 + * of less than MIN_KIMG_ALIGN means that no seed was provided. 30 + */ 31 + if (kaslr_offset() < MIN_KIMG_ALIGN) { 45 32 pr_warn("KASLR disabled due to lack of seed\n"); 46 - return 0; 33 + return; 47 34 } 48 35 49 36 pr_info("KASLR enabled\n"); 50 - 51 - /* 52 - * KASAN without KASAN_VMALLOC does not expect the module region to 53 - * intersect the vmalloc region, since shadow memory is allocated for 54 - * each module at load time, whereas the vmalloc region will already be 55 - * shadowed by KASAN zero pages. 
56 - */ 57 - BUILD_BUG_ON((IS_ENABLED(CONFIG_KASAN_GENERIC) || 58 - IS_ENABLED(CONFIG_KASAN_SW_TAGS)) && 59 - !IS_ENABLED(CONFIG_KASAN_VMALLOC)); 60 - 61 - seed = get_random_u32(); 62 - 63 - if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) { 64 - /* 65 - * Randomize the module region over a 2 GB window covering the 66 - * kernel. This reduces the risk of modules leaking information 67 - * about the address of the kernel itself, but results in 68 - * branches between modules and the core kernel that are 69 - * resolved via PLTs. (Branches between modules will be 70 - * resolved normally.) 71 - */ 72 - module_range = SZ_2G - (u64)(_end - _stext); 73 - module_alloc_base = max((u64)_end - SZ_2G, (u64)MODULES_VADDR); 74 - } else { 75 - /* 76 - * Randomize the module region by setting module_alloc_base to 77 - * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE, 78 - * _stext) . This guarantees that the resulting region still 79 - * covers [_stext, _etext], and that all relative branches can 80 - * be resolved without veneers unless this region is exhausted 81 - * and we fall back to a larger 2GB window in module_alloc() 82 - * when ARM64_MODULE_PLTS is enabled. 83 - */ 84 - module_range = MODULES_VSIZE - (u64)(_etext - _stext); 85 - } 86 - 87 - /* use the lower 21 bits to randomize the base of the module region */ 88 - module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; 89 - module_alloc_base &= PAGE_MASK; 90 - 91 - return 0; 37 + __kaslr_is_enabled = true; 92 38 } 93 - subsys_initcall(kaslr_init)
+1
arch/arm64/kernel/module-plts.c
··· 7 7 #include <linux/ftrace.h> 8 8 #include <linux/kernel.h> 9 9 #include <linux/module.h> 10 + #include <linux/moduleloader.h> 10 11 #include <linux/sort.h> 11 12 12 13 static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
+116 -37
arch/arm64/kernel/module.c
··· 7 7 * Author: Will Deacon <will.deacon@arm.com> 8 8 */ 9 9 10 + #define pr_fmt(fmt) "Modules: " fmt 11 + 10 12 #include <linux/bitops.h> 11 13 #include <linux/elf.h> 12 14 #include <linux/ftrace.h> ··· 17 15 #include <linux/kernel.h> 18 16 #include <linux/mm.h> 19 17 #include <linux/moduleloader.h> 18 + #include <linux/random.h> 20 19 #include <linux/scs.h> 21 20 #include <linux/vmalloc.h> 21 + 22 22 #include <asm/alternative.h> 23 23 #include <asm/insn.h> 24 24 #include <asm/scs.h> 25 25 #include <asm/sections.h> 26 26 27 + static u64 module_direct_base __ro_after_init = 0; 28 + static u64 module_plt_base __ro_after_init = 0; 29 + 30 + /* 31 + * Choose a random page-aligned base address for a window of 'size' bytes which 32 + * entirely contains the interval [start, end - 1]. 33 + */ 34 + static u64 __init random_bounding_box(u64 size, u64 start, u64 end) 35 + { 36 + u64 max_pgoff, pgoff; 37 + 38 + if ((end - start) >= size) 39 + return 0; 40 + 41 + max_pgoff = (size - (end - start)) / PAGE_SIZE; 42 + pgoff = get_random_u32_inclusive(0, max_pgoff); 43 + 44 + return start - pgoff * PAGE_SIZE; 45 + } 46 + 47 + /* 48 + * Modules may directly reference data and text anywhere within the kernel 49 + * image and other modules. References using PREL32 relocations have a +/-2G 50 + * range, and so we need to ensure that the entire kernel image and all modules 51 + * fall within a 2G window such that these are always within range. 52 + * 53 + * Modules may directly branch to functions and code within the kernel text, 54 + * and to functions and code within other modules. These branches will use 55 + * CALL26/JUMP26 relocations with a +/-128M range. Without PLTs, we must ensure 56 + * that the entire kernel text and all module text falls within a 128M window 57 + * such that these are always within range. With PLTs, we can expand this to a 58 + * 2G window. 
59 + * 60 + * We chose the 128M region to surround the entire kernel image (rather than 61 + * just the text) as using the same bounds for the 128M and 2G regions ensures 62 + * by construction that we never select a 128M region that is not a subset of 63 + * the 2G region. For very large and unusual kernel configurations this means 64 + * we may fall back to PLTs where they could have been avoided, but this keeps 65 + * the logic significantly simpler. 66 + */ 67 + static int __init module_init_limits(void) 68 + { 69 + u64 kernel_end = (u64)_end; 70 + u64 kernel_start = (u64)_text; 71 + u64 kernel_size = kernel_end - kernel_start; 72 + 73 + /* 74 + * The default modules region is placed immediately below the kernel 75 + * image, and is large enough to use the full 2G relocation range. 76 + */ 77 + BUILD_BUG_ON(KIMAGE_VADDR != MODULES_END); 78 + BUILD_BUG_ON(MODULES_VSIZE < SZ_2G); 79 + 80 + if (!kaslr_enabled()) { 81 + if (kernel_size < SZ_128M) 82 + module_direct_base = kernel_end - SZ_128M; 83 + if (kernel_size < SZ_2G) 84 + module_plt_base = kernel_end - SZ_2G; 85 + } else { 86 + u64 min = kernel_start; 87 + u64 max = kernel_end; 88 + 89 + if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) { 90 + pr_info("2G module region forced by RANDOMIZE_MODULE_REGION_FULL\n"); 91 + } else { 92 + module_direct_base = random_bounding_box(SZ_128M, min, max); 93 + if (module_direct_base) { 94 + min = module_direct_base; 95 + max = module_direct_base + SZ_128M; 96 + } 97 + } 98 + 99 + module_plt_base = random_bounding_box(SZ_2G, min, max); 100 + } 101 + 102 + pr_info("%llu pages in range for non-PLT usage", 103 + module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0); 104 + pr_info("%llu pages in range for PLT usage", 105 + module_plt_base ? 
(SZ_2G - kernel_size) / PAGE_SIZE : 0); 106 + 107 + return 0; 108 + } 109 + subsys_initcall(module_init_limits); 110 + 27 111 void *module_alloc(unsigned long size) 28 112 { 29 - u64 module_alloc_end = module_alloc_base + MODULES_VSIZE; 30 - gfp_t gfp_mask = GFP_KERNEL; 31 - void *p; 113 + void *p = NULL; 32 114 33 - /* Silence the initial allocation */ 34 - if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) 35 - gfp_mask |= __GFP_NOWARN; 115 + /* 116 + * Where possible, prefer to allocate within direct branch range of the 117 + * kernel such that no PLTs are necessary. 118 + */ 119 + if (module_direct_base) { 120 + p = __vmalloc_node_range(size, MODULE_ALIGN, 121 + module_direct_base, 122 + module_direct_base + SZ_128M, 123 + GFP_KERNEL | __GFP_NOWARN, 124 + PAGE_KERNEL, 0, NUMA_NO_NODE, 125 + __builtin_return_address(0)); 126 + } 36 127 37 - if (IS_ENABLED(CONFIG_KASAN_GENERIC) || 38 - IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 39 - /* don't exceed the static module region - see below */ 40 - module_alloc_end = MODULES_END; 128 + if (!p && module_plt_base) { 129 + p = __vmalloc_node_range(size, MODULE_ALIGN, 130 + module_plt_base, 131 + module_plt_base + SZ_2G, 132 + GFP_KERNEL | __GFP_NOWARN, 133 + PAGE_KERNEL, 0, NUMA_NO_NODE, 134 + __builtin_return_address(0)); 135 + } 41 136 42 - p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base, 43 - module_alloc_end, gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK, 44 - NUMA_NO_NODE, __builtin_return_address(0)); 137 + if (!p) { 138 + pr_warn_ratelimited("%s: unable to allocate memory\n", 139 + __func__); 140 + } 45 141 46 - if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && 47 - (IS_ENABLED(CONFIG_KASAN_VMALLOC) || 48 - (!IS_ENABLED(CONFIG_KASAN_GENERIC) && 49 - !IS_ENABLED(CONFIG_KASAN_SW_TAGS)))) 50 - /* 51 - * KASAN without KASAN_VMALLOC can only deal with module 52 - * allocations being served from the reserved module region, 53 - * since the remainder of the vmalloc region is already 54 - * backed by zero shadow pages, and 
punching holes into it 55 - * is non-trivial. Since the module region is not randomized 56 - * when KASAN is enabled without KASAN_VMALLOC, it is even 57 - * less likely that the module region gets exhausted, so we 58 - * can simply omit this fallback in that case. 59 - */ 60 - p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base, 61 - module_alloc_base + SZ_2G, GFP_KERNEL, 62 - PAGE_KERNEL, 0, NUMA_NO_NODE, 63 - __builtin_return_address(0)); 64 - 65 - if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) { 142 + if (p && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) { 66 143 vfree(p); 67 144 return NULL; 68 145 } ··· 529 448 case R_AARCH64_CALL26: 530 449 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26, 531 450 AARCH64_INSN_IMM_26); 532 - 533 - if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && 534 - ovf == -ERANGE) { 451 + if (ovf == -ERANGE) { 535 452 val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym); 536 453 if (!val) 537 454 return -ENOEXEC; ··· 566 487 const Elf_Shdr *sechdrs, 567 488 struct module *mod) 568 489 { 569 - #if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE) 490 + #if defined(CONFIG_DYNAMIC_FTRACE) 570 491 const Elf_Shdr *s; 571 492 struct plt_entry *plts; 572 493
+2
arch/arm64/kernel/setup.c
··· 296 296 297 297 *cmdline_p = boot_command_line; 298 298 299 + kaslr_init(); 300 + 299 301 /* 300 302 * If know now we are going to need KPTI then use non-global 301 303 * mappings from the start, avoiding the cost of rewriting
+2 -1
arch/arm64/kernel/signal.c
··· 23 23 #include <asm/daifflags.h> 24 24 #include <asm/debug-monitors.h> 25 25 #include <asm/elf.h> 26 + #include <asm/exception.h> 26 27 #include <asm/cacheflush.h> 27 28 #include <asm/ucontext.h> 28 29 #include <asm/unistd.h> ··· 399 398 400 399 __get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err); 401 400 if (!err) 402 - current->thread.tpidr2_el0 = tpidr2_el0; 401 + write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0); 403 402 404 403 return err; 405 404 }
-2
arch/arm64/kernel/syscall.c
··· 147 147 * exit regardless, as the old entry assembly did. 148 148 */ 149 149 if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) { 150 - local_daif_mask(); 151 150 flags = read_thread_flags(); 152 151 if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) 153 152 return; 154 - local_daif_restore(DAIF_PROCCTX); 155 153 } 156 154 157 155 trace_exit:
+60 -1
arch/arm64/kernel/traps.c
··· 514 514 die("Oops - FPAC", regs, esr); 515 515 } 516 516 517 + void do_el0_mops(struct pt_regs *regs, unsigned long esr) 518 + { 519 + bool wrong_option = esr & ESR_ELx_MOPS_ISS_WRONG_OPTION; 520 + bool option_a = esr & ESR_ELx_MOPS_ISS_OPTION_A; 521 + int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr); 522 + int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr); 523 + int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr); 524 + unsigned long dst, src, size; 525 + 526 + dst = pt_regs_read_reg(regs, dstreg); 527 + src = pt_regs_read_reg(regs, srcreg); 528 + size = pt_regs_read_reg(regs, sizereg); 529 + 530 + /* 531 + * Put the registers back in the original format suitable for a 532 + * prologue instruction, using the generic return routine from the 533 + * Arm ARM (DDI 0487I.a) rules CNTMJ and MWFQH. 534 + */ 535 + if (esr & ESR_ELx_MOPS_ISS_MEM_INST) { 536 + /* SET* instruction */ 537 + if (option_a ^ wrong_option) { 538 + /* Format is from Option A; forward set */ 539 + pt_regs_write_reg(regs, dstreg, dst + size); 540 + pt_regs_write_reg(regs, sizereg, -size); 541 + } 542 + } else { 543 + /* CPY* instruction */ 544 + if (!(option_a ^ wrong_option)) { 545 + /* Format is from Option B */ 546 + if (regs->pstate & PSR_N_BIT) { 547 + /* Backward copy */ 548 + pt_regs_write_reg(regs, dstreg, dst - size); 549 + pt_regs_write_reg(regs, srcreg, src - size); 550 + } 551 + } else { 552 + /* Format is from Option A */ 553 + if (size & BIT(63)) { 554 + /* Forward copy */ 555 + pt_regs_write_reg(regs, dstreg, dst + size); 556 + pt_regs_write_reg(regs, srcreg, src + size); 557 + pt_regs_write_reg(regs, sizereg, -size); 558 + } 559 + } 560 + } 561 + 562 + if (esr & ESR_ELx_MOPS_ISS_FROM_EPILOGUE) 563 + regs->pc -= 8; 564 + else 565 + regs->pc -= 4; 566 + 567 + /* 568 + * If single stepping then finish the step before executing the 569 + * prologue instruction. 
570 + */ 571 + user_fastforward_single_step(current); 572 + } 573 + 517 574 #define __user_cache_maint(insn, address, res) \ 518 575 if (address >= TASK_SIZE_MAX) { \ 519 576 res = -EFAULT; \ ··· 881 824 [ESR_ELx_EC_DABT_LOW] = "DABT (lower EL)", 882 825 [ESR_ELx_EC_DABT_CUR] = "DABT (current EL)", 883 826 [ESR_ELx_EC_SP_ALIGN] = "SP Alignment", 827 + [ESR_ELx_EC_MOPS] = "MOPS", 884 828 [ESR_ELx_EC_FP_EXC32] = "FP (AArch32)", 885 829 [ESR_ELx_EC_FP_EXC64] = "FP (AArch64)", 886 830 [ESR_ELx_EC_SERROR] = "SError", ··· 1005 947 } 1006 948 1007 949 /* GENERIC_BUG traps */ 1008 - 950 + #ifdef CONFIG_GENERIC_BUG 1009 951 int is_valid_bugaddr(unsigned long addr) 1010 952 { 1011 953 /* ··· 1017 959 */ 1018 960 return 1; 1019 961 } 962 + #endif 1020 963 1021 964 static int bug_handler(struct pt_regs *regs, unsigned long esr) 1022 965 {
+1 -1
arch/arm64/kvm/debug.c
··· 333 333 334 334 /* Check if we have TRBE implemented and available at the host */ 335 335 if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) && 336 - !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG)) 336 + !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P)) 337 337 vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE); 338 338 } 339 339
+6
arch/arm64/kvm/hyp/include/hyp/switch.h
··· 130 130 131 131 if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) 132 132 write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2); 133 + 134 + if (cpus_have_final_cap(ARM64_HAS_HCX)) 135 + write_sysreg_s(HCRX_GUEST_FLAGS, SYS_HCRX_EL2); 133 136 } 134 137 135 138 static inline void ___deactivate_traps(struct kvm_vcpu *vcpu) ··· 147 144 vcpu->arch.hcr_el2 &= ~HCR_VSE; 148 145 vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE; 149 146 } 147 + 148 + if (cpus_have_final_cap(ARM64_HAS_HCX)) 149 + write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2); 150 150 } 151 151 152 152 static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
+1 -1
arch/arm64/kvm/hyp/nvhe/debug-sr.c
··· 56 56 *trfcr_el1 = 0; 57 57 58 58 /* Check if the TRBE is enabled */ 59 - if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_ENABLE)) 59 + if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_EL1_E)) 60 60 return; 61 61 /* 62 62 * Prohibit trace generation while we are in guest.
+6 -5
arch/arm64/kvm/sys_regs.c
··· 388 388 return read_from_write_only(vcpu, p, r); 389 389 390 390 /* Forward the OSLK bit to OSLSR */ 391 - oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~SYS_OSLSR_OSLK; 392 - if (p->regval & SYS_OSLAR_OSLK) 393 - oslsr |= SYS_OSLSR_OSLK; 391 + oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~OSLSR_EL1_OSLK; 392 + if (p->regval & OSLAR_EL1_OSLK) 393 + oslsr |= OSLSR_EL1_OSLK; 394 394 395 395 __vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr; 396 396 return true; ··· 414 414 * The only modifiable bit is the OSLK bit. Refuse the write if 415 415 * userspace attempts to change any other bit in the register. 416 416 */ 417 - if ((val ^ rd->val) & ~SYS_OSLSR_OSLK) 417 + if ((val ^ rd->val) & ~OSLSR_EL1_OSLK) 418 418 return -EINVAL; 419 419 420 420 __vcpu_sys_reg(vcpu, rd->reg) = val; ··· 1252 1252 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3)); 1253 1253 if (!cpus_have_final_cap(ARM64_HAS_WFXT)) 1254 1254 val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT); 1255 + val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS); 1255 1256 break; 1256 1257 case SYS_ID_AA64DFR0_EL1: 1257 1258 /* Limit debug to ARMv8.0 */ ··· 1782 1781 { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi }, 1783 1782 { SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 }, 1784 1783 { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1, 1785 - SYS_OSLSR_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, }, 1784 + OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, }, 1786 1785 { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi }, 1787 1786 { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi }, 1788 1787 { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
+4 -4
arch/arm64/lib/xor-neon.c
··· 10 10 #include <linux/module.h> 11 11 #include <asm/neon-intrinsics.h> 12 12 13 - void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1, 13 + static void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1, 14 14 const unsigned long * __restrict p2) 15 15 { 16 16 uint64_t *dp1 = (uint64_t *)p1; ··· 37 37 } while (--lines > 0); 38 38 } 39 39 40 - void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1, 40 + static void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1, 41 41 const unsigned long * __restrict p2, 42 42 const unsigned long * __restrict p3) 43 43 { ··· 73 73 } while (--lines > 0); 74 74 } 75 75 76 - void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1, 76 + static void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1, 77 77 const unsigned long * __restrict p2, 78 78 const unsigned long * __restrict p3, 79 79 const unsigned long * __restrict p4) ··· 118 118 } while (--lines > 0); 119 119 } 120 120 121 - void xor_arm64_neon_5(unsigned long bytes, unsigned long * __restrict p1, 121 + static void xor_arm64_neon_5(unsigned long bytes, unsigned long * __restrict p1, 122 122 const unsigned long * __restrict p2, 123 123 const unsigned long * __restrict p3, 124 124 const unsigned long * __restrict p4,
+1 -1
arch/arm64/mm/context.c
··· 364 364 ttbr1 &= ~TTBR_ASID_MASK; 365 365 ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid); 366 366 367 + cpu_set_reserved_ttbr0_nosync(); 367 368 write_sysreg(ttbr1, ttbr1_el1); 368 - isb(); 369 369 write_sysreg(ttbr0, ttbr0_el1); 370 370 isb(); 371 371 post_ttbr_update_workaround();
+14 -6
arch/arm64/mm/fault.c
··· 66 66 67 67 static void data_abort_decode(unsigned long esr) 68 68 { 69 + unsigned long iss2 = ESR_ELx_ISS2(esr); 70 + 69 71 pr_alert("Data abort info:\n"); 70 72 71 73 if (esr & ESR_ELx_ISV) { ··· 80 78 (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT, 81 79 (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT); 82 80 } else { 83 - pr_alert(" ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK); 81 + pr_alert(" ISV = 0, ISS = 0x%08lx, ISS2 = 0x%08lx\n", 82 + esr & ESR_ELx_ISS_MASK, iss2); 84 83 } 85 84 86 - pr_alert(" CM = %lu, WnR = %lu\n", 85 + pr_alert(" CM = %lu, WnR = %lu, TnD = %lu, TagAccess = %lu\n", 87 86 (esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT, 88 - (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT); 87 + (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT, 88 + (iss2 & ESR_ELx_TnD) >> ESR_ELx_TnD_SHIFT, 89 + (iss2 & ESR_ELx_TagAccess) >> ESR_ELx_TagAccess_SHIFT); 90 + 91 + pr_alert(" GCS = %ld, Overlay = %lu, DirtyBit = %lu, Xs = %llu\n", 92 + (iss2 & ESR_ELx_GCS) >> ESR_ELx_GCS_SHIFT, 93 + (iss2 & ESR_ELx_Overlay) >> ESR_ELx_Overlay_SHIFT, 94 + (iss2 & ESR_ELx_DirtyBit) >> ESR_ELx_DirtyBit_SHIFT, 95 + (iss2 & ESR_ELx_Xs_MASK) >> ESR_ELx_Xs_SHIFT); 89 96 } 90 97 91 98 static void mem_abort_decode(unsigned long esr) ··· 896 885 addr, esr); 897 886 } 898 887 NOKPROBE_SYMBOL(do_sp_pc_abort); 899 - 900 - int __init early_brk64(unsigned long addr, unsigned long esr, 901 - struct pt_regs *regs); 902 888 903 889 /* 904 890 * __refdata because early_brk64 is __init, but the reference to it is
+1
arch/arm64/mm/flush.c
··· 8 8 9 9 #include <linux/export.h> 10 10 #include <linux/mm.h> 11 + #include <linux/libnvdimm.h> 11 12 #include <linux/pagemap.h> 12 13 13 14 #include <asm/cacheflush.h>
+34 -10
arch/arm64/mm/init.c
··· 69 69 70 70 #define CRASH_ADDR_LOW_MAX arm64_dma_phys_limit 71 71 #define CRASH_ADDR_HIGH_MAX (PHYS_MASK + 1) 72 + #define CRASH_HIGH_SEARCH_BASE SZ_4G 72 73 73 74 #define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20) 74 75 ··· 102 101 */ 103 102 static void __init reserve_crashkernel(void) 104 103 { 105 - unsigned long long crash_base, crash_size; 106 - unsigned long long crash_low_size = 0; 104 + unsigned long long crash_low_size = 0, search_base = 0; 107 105 unsigned long long crash_max = CRASH_ADDR_LOW_MAX; 106 + unsigned long long crash_base, crash_size; 108 107 char *cmdline = boot_command_line; 109 - int ret; 110 108 bool fixed_base = false; 109 + bool high = false; 110 + int ret; 111 111 112 112 if (!IS_ENABLED(CONFIG_KEXEC_CORE)) 113 113 return; ··· 131 129 else if (ret) 132 130 return; 133 131 132 + search_base = CRASH_HIGH_SEARCH_BASE; 134 133 crash_max = CRASH_ADDR_HIGH_MAX; 134 + high = true; 135 135 } else if (ret || !crash_size) { 136 136 /* The specified value is invalid */ 137 137 return; ··· 144 140 /* User specifies base address explicitly. */ 145 141 if (crash_base) { 146 142 fixed_base = true; 143 + search_base = crash_base; 147 144 crash_max = crash_base + crash_size; 148 145 } 149 146 150 147 retry: 151 148 crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN, 152 - crash_base, crash_max); 149 + search_base, crash_max); 153 150 if (!crash_base) { 154 151 /* 155 - * If the first attempt was for low memory, fall back to 156 - * high memory, the minimum required low memory will be 157 - * reserved later. 152 + * For crashkernel=size[KMG]@offset[KMG], print out failure 153 + * message if can't reserve the specified region. 
158 154 */ 159 - if (!fixed_base && (crash_max == CRASH_ADDR_LOW_MAX)) { 155 + if (fixed_base) { 156 + pr_warn("crashkernel reservation failed - memory is in use.\n"); 157 + return; 158 + } 159 + 160 + /* 161 + * For crashkernel=size[KMG], if the first attempt was for 162 + * low memory, fall back to high memory, the minimum required 163 + * low memory will be reserved later. 164 + */ 165 + if (!high && crash_max == CRASH_ADDR_LOW_MAX) { 160 166 crash_max = CRASH_ADDR_HIGH_MAX; 167 + search_base = CRASH_ADDR_LOW_MAX; 161 168 crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE; 162 169 goto retry; 163 170 } 164 171 172 + /* 173 + * For crashkernel=size[KMG],high, if the first attempt was 174 + * for high memory, fall back to low memory. 175 + */ 176 + if (high && crash_max == CRASH_ADDR_HIGH_MAX) { 177 + crash_max = CRASH_ADDR_LOW_MAX; 178 + search_base = 0; 179 + goto retry; 180 + } 165 181 pr_warn("cannot allocate crashkernel (size:0x%llx)\n", 166 182 crash_size); 167 183 return; 168 184 } 169 185 170 - if ((crash_base > CRASH_ADDR_LOW_MAX - crash_low_size) && 171 - crash_low_size && reserve_crashkernel_low(crash_low_size)) { 186 + if ((crash_base >= CRASH_ADDR_LOW_MAX) && crash_low_size && 187 + reserve_crashkernel_low(crash_low_size)) { 172 188 memblock_phys_free(crash_base, crash_size); 173 189 return; 174 190 }
+4 -13
arch/arm64/mm/kasan_init.c
··· 214 214 static void __init kasan_init_shadow(void) 215 215 { 216 216 u64 kimg_shadow_start, kimg_shadow_end; 217 - u64 mod_shadow_start, mod_shadow_end; 217 + u64 mod_shadow_start; 218 218 u64 vmalloc_shadow_end; 219 219 phys_addr_t pa_start, pa_end; 220 220 u64 i; ··· 223 223 kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END)); 224 224 225 225 mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR); 226 - mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END); 227 226 228 227 vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END); 229 228 ··· 245 246 kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END), 246 247 (void *)mod_shadow_start); 247 248 248 - if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) { 249 - BUILD_BUG_ON(VMALLOC_START != MODULES_END); 250 - kasan_populate_early_shadow((void *)vmalloc_shadow_end, 251 - (void *)KASAN_SHADOW_END); 252 - } else { 253 - kasan_populate_early_shadow((void *)kimg_shadow_end, 254 - (void *)KASAN_SHADOW_END); 255 - if (kimg_shadow_start > mod_shadow_end) 256 - kasan_populate_early_shadow((void *)mod_shadow_end, 257 - (void *)kimg_shadow_start); 258 - } 249 + BUILD_BUG_ON(VMALLOC_START != MODULES_END); 250 + kasan_populate_early_shadow((void *)vmalloc_shadow_end, 251 + (void *)KASAN_SHADOW_END); 259 252 260 253 for_each_mem_range(i, &pa_start, &pa_end) { 261 254 void *start = (void *)__phys_to_virt(pa_start);
+9 -4
arch/arm64/mm/mmu.c
··· 451 451 void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt, 452 452 phys_addr_t size, pgprot_t prot) 453 453 { 454 - if ((virt >= PAGE_END) && (virt < VMALLOC_START)) { 454 + if (virt < PAGE_OFFSET) { 455 455 pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", 456 456 &phys, virt); 457 457 return; ··· 478 478 static void update_mapping_prot(phys_addr_t phys, unsigned long virt, 479 479 phys_addr_t size, pgprot_t prot) 480 480 { 481 - if ((virt >= PAGE_END) && (virt < VMALLOC_START)) { 481 + if (virt < PAGE_OFFSET) { 482 482 pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n", 483 483 &phys, virt); 484 484 return; ··· 663 663 vm_area_add_early(vma); 664 664 } 665 665 666 + static pgprot_t kernel_exec_prot(void) 667 + { 668 + return rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; 669 + } 670 + 666 671 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 667 672 static int __init map_entry_trampoline(void) 668 673 { 669 674 int i; 670 675 671 - pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; 676 + pgprot_t prot = kernel_exec_prot(); 672 677 phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start); 673 678 674 679 /* The trampoline is always mapped and can therefore be global */ ··· 728 723 * mapping to install SW breakpoints. Allow this (only) when 729 724 * explicitly requested with rodata=off. 730 725 */ 731 - pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; 726 + pgprot_t text_prot = kernel_exec_prot(); 732 727 733 728 /* 734 729 * If we have a CPU that supports BTI and a kernel built for
+2
arch/arm64/tools/cpucaps
··· 32 32 HAS_GIC_CPUIF_SYSREGS 33 33 HAS_GIC_PRIO_MASKING 34 34 HAS_GIC_PRIO_RELAXED_SYNC 35 + HAS_HCX 35 36 HAS_LDAPR 36 37 HAS_LSE_ATOMICS 38 + HAS_MOPS 37 39 HAS_NESTED_VIRT 38 40 HAS_NO_FPSIMD 39 41 HAS_NO_HW_PREFETCH
+2 -2
arch/arm64/tools/gen-cpucaps.awk
··· 24 24 } 25 25 26 26 /^[vA-Z0-9_]+$/ { 27 - printf("#define ARM64_%-30s\t%d\n", $0, cap_num++) 27 + printf("#define ARM64_%-40s\t%d\n", $0, cap_num++) 28 28 next 29 29 } 30 30 31 31 END { 32 - printf("#define ARM64_NCAPS\t\t\t\t%d\n", cap_num) 32 + printf("#define ARM64_NCAPS\t\t\t\t\t%d\n", cap_num) 33 33 print "" 34 34 print "#endif /* __ASM_CPUCAPS_H */" 35 35 }
+132
arch/arm64/tools/sysreg
··· 48 48 # feature that introduces them (eg, FEAT_LS64_ACCDATA introduces enumeration 49 49 # item ACCDATA) though it may be more taseful to do something else. 50 50 51 + Sysreg OSDTRRX_EL1 2 0 0 0 2 52 + Res0 63:32 53 + Field 31:0 DTRRX 54 + EndSysreg 55 + 56 + Sysreg MDCCINT_EL1 2 0 0 2 0 57 + Res0 63:31 58 + Field 30 RX 59 + Field 29 TX 60 + Res0 28:0 61 + EndSysreg 62 + 63 + Sysreg MDSCR_EL1 2 0 0 2 2 64 + Res0 63:36 65 + Field 35 EHBWE 66 + Field 34 EnSPM 67 + Field 33 TTA 68 + Field 32 EMBWE 69 + Field 31 TFO 70 + Field 30 RXfull 71 + Field 29 TXfull 72 + Res0 28 73 + Field 27 RXO 74 + Field 26 TXU 75 + Res0 25:24 76 + Field 23:22 INTdis 77 + Field 21 TDA 78 + Res0 20 79 + Field 19 SC2 80 + Res0 18:16 81 + Field 15 MDE 82 + Field 14 HDE 83 + Field 13 KDE 84 + Field 12 TDCC 85 + Res0 11:7 86 + Field 6 ERR 87 + Res0 5:1 88 + Field 0 SS 89 + EndSysreg 90 + 91 + Sysreg OSDTRTX_EL1 2 0 0 3 2 92 + Res0 63:32 93 + Field 31:0 DTRTX 94 + EndSysreg 95 + 96 + Sysreg OSECCR_EL1 2 0 0 6 2 97 + Res0 63:32 98 + Field 31:0 EDECCR 99 + EndSysreg 100 + 101 + Sysreg OSLAR_EL1 2 0 1 0 4 102 + Res0 63:1 103 + Field 0 OSLK 104 + EndSysreg 105 + 51 106 Sysreg ID_PFR0_EL1 3 0 0 1 0 52 107 Res0 63:32 53 108 UnsignedEnum 31:28 RAS ··· 2254 2199 Sysreg ICC_NMIAR1_EL1 3 0 12 9 5 2255 2200 Res0 63:24 2256 2201 Field 23:0 INTID 2202 + EndSysreg 2203 + 2204 + Sysreg TRBLIMITR_EL1 3 0 9 11 0 2205 + Field 63:12 LIMIT 2206 + Res0 11:7 2207 + Field 6 XE 2208 + Field 5 nVM 2209 + Enum 4:3 TM 2210 + 0b00 STOP 2211 + 0b01 IRQ 2212 + 0b11 IGNR 2213 + EndEnum 2214 + Enum 2:1 FM 2215 + 0b00 FILL 2216 + 0b01 WRAP 2217 + 0b11 CBUF 2218 + EndEnum 2219 + Field 0 E 2220 + EndSysreg 2221 + 2222 + Sysreg TRBPTR_EL1 3 0 9 11 1 2223 + Field 63:0 PTR 2224 + EndSysreg 2225 + 2226 + Sysreg TRBBASER_EL1 3 0 9 11 2 2227 + Field 63:12 BASE 2228 + Res0 11:0 2229 + EndSysreg 2230 + 2231 + Sysreg TRBSR_EL1 3 0 9 11 3 2232 + Res0 63:56 2233 + Field 55:32 MSS2 2234 + Field 31:26 EC 2235 + Res0 25:24 2236 + Field 23 
DAT 2237 + Field 22 IRQ 2238 + Field 21 TRG 2239 + Field 20 WRAP 2240 + Res0 19 2241 + Field 18 EA 2242 + Field 17 S 2243 + Res0 16 2244 + Field 15:0 MSS 2245 + EndSysreg 2246 + 2247 + Sysreg TRBMAR_EL1 3 0 9 11 4 2248 + Res0 63:12 2249 + Enum 11:10 PAS 2250 + 0b00 SECURE 2251 + 0b01 NON_SECURE 2252 + 0b10 ROOT 2253 + 0b11 REALM 2254 + EndEnum 2255 + Enum 9:8 SH 2256 + 0b00 NON_SHAREABLE 2257 + 0b10 OUTER_SHAREABLE 2258 + 0b11 INNER_SHAREABLE 2259 + EndEnum 2260 + Field 7:0 Attr 2261 + EndSysreg 2262 + 2263 + Sysreg TRBTRG_EL1 3 0 9 11 6 2264 + Res0 63:32 2265 + Field 31:0 TRG 2266 + EndSysreg 2267 + 2268 + Sysreg TRBIDR_EL1 3 0 9 11 7 2269 + Res0 63:12 2270 + Enum 11:8 EA 2271 + 0b0000 NON_DESC 2272 + 0b0001 IGNORE 2273 + 0b0010 SERROR 2274 + EndEnum 2275 + Res0 7:6 2276 + Field 5 F 2277 + Field 4 P 2278 + Field 3:0 Align 2257 2279 EndSysreg
+1 -1
drivers/acpi/arm64/Makefile
··· 3 3 obj-$(CONFIG_ACPI_IORT) += iort.o 4 4 obj-$(CONFIG_ACPI_GTDT) += gtdt.o 5 5 obj-$(CONFIG_ACPI_APMT) += apmt.o 6 - obj-y += dma.o 6 + obj-y += dma.o init.o
+1 -1
drivers/acpi/arm64/agdi.c
··· 9 9 #define pr_fmt(fmt) "ACPI: AGDI: " fmt 10 10 11 11 #include <linux/acpi.h> 12 - #include <linux/acpi_agdi.h> 13 12 #include <linux/arm_sdei.h> 14 13 #include <linux/io.h> 15 14 #include <linux/kernel.h> 16 15 #include <linux/platform_device.h> 16 + #include "init.h" 17 17 18 18 struct agdi_data { 19 19 int sdei_event;
+1 -1
drivers/acpi/arm64/apmt.c
··· 10 10 #define pr_fmt(fmt) "ACPI: APMT: " fmt 11 11 12 12 #include <linux/acpi.h> 13 - #include <linux/acpi_apmt.h> 14 13 #include <linux/init.h> 15 14 #include <linux/kernel.h> 16 15 #include <linux/platform_device.h> 16 + #include "init.h" 17 17 18 18 #define DEV_NAME "arm-cs-arch-pmu" 19 19
+13
drivers/acpi/arm64/init.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + #include <linux/acpi.h> 3 + #include "init.h" 4 + 5 + void __init acpi_arm_init(void) 6 + { 7 + if (IS_ENABLED(CONFIG_ACPI_AGDI)) 8 + acpi_agdi_init(); 9 + if (IS_ENABLED(CONFIG_ACPI_APMT)) 10 + acpi_apmt_init(); 11 + if (IS_ENABLED(CONFIG_ACPI_IORT)) 12 + acpi_iort_init(); 13 + }
+6
drivers/acpi/arm64/init.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + #include <linux/init.h> 3 + 4 + void __init acpi_agdi_init(void); 5 + void __init acpi_apmt_init(void); 6 + void __init acpi_iort_init(void);
+1
drivers/acpi/arm64/iort.c
··· 19 19 #include <linux/platform_device.h> 20 20 #include <linux/slab.h> 21 21 #include <linux/dma-map-ops.h> 22 + #include "init.h" 22 23 23 24 #define IORT_TYPE_MASK(type) (1 << (type)) 24 25 #define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP)
+1 -6
drivers/acpi/bus.c
··· 26 26 #include <asm/mpspec.h> 27 27 #include <linux/dmi.h> 28 28 #endif 29 - #include <linux/acpi_agdi.h> 30 - #include <linux/acpi_apmt.h> 31 - #include <linux/acpi_iort.h> 32 29 #include <linux/acpi_viot.h> 33 30 #include <linux/pci.h> 34 31 #include <acpi/apei.h> ··· 1405 1408 acpi_init_ffh(); 1406 1409 1407 1410 pci_mmcfg_late_init(); 1408 - acpi_iort_init(); 1411 + acpi_arm_init(); 1409 1412 acpi_viot_early_init(); 1410 1413 acpi_hest_init(); 1411 1414 acpi_ghes_init(); ··· 1417 1420 acpi_debugger_init(); 1418 1421 acpi_setup_sb_notify_handler(); 1419 1422 acpi_viot_init(); 1420 - acpi_agdi_init(); 1421 - acpi_apmt_init(); 1422 1423 return 0; 1423 1424 } 1424 1425
+17 -16
drivers/hwtracing/coresight/coresight-trbe.c
··· 218 218 * Enable the TRBE without clearing LIMITPTR which 219 219 * might be required for fetching the buffer limits. 220 220 */ 221 - trblimitr |= TRBLIMITR_ENABLE; 221 + trblimitr |= TRBLIMITR_EL1_E; 222 222 write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1); 223 223 224 224 /* Synchronize the TRBE enable event */ ··· 236 236 * Disable the TRBE without clearing LIMITPTR which 237 237 * might be required for fetching the buffer limits. 238 238 */ 239 - trblimitr &= ~TRBLIMITR_ENABLE; 239 + trblimitr &= ~TRBLIMITR_EL1_E; 240 240 write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1); 241 241 242 242 if (trbe_needs_drain_after_disable(cpudata)) ··· 582 582 u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1); 583 583 584 584 WARN_ON(is_trbe_enabled()); 585 - trbsr &= ~TRBSR_IRQ; 586 - trbsr &= ~TRBSR_TRG; 587 - trbsr &= ~TRBSR_WRAP; 588 - trbsr &= ~(TRBSR_EC_MASK << TRBSR_EC_SHIFT); 589 - trbsr &= ~(TRBSR_BSC_MASK << TRBSR_BSC_SHIFT); 590 - trbsr &= ~TRBSR_STOP; 585 + trbsr &= ~TRBSR_EL1_IRQ; 586 + trbsr &= ~TRBSR_EL1_TRG; 587 + trbsr &= ~TRBSR_EL1_WRAP; 588 + trbsr &= ~TRBSR_EL1_EC_MASK; 589 + trbsr &= ~TRBSR_EL1_BSC_MASK; 590 + trbsr &= ~TRBSR_EL1_S; 591 591 write_sysreg_s(trbsr, SYS_TRBSR_EL1); 592 592 } 593 593 ··· 596 596 u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1); 597 597 unsigned long addr = buf->trbe_limit; 598 598 599 - WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_LIMIT_SHIFT))); 599 + WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_EL1_LIMIT_SHIFT))); 600 600 WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE)); 601 601 602 - trblimitr &= ~TRBLIMITR_NVM; 603 - trblimitr &= ~(TRBLIMITR_FILL_MODE_MASK << TRBLIMITR_FILL_MODE_SHIFT); 604 - trblimitr &= ~(TRBLIMITR_TRIG_MODE_MASK << TRBLIMITR_TRIG_MODE_SHIFT); 605 - trblimitr &= ~(TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT); 602 + trblimitr &= ~TRBLIMITR_EL1_nVM; 603 + trblimitr &= ~TRBLIMITR_EL1_FM_MASK; 604 + trblimitr &= ~TRBLIMITR_EL1_TM_MASK; 605 + trblimitr &= ~TRBLIMITR_EL1_LIMIT_MASK; 606 606 607 607 /* 608 608 * Fill trace buffer mode 
is used here while configuring the ··· 613 613 * trace data in the interrupt handler, before reconfiguring 614 614 * the TRBE. 615 615 */ 616 - trblimitr |= (TRBE_FILL_MODE_FILL & TRBLIMITR_FILL_MODE_MASK) << TRBLIMITR_FILL_MODE_SHIFT; 616 + trblimitr |= (TRBLIMITR_EL1_FM_FILL << TRBLIMITR_EL1_FM_SHIFT) & 617 + TRBLIMITR_EL1_FM_MASK; 617 618 618 619 /* 619 620 * Trigger mode is not used here while configuring the TRBE for 620 621 * the trace capture. Hence just keep this in the ignore mode. 621 622 */ 622 - trblimitr |= (TRBE_TRIG_MODE_IGNORE & TRBLIMITR_TRIG_MODE_MASK) << 623 - TRBLIMITR_TRIG_MODE_SHIFT; 623 + trblimitr |= (TRBLIMITR_EL1_TM_IGNR << TRBLIMITR_EL1_TM_SHIFT) & 624 + TRBLIMITR_EL1_TM_MASK; 624 625 trblimitr |= (addr & PAGE_MASK); 625 626 set_trbe_enabled(buf->cpudata, trblimitr); 626 627 }
+15 -23
drivers/hwtracing/coresight/coresight-trbe.h
··· 30 30 { 31 31 u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1); 32 32 33 - return trblimitr & TRBLIMITR_ENABLE; 33 + return trblimitr & TRBLIMITR_EL1_E; 34 34 } 35 35 36 36 #define TRBE_EC_OTHERS 0 ··· 39 39 40 40 static inline int get_trbe_ec(u64 trbsr) 41 41 { 42 - return (trbsr >> TRBSR_EC_SHIFT) & TRBSR_EC_MASK; 42 + return (trbsr & TRBSR_EL1_EC_MASK) >> TRBSR_EL1_EC_SHIFT; 43 43 } 44 44 45 45 #define TRBE_BSC_NOT_STOPPED 0 ··· 48 48 49 49 static inline int get_trbe_bsc(u64 trbsr) 50 50 { 51 - return (trbsr >> TRBSR_BSC_SHIFT) & TRBSR_BSC_MASK; 51 + return (trbsr & TRBSR_EL1_BSC_MASK) >> TRBSR_EL1_BSC_SHIFT; 52 52 } 53 53 54 54 static inline void clr_trbe_irq(void) 55 55 { 56 56 u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1); 57 57 58 - trbsr &= ~TRBSR_IRQ; 58 + trbsr &= ~TRBSR_EL1_IRQ; 59 59 write_sysreg_s(trbsr, SYS_TRBSR_EL1); 60 60 } 61 61 62 62 static inline bool is_trbe_irq(u64 trbsr) 63 63 { 64 - return trbsr & TRBSR_IRQ; 64 + return trbsr & TRBSR_EL1_IRQ; 65 65 } 66 66 67 67 static inline bool is_trbe_trg(u64 trbsr) 68 68 { 69 - return trbsr & TRBSR_TRG; 69 + return trbsr & TRBSR_EL1_TRG; 70 70 } 71 71 72 72 static inline bool is_trbe_wrap(u64 trbsr) 73 73 { 74 - return trbsr & TRBSR_WRAP; 74 + return trbsr & TRBSR_EL1_WRAP; 75 75 } 76 76 77 77 static inline bool is_trbe_abort(u64 trbsr) 78 78 { 79 - return trbsr & TRBSR_ABORT; 79 + return trbsr & TRBSR_EL1_EA; 80 80 } 81 81 82 82 static inline bool is_trbe_running(u64 trbsr) 83 83 { 84 - return !(trbsr & TRBSR_STOP); 84 + return !(trbsr & TRBSR_EL1_S); 85 85 } 86 - 87 - #define TRBE_TRIG_MODE_STOP 0 88 - #define TRBE_TRIG_MODE_IRQ 1 89 - #define TRBE_TRIG_MODE_IGNORE 3 90 - 91 - #define TRBE_FILL_MODE_FILL 0 92 - #define TRBE_FILL_MODE_WRAP 1 93 - #define TRBE_FILL_MODE_CIRCULAR_BUFFER 3 94 86 95 87 static inline bool get_trbe_flag_update(u64 trbidr) 96 88 { 97 - return trbidr & TRBIDR_FLAG; 89 + return trbidr & TRBIDR_EL1_F; 98 90 } 99 91 100 92 static inline bool is_trbe_programmable(u64 trbidr) 101 93 
{ 102 - return !(trbidr & TRBIDR_PROG); 94 + return !(trbidr & TRBIDR_EL1_P); 103 95 } 104 96 105 97 static inline int get_trbe_address_align(u64 trbidr) 106 98 { 107 - return (trbidr >> TRBIDR_ALIGN_SHIFT) & TRBIDR_ALIGN_MASK; 99 + return (trbidr & TRBIDR_EL1_Align_MASK) >> TRBIDR_EL1_Align_SHIFT; 108 100 } 109 101 110 102 static inline unsigned long get_trbe_write_pointer(void) ··· 113 121 static inline unsigned long get_trbe_limit_pointer(void) 114 122 { 115 123 u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1); 116 - unsigned long addr = trblimitr & (TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT); 124 + unsigned long addr = trblimitr & TRBLIMITR_EL1_LIMIT_MASK; 117 125 118 126 WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE)); 119 127 return addr; ··· 122 130 static inline unsigned long get_trbe_base_pointer(void) 123 131 { 124 132 u64 trbbaser = read_sysreg_s(SYS_TRBBASER_EL1); 125 - unsigned long addr = trbbaser & (TRBBASER_BASE_MASK << TRBBASER_BASE_SHIFT); 133 + unsigned long addr = trbbaser & TRBBASER_EL1_BASE_MASK; 126 134 127 135 WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE)); 128 136 return addr; ··· 131 139 static inline void set_trbe_base_pointer(unsigned long addr) 132 140 { 133 141 WARN_ON(is_trbe_enabled()); 134 - WARN_ON(!IS_ALIGNED(addr, (1UL << TRBBASER_BASE_SHIFT))); 142 + WARN_ON(!IS_ALIGNED(addr, (1UL << TRBBASER_EL1_BASE_SHIFT))); 135 143 WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE)); 136 144 write_sysreg_s(addr, SYS_TRBBASER_EL1); 137 145 }
+6
include/linux/acpi.h
··· 1507 1507 } 1508 1508 #endif 1509 1509 1510 + #ifdef CONFIG_ARM64 1511 + void acpi_arm_init(void); 1512 + #else 1513 + static inline void acpi_arm_init(void) { } 1514 + #endif 1515 + 1510 1516 #ifdef CONFIG_ACPI_PCC 1511 1517 void acpi_init_pcc(void); 1512 1518 #else
-13
include/linux/acpi_agdi.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 2 - 3 - #ifndef __ACPI_AGDI_H__ 4 - #define __ACPI_AGDI_H__ 5 - 6 - #include <linux/acpi.h> 7 - 8 - #ifdef CONFIG_ACPI_AGDI 9 - void __init acpi_agdi_init(void); 10 - #else 11 - static inline void acpi_agdi_init(void) {} 12 - #endif 13 - #endif /* __ACPI_AGDI_H__ */
-19
include/linux/acpi_apmt.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 2 - * 3 - * ARM CoreSight PMU driver. 4 - * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. 5 - * 6 - */ 7 - 8 - #ifndef __ACPI_APMT_H__ 9 - #define __ACPI_APMT_H__ 10 - 11 - #include <linux/acpi.h> 12 - 13 - #ifdef CONFIG_ACPI_APMT 14 - void acpi_apmt_init(void); 15 - #else 16 - static inline void acpi_apmt_init(void) { } 17 - #endif /* CONFIG_ACPI_APMT */ 18 - 19 - #endif /* __ACPI_APMT_H__ */
-2
include/linux/acpi_iort.h
··· 27 27 void iort_deregister_domain_token(int trans_id); 28 28 struct fwnode_handle *iort_find_domain_token(int trans_id); 29 29 #ifdef CONFIG_ACPI_IORT 30 - void acpi_iort_init(void); 31 30 u32 iort_msi_map_id(struct device *dev, u32 id); 32 31 struct irq_domain *iort_get_device_domain(struct device *dev, u32 id, 33 32 enum irq_domain_bus_token bus_token); ··· 42 43 void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head); 43 44 phys_addr_t acpi_iort_dma_get_max_cpu_address(void); 44 45 #else 45 - static inline void acpi_iort_init(void) { } 46 46 static inline u32 iort_msi_map_id(struct device *dev, u32 id) 47 47 { return id; } 48 48 static inline struct irq_domain *iort_get_device_domain(
+22
tools/testing/selftests/arm64/abi/hwcap.c
··· 39 39 asm volatile(".inst 0xdac01c00" : : : "x0"); 40 40 } 41 41 42 + static void mops_sigill(void) 43 + { 44 + char dst[1], src[1]; 45 + register char *dstp asm ("x0") = dst; 46 + register char *srcp asm ("x1") = src; 47 + register long size asm ("x2") = 1; 48 + 49 + /* CPYP [x0]!, [x1]!, x2! */ 50 + asm volatile(".inst 0x1d010440" 51 + : "+r" (dstp), "+r" (srcp), "+r" (size) 52 + : 53 + : "cc", "memory"); 54 + } 55 + 42 56 static void rng_sigill(void) 43 57 { 44 58 asm volatile("mrs x0, S3_3_C2_C4_0" : : : "x0"); ··· 222 208 .hwcap_bit = HWCAP2_CSSC, 223 209 .cpuinfo = "cssc", 224 210 .sigill_fn = cssc_sigill, 211 + }, 212 + { 213 + .name = "MOPS", 214 + .at_hwcap = AT_HWCAP2, 215 + .hwcap_bit = HWCAP2_MOPS, 216 + .cpuinfo = "mops", 217 + .sigill_fn = mops_sigill, 218 + .sigill_reliable = true, 225 219 }, 226 220 { 227 221 .name = "RNG",
+31 -1
tools/testing/selftests/arm64/abi/ptrace.c
··· 20 20 21 21 #include "../../kselftest.h" 22 22 23 - #define EXPECTED_TESTS 7 23 + #define EXPECTED_TESTS 11 24 24 25 25 #define MAX_TPIDRS 2 26 26 ··· 132 132 } 133 133 } 134 134 135 + static void test_hw_debug(pid_t child, int type, const char *type_name) 136 + { 137 + struct user_hwdebug_state state; 138 + struct iovec iov; 139 + int slots, arch, ret; 140 + 141 + iov.iov_len = sizeof(state); 142 + iov.iov_base = &state; 143 + 144 + /* Should be able to read the values */ 145 + ret = ptrace(PTRACE_GETREGSET, child, type, &iov); 146 + ksft_test_result(ret == 0, "read_%s\n", type_name); 147 + 148 + if (ret == 0) { 149 + /* Low 8 bits is the number of slots, next 4 bits the arch */ 150 + slots = state.dbg_info & 0xff; 151 + arch = (state.dbg_info >> 8) & 0xf; 152 + 153 + ksft_print_msg("%s version %d with %d slots\n", type_name, 154 + arch, slots); 155 + 156 + /* Zero is not currently architecturally valid */ 157 + ksft_test_result(arch, "%s_arch_set\n", type_name); 158 + } else { 159 + ksft_test_result_skip("%s_arch_set\n"); 160 + } 161 + } 162 + 135 163 static int do_child(void) 136 164 { 137 165 if (ptrace(PTRACE_TRACEME, -1, NULL, NULL)) ··· 235 207 ksft_print_msg("Parent is %d, child is %d\n", getpid(), child); 236 208 237 209 test_tpidr(child); 210 + test_hw_debug(child, NT_ARM_HW_WATCH, "NT_ARM_HW_WATCH"); 211 + test_hw_debug(child, NT_ARM_HW_BREAK, "NT_ARM_HW_BREAK"); 238 212 239 213 ret = EXIT_SUCCESS; 240 214
+1 -1
tools/testing/selftests/arm64/signal/.gitignore
··· 4 4 sme_* 5 5 ssve_* 6 6 sve_* 7 - tpidr2_siginfo 7 + tpidr2_* 8 8 za_* 9 9 zt_* 10 10 !*.[ch]
+2 -1
tools/testing/selftests/arm64/signal/test_signals_utils.c
··· 249 249 fprintf(stderr, "-- Timeout !\n"); 250 250 } else { 251 251 fprintf(stderr, 252 - "-- RX UNEXPECTED SIGNAL: %d\n", signum); 252 + "-- RX UNEXPECTED SIGNAL: %d code %d address %p\n", 253 + signum, si->si_code, si->si_addr); 253 254 } 254 255 default_result(current, 1); 255 256 }
+86
tools/testing/selftests/arm64/signal/testcases/tpidr2_restore.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2023 ARM Limited 4 + * 5 + * Verify that the TPIDR2 register context in signal frames is restored. 6 + */ 7 + 8 + #include <signal.h> 9 + #include <ucontext.h> 10 + #include <sys/auxv.h> 11 + #include <sys/prctl.h> 12 + #include <unistd.h> 13 + #include <asm/sigcontext.h> 14 + 15 + #include "test_signals_utils.h" 16 + #include "testcases.h" 17 + 18 + #define SYS_TPIDR2 "S3_3_C13_C0_5" 19 + 20 + static uint64_t get_tpidr2(void) 21 + { 22 + uint64_t val; 23 + 24 + asm volatile ( 25 + "mrs %0, " SYS_TPIDR2 "\n" 26 + : "=r"(val) 27 + : 28 + : "cc"); 29 + 30 + return val; 31 + } 32 + 33 + static void set_tpidr2(uint64_t val) 34 + { 35 + asm volatile ( 36 + "msr " SYS_TPIDR2 ", %0\n" 37 + : 38 + : "r"(val) 39 + : "cc"); 40 + } 41 + 42 + 43 + static uint64_t initial_tpidr2; 44 + 45 + static bool save_tpidr2(struct tdescr *td) 46 + { 47 + initial_tpidr2 = get_tpidr2(); 48 + fprintf(stderr, "Initial TPIDR2: %lx\n", initial_tpidr2); 49 + 50 + return true; 51 + } 52 + 53 + static int modify_tpidr2(struct tdescr *td, siginfo_t *si, ucontext_t *uc) 54 + { 55 + uint64_t my_tpidr2 = get_tpidr2(); 56 + 57 + my_tpidr2++; 58 + fprintf(stderr, "Setting TPIDR2 to %lx\n", my_tpidr2); 59 + set_tpidr2(my_tpidr2); 60 + 61 + return 0; 62 + } 63 + 64 + static void check_tpidr2(struct tdescr *td) 65 + { 66 + uint64_t tpidr2 = get_tpidr2(); 67 + 68 + td->pass = tpidr2 == initial_tpidr2; 69 + 70 + if (td->pass) 71 + fprintf(stderr, "TPIDR2 restored\n"); 72 + else 73 + fprintf(stderr, "TPIDR2 was %lx but is now %lx\n", 74 + initial_tpidr2, tpidr2); 75 + } 76 + 77 + struct tdescr tde = { 78 + .name = "TPIDR2 restore", 79 + .descr = "Validate that TPIDR2 is restored from the sigframe", 80 + .feats_required = FEAT_SME, 81 + .timeout = 3, 82 + .sig_trig = SIGUSR1, 83 + .init = save_tpidr2, 84 + .run = modify_tpidr2, 85 + .check_result = check_tpidr2, 86 + };