Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arch_topology: move parse_acpi_topology() to common code

Currently, RISC-V lacks architecture-specific registers for CPU topology
properties and must obtain them from ACPI. Thus, parse_acpi_topology()
is moved from arch/arm64/ to drivers/base/ so that RISC-V can reuse it.

Signed-off-by: Yunhui Cui <cuiyunhui@bytedance.com>
Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
Link: https://patch.msgid.link/20250923015409.15983-2-cuiyunhui@bytedance.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Yunhui Cui and committed by
Greg Kroah-Hartman
6d0ef689 cebd22dd

+103 -102
+3
arch/arm64/include/asm/topology.h
··· 36 36 #define arch_scale_hw_pressure topology_get_hw_pressure 37 37 #define arch_update_hw_pressure topology_update_hw_pressure 38 38 39 + #undef arch_cpu_is_threaded 40 + #define arch_cpu_is_threaded() (read_cpuid_mpidr() & MPIDR_MT_BITMASK) 41 + 39 42 #include <asm-generic/topology.h> 40 43 41 44 #endif /* _ASM_ARM_TOPOLOGY_H */
-101
arch/arm64/kernel/topology.c
··· 25 25 #include <asm/cputype.h> 26 26 #include <asm/topology.h> 27 27 28 - #ifdef CONFIG_ACPI 29 - static bool __init acpi_cpu_is_threaded(int cpu) 30 - { 31 - int is_threaded = acpi_pptt_cpu_is_thread(cpu); 32 - 33 - /* 34 - * if the PPTT doesn't have thread information, assume a homogeneous 35 - * machine and return the current CPU's thread state. 36 - */ 37 - if (is_threaded < 0) 38 - is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK; 39 - 40 - return !!is_threaded; 41 - } 42 - 43 - struct cpu_smt_info { 44 - unsigned int thread_num; 45 - int core_id; 46 - }; 47 - 48 - /* 49 - * Propagate the topology information of the processor_topology_node tree to the 50 - * cpu_topology array. 51 - */ 52 - int __init parse_acpi_topology(void) 53 - { 54 - unsigned int max_smt_thread_num = 1; 55 - struct cpu_smt_info *entry; 56 - struct xarray hetero_cpu; 57 - unsigned long hetero_id; 58 - int cpu, topology_id; 59 - 60 - if (acpi_disabled) 61 - return 0; 62 - 63 - xa_init(&hetero_cpu); 64 - 65 - for_each_possible_cpu(cpu) { 66 - topology_id = find_acpi_cpu_topology(cpu, 0); 67 - if (topology_id < 0) 68 - return topology_id; 69 - 70 - if (acpi_cpu_is_threaded(cpu)) { 71 - cpu_topology[cpu].thread_id = topology_id; 72 - topology_id = find_acpi_cpu_topology(cpu, 1); 73 - cpu_topology[cpu].core_id = topology_id; 74 - 75 - /* 76 - * In the PPTT, CPUs below a node with the 'identical 77 - * implementation' flag have the same number of threads. 78 - * Count the number of threads for only one CPU (i.e. 79 - * one core_id) among those with the same hetero_id. 80 - * See the comment of find_acpi_cpu_topology_hetero_id() 81 - * for more details. 
82 - * 83 - * One entry is created for each node having: 84 - * - the 'identical implementation' flag 85 - * - its parent not having the flag 86 - */ 87 - hetero_id = find_acpi_cpu_topology_hetero_id(cpu); 88 - entry = xa_load(&hetero_cpu, hetero_id); 89 - if (!entry) { 90 - entry = kzalloc(sizeof(*entry), GFP_KERNEL); 91 - WARN_ON_ONCE(!entry); 92 - 93 - if (entry) { 94 - entry->core_id = topology_id; 95 - entry->thread_num = 1; 96 - xa_store(&hetero_cpu, hetero_id, 97 - entry, GFP_KERNEL); 98 - } 99 - } else if (entry->core_id == topology_id) { 100 - entry->thread_num++; 101 - } 102 - } else { 103 - cpu_topology[cpu].thread_id = -1; 104 - cpu_topology[cpu].core_id = topology_id; 105 - } 106 - topology_id = find_acpi_cpu_topology_cluster(cpu); 107 - cpu_topology[cpu].cluster_id = topology_id; 108 - topology_id = find_acpi_cpu_topology_package(cpu); 109 - cpu_topology[cpu].package_id = topology_id; 110 - } 111 - 112 - /* 113 - * This is a short loop since the number of XArray elements is the 114 - * number of heterogeneous CPU clusters. On a homogeneous system 115 - * there's only one entry in the XArray. 116 - */ 117 - xa_for_each(&hetero_cpu, hetero_id, entry) { 118 - max_smt_thread_num = max(max_smt_thread_num, entry->thread_num); 119 - xa_erase(&hetero_cpu, hetero_id); 120 - kfree(entry); 121 - } 122 - 123 - cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num); 124 - xa_destroy(&hetero_cpu); 125 - return 0; 126 - } 127 - #endif 128 - 129 28 #ifdef CONFIG_ARM64_AMU_EXTN 130 29 #define read_corecnt() read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0) 131 30 #define read_constcnt() read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)
+95 -1
drivers/base/arch_topology.c
··· 823 823 clear_cpu_topology(cpu); 824 824 } 825 825 826 + #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) 827 + struct cpu_smt_info { 828 + unsigned int thread_num; 829 + int core_id; 830 + }; 831 + 832 + static bool __init acpi_cpu_is_threaded(int cpu) 833 + { 834 + int is_threaded = acpi_pptt_cpu_is_thread(cpu); 835 + 836 + /* 837 + * if the PPTT doesn't have thread information, check for architecture 838 + * specific fallback if available 839 + */ 840 + if (is_threaded < 0) 841 + is_threaded = arch_cpu_is_threaded(); 842 + 843 + return !!is_threaded; 844 + } 845 + 846 + /* 847 + * Propagate the topology information of the processor_topology_node tree to the 848 + * cpu_topology array. 849 + */ 826 850 __weak int __init parse_acpi_topology(void) 827 851 { 852 + unsigned int max_smt_thread_num = 1; 853 + struct cpu_smt_info *entry; 854 + struct xarray hetero_cpu; 855 + unsigned long hetero_id; 856 + int cpu, topology_id; 857 + 858 + if (acpi_disabled) 859 + return 0; 860 + 861 + xa_init(&hetero_cpu); 862 + 863 + for_each_possible_cpu(cpu) { 864 + topology_id = find_acpi_cpu_topology(cpu, 0); 865 + if (topology_id < 0) 866 + return topology_id; 867 + 868 + if (acpi_cpu_is_threaded(cpu)) { 869 + cpu_topology[cpu].thread_id = topology_id; 870 + topology_id = find_acpi_cpu_topology(cpu, 1); 871 + cpu_topology[cpu].core_id = topology_id; 872 + 873 + /* 874 + * In the PPTT, CPUs below a node with the 'identical 875 + * implementation' flag have the same number of threads. 876 + * Count the number of threads for only one CPU (i.e. 877 + * one core_id) among those with the same hetero_id. 878 + * See the comment of find_acpi_cpu_topology_hetero_id() 879 + * for more details. 
880 + * 881 + * One entry is created for each node having: 882 + * - the 'identical implementation' flag 883 + * - its parent not having the flag 884 + */ 885 + hetero_id = find_acpi_cpu_topology_hetero_id(cpu); 886 + entry = xa_load(&hetero_cpu, hetero_id); 887 + if (!entry) { 888 + entry = kzalloc(sizeof(*entry), GFP_KERNEL); 889 + WARN_ON_ONCE(!entry); 890 + 891 + if (entry) { 892 + entry->core_id = topology_id; 893 + entry->thread_num = 1; 894 + xa_store(&hetero_cpu, hetero_id, 895 + entry, GFP_KERNEL); 896 + } 897 + } else if (entry->core_id == topology_id) { 898 + entry->thread_num++; 899 + } 900 + } else { 901 + cpu_topology[cpu].thread_id = -1; 902 + cpu_topology[cpu].core_id = topology_id; 903 + } 904 + topology_id = find_acpi_cpu_topology_cluster(cpu); 905 + cpu_topology[cpu].cluster_id = topology_id; 906 + topology_id = find_acpi_cpu_topology_package(cpu); 907 + cpu_topology[cpu].package_id = topology_id; 908 + } 909 + 910 + /* 911 + * This is a short loop since the number of XArray elements is the 912 + * number of heterogeneous CPU clusters. On a homogeneous system 913 + * there's only one entry in the XArray. 914 + */ 915 + xa_for_each(&hetero_cpu, hetero_id, entry) { 916 + max_smt_thread_num = max(max_smt_thread_num, entry->thread_num); 917 + xa_erase(&hetero_cpu, hetero_id); 918 + kfree(entry); 919 + } 920 + 921 + cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num); 922 + xa_destroy(&hetero_cpu); 828 923 return 0; 829 924 } 830 925 831 - #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) 832 926 void __init init_cpu_topology(void) 833 927 { 834 928 int cpu, ret;
+5
include/linux/arch_topology.h
··· 80 80 #define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) 81 81 #define topology_cluster_cpumask(cpu) (&cpu_topology[cpu].cluster_sibling) 82 82 #define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling) 83 + 84 + #ifndef arch_cpu_is_threaded 85 + #define arch_cpu_is_threaded() (0) 86 + #endif 87 + 83 88 void init_cpu_topology(void); 84 89 void store_cpu_topology(unsigned int cpuid); 85 90 const struct cpumask *cpu_coregroup_mask(int cpu);