Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'x86-urgent-2026-03-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:

- Fix SEV guest boot failures in certain circumstances, due to
very early code relying on a BSS-zeroed variable that isn't
actually zeroed yet and may contain non-zero bootup values

Move the variable into the .data section to gain even earlier
zeroing

- Expose & allow the IBPB-on-Entry feature on SNP guests, which
was not properly exposed to guests due to initial implementational
caution

- Fix O= build failure when CONFIG_EFI_SBAT_FILE is using relative
file paths

- Fix the various SNC (Sub-NUMA Clustering) topology enumeration
bugs/artifacts (sched-domain build errors mostly).

SNC enumeration data got more complicated with Granite Rapids X
(GNR) and Clearwater Forest X (CWF), which exposed these bugs
and made their effects more serious

- Also use the now sane(r) SNC code to fix resctrl SNC detection bugs

- Work around a historic libgcc unwinder bug in the vdso32 sigreturn
code (again), which regressed during an overly aggressive recent
cleanup of DWARF annotations

* tag 'x86-urgent-2026-03-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/entry/vdso32: Work around libgcc unwinder bug
x86/resctrl: Fix SNC detection
x86/topo: Fix SNC topology mess
x86/topo: Replace x86_has_numa_in_package
x86/topo: Add topology_num_nodes_per_package()
x86/numa: Store extra copy of numa_nodes_parsed
x86/boot: Handle relative CONFIG_EFI_SBAT_FILE file paths
x86/sev: Allow IBPB-on-Entry feature for SNP guests
x86/boot/sev: Move SEV decompressor variables into the .data section

+227 -94
+1
arch/x86/boot/compressed/Makefile
··· 113 113 114 114 ifdef CONFIG_EFI_SBAT 115 115 $(obj)/sbat.o: $(CONFIG_EFI_SBAT_FILE) 116 + AFLAGS_sbat.o += -I $(srctree) 116 117 endif 117 118 118 119 $(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE
+5 -4
arch/x86/boot/compressed/sev.c
··· 28 28 #include "sev.h" 29 29 30 30 static struct ghcb boot_ghcb_page __aligned(PAGE_SIZE); 31 - struct ghcb *boot_ghcb; 31 + struct ghcb *boot_ghcb __section(".data"); 32 32 33 33 #undef __init 34 34 #define __init 35 35 36 36 #define __BOOT_COMPRESSED 37 37 38 - u8 snp_vmpl; 39 - u16 ghcb_version; 38 + u8 snp_vmpl __section(".data"); 39 + u16 ghcb_version __section(".data"); 40 40 41 - u64 boot_svsm_caa_pa; 41 + u64 boot_svsm_caa_pa __section(".data"); 42 42 43 43 /* Include code for early handlers */ 44 44 #include "../../boot/startup/sev-shared.c" ··· 188 188 MSR_AMD64_SNP_RESERVED_BIT13 | \ 189 189 MSR_AMD64_SNP_RESERVED_BIT15 | \ 190 190 MSR_AMD64_SNP_SECURE_AVIC | \ 191 + MSR_AMD64_SNP_RESERVED_BITS19_22 | \ 191 192 MSR_AMD64_SNP_RESERVED_MASK) 192 193 193 194 #ifdef CONFIG_AMD_SECURE_AVIC
+1 -1
arch/x86/boot/startup/sev-shared.c
··· 31 31 static u32 cpuid_hyp_range_max __ro_after_init; 32 32 static u32 cpuid_ext_range_max __ro_after_init; 33 33 34 - bool sev_snp_needs_sfw; 34 + bool sev_snp_needs_sfw __section(".data"); 35 35 36 36 void __noreturn 37 37 sev_es_terminate(unsigned int set, unsigned int reason)
+1
arch/x86/coco/sev/core.c
··· 89 89 [MSR_AMD64_SNP_VMSA_REG_PROT_BIT] = "VMSARegProt", 90 90 [MSR_AMD64_SNP_SMT_PROT_BIT] = "SMTProt", 91 91 [MSR_AMD64_SNP_SECURE_AVIC_BIT] = "SecureAVIC", 92 + [MSR_AMD64_SNP_IBPB_ON_ENTRY_BIT] = "IBPBOnEntry", 92 93 }; 93 94 94 95 /*
+30
arch/x86/entry/vdso/vdso32/sigreturn.S
··· 35 35 #endif 36 36 .endm 37 37 38 + /* 39 + * WARNING: 40 + * 41 + * A bug in the libgcc unwinder as of at least gcc 15.2 (2026) means that 42 + * the unwinder fails to recognize the signal frame flag. 43 + * 44 + * There is a hacky legacy fallback path in libgcc which ends up 45 + * getting invoked instead. It happens to work as long as BOTH of the 46 + * following conditions are true: 47 + * 48 + * 1. There is at least one byte before the each of the sigreturn 49 + * functions which falls outside any function. This is enforced by 50 + * an explicit nop instruction before the ALIGN. 51 + * 2. The code sequences between the entry point up to and including 52 + * the int $0x80 below need to match EXACTLY. Do not change them 53 + * in any way. The exact byte sequences are: 54 + * 55 + * __kernel_sigreturn: 56 + * 0: 58 pop %eax 57 + * 1: b8 77 00 00 00 mov $0x77,%eax 58 + * 6: cd 80 int $0x80 59 + * 60 + * __kernel_rt_sigreturn: 61 + * 0: b8 ad 00 00 00 mov $0xad,%eax 62 + * 5: cd 80 int $0x80 63 + * 64 + * For details, see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=124050 65 + */ 38 66 .text 39 67 .globl __kernel_sigreturn 40 68 .type __kernel_sigreturn,@function 69 + nop /* libgcc hack: see comment above */ 41 70 ALIGN 42 71 __kernel_sigreturn: 43 72 STARTPROC_SIGNAL_FRAME IA32_SIGFRAME_sigcontext ··· 81 52 82 53 .globl __kernel_rt_sigreturn 83 54 .type __kernel_rt_sigreturn,@function 55 + nop /* libgcc hack: see comment above */ 84 56 ALIGN 85 57 __kernel_rt_sigreturn: 86 58 STARTPROC_SIGNAL_FRAME IA32_RT_SIGFRAME_sigcontext
+4 -1
arch/x86/include/asm/msr-index.h
··· 740 740 #define MSR_AMD64_SNP_SMT_PROT BIT_ULL(MSR_AMD64_SNP_SMT_PROT_BIT) 741 741 #define MSR_AMD64_SNP_SECURE_AVIC_BIT 18 742 742 #define MSR_AMD64_SNP_SECURE_AVIC BIT_ULL(MSR_AMD64_SNP_SECURE_AVIC_BIT) 743 - #define MSR_AMD64_SNP_RESV_BIT 19 743 + #define MSR_AMD64_SNP_RESERVED_BITS19_22 GENMASK_ULL(22, 19) 744 + #define MSR_AMD64_SNP_IBPB_ON_ENTRY_BIT 23 745 + #define MSR_AMD64_SNP_IBPB_ON_ENTRY BIT_ULL(MSR_AMD64_SNP_IBPB_ON_ENTRY_BIT) 746 + #define MSR_AMD64_SNP_RESV_BIT 24 744 747 #define MSR_AMD64_SNP_RESERVED_MASK GENMASK_ULL(63, MSR_AMD64_SNP_RESV_BIT) 745 748 #define MSR_AMD64_SAVIC_CONTROL 0xc0010138 746 749 #define MSR_AMD64_SAVIC_EN_BIT 0
+6
arch/x86/include/asm/numa.h
··· 22 22 */ 23 23 extern s16 __apicid_to_node[MAX_LOCAL_APIC]; 24 24 extern nodemask_t numa_nodes_parsed __initdata; 25 + extern nodemask_t numa_phys_nodes_parsed __initdata; 25 26 26 27 static inline void set_apicid_to_node(int apicid, s16 node) 27 28 { ··· 49 48 extern void numa_add_cpu(unsigned int cpu); 50 49 extern void numa_remove_cpu(unsigned int cpu); 51 50 extern void init_gi_nodes(void); 51 + extern int num_phys_nodes(void); 52 52 #else /* CONFIG_NUMA */ 53 53 static inline void numa_set_node(int cpu, int node) { } 54 54 static inline void numa_clear_node(int cpu) { } ··· 57 55 static inline void numa_add_cpu(unsigned int cpu) { } 58 56 static inline void numa_remove_cpu(unsigned int cpu) { } 59 57 static inline void init_gi_nodes(void) { } 58 + static inline int num_phys_nodes(void) 59 + { 60 + return 1; 61 + } 60 62 #endif /* CONFIG_NUMA */ 61 63 62 64 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
+6
arch/x86/include/asm/topology.h
··· 155 155 extern unsigned int __max_threads_per_core; 156 156 extern unsigned int __num_threads_per_package; 157 157 extern unsigned int __num_cores_per_package; 158 + extern unsigned int __num_nodes_per_package; 158 159 159 160 const char *get_topology_cpu_type_name(struct cpuinfo_x86 *c); 160 161 enum x86_topology_cpu_type get_topology_cpu_type(struct cpuinfo_x86 *c); ··· 178 177 static inline unsigned int topology_num_threads_per_package(void) 179 178 { 180 179 return __num_threads_per_package; 180 + } 181 + 182 + static inline unsigned int topology_num_nodes_per_package(void) 183 + { 184 + return __num_nodes_per_package; 181 185 } 182 186 183 187 #ifdef CONFIG_X86_LOCAL_APIC
+3
arch/x86/kernel/cpu/common.c
··· 95 95 unsigned int __max_logical_packages __ro_after_init = 1; 96 96 EXPORT_SYMBOL(__max_logical_packages); 97 97 98 + unsigned int __num_nodes_per_package __ro_after_init = 1; 99 + EXPORT_SYMBOL(__num_nodes_per_package); 100 + 98 101 unsigned int __num_cores_per_package __ro_after_init = 1; 99 102 EXPORT_SYMBOL(__num_cores_per_package); 100 103
+5 -31
arch/x86/kernel/cpu/resctrl/monitor.c
··· 364 364 msr_clear_bit(MSR_RMID_SNC_CONFIG, 0); 365 365 } 366 366 367 - /* CPU models that support MSR_RMID_SNC_CONFIG */ 367 + /* CPU models that support SNC and MSR_RMID_SNC_CONFIG */ 368 368 static const struct x86_cpu_id snc_cpu_ids[] __initconst = { 369 369 X86_MATCH_VFM(INTEL_ICELAKE_X, 0), 370 370 X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, 0), ··· 375 375 {} 376 376 }; 377 377 378 - /* 379 - * There isn't a simple hardware bit that indicates whether a CPU is running 380 - * in Sub-NUMA Cluster (SNC) mode. Infer the state by comparing the 381 - * number of CPUs sharing the L3 cache with CPU0 to the number of CPUs in 382 - * the same NUMA node as CPU0. 383 - * It is not possible to accurately determine SNC state if the system is 384 - * booted with a maxcpus=N parameter. That distorts the ratio of SNC nodes 385 - * to L3 caches. It will be OK if system is booted with hyperthreading 386 - * disabled (since this doesn't affect the ratio). 387 - */ 388 378 static __init int snc_get_config(void) 389 379 { 390 - struct cacheinfo *ci = get_cpu_cacheinfo_level(0, RESCTRL_L3_CACHE); 391 - const cpumask_t *node0_cpumask; 392 - int cpus_per_node, cpus_per_l3; 393 - int ret; 380 + int ret = topology_num_nodes_per_package(); 394 381 395 - if (!x86_match_cpu(snc_cpu_ids) || !ci) 382 + if (ret > 1 && !x86_match_cpu(snc_cpu_ids)) { 383 + pr_warn("CoD enabled system? Resctrl not supported\n"); 396 384 return 1; 397 - 398 - cpus_read_lock(); 399 - if (num_online_cpus() != num_present_cpus()) 400 - pr_warn("Some CPUs offline, SNC detection may be incorrect\n"); 401 - cpus_read_unlock(); 402 - 403 - node0_cpumask = cpumask_of_node(cpu_to_node(0)); 404 - 405 - cpus_per_node = cpumask_weight(node0_cpumask); 406 - cpus_per_l3 = cpumask_weight(&ci->shared_cpu_map); 407 - 408 - if (!cpus_per_node || !cpus_per_l3) 409 - return 1; 410 - 411 - ret = cpus_per_l3 / cpus_per_node; 385 + } 412 386 413 387 /* sanity check: Only valid results are 1, 2, 3, 4, 6 */ 414 388 switch (ret) {
+11 -2
arch/x86/kernel/cpu/topology.c
··· 31 31 #include <asm/mpspec.h> 32 32 #include <asm/msr.h> 33 33 #include <asm/smp.h> 34 + #include <asm/numa.h> 34 35 35 36 #include "cpu.h" 36 37 ··· 493 492 set_nr_cpu_ids(allowed); 494 493 495 494 cnta = domain_weight(TOPO_PKG_DOMAIN); 496 - cntb = domain_weight(TOPO_DIE_DOMAIN); 497 495 __max_logical_packages = cnta; 496 + 497 + pr_info("Max. logical packages: %3u\n", __max_logical_packages); 498 + 499 + cntb = num_phys_nodes(); 500 + __num_nodes_per_package = DIV_ROUND_UP(cntb, cnta); 501 + 502 + pr_info("Max. logical nodes: %3u\n", cntb); 503 + pr_info("Num. nodes per package:%3u\n", __num_nodes_per_package); 504 + 505 + cntb = domain_weight(TOPO_DIE_DOMAIN); 498 506 __max_dies_per_package = 1U << (get_count_order(cntb) - get_count_order(cnta)); 499 507 500 - pr_info("Max. logical packages: %3u\n", cnta); 501 508 pr_info("Max. logical dies: %3u\n", cntb); 502 509 pr_info("Max. dies per package: %3u\n", __max_dies_per_package); 503 510
+144 -55
arch/x86/kernel/smpboot.c
··· 468 468 } 469 469 #endif 470 470 471 - /* 472 - * Set if a package/die has multiple NUMA nodes inside. 473 - * AMD Magny-Cours, Intel Cluster-on-Die, and Intel 474 - * Sub-NUMA Clustering have this. 475 - */ 476 - static bool x86_has_numa_in_package; 477 - 478 471 static struct sched_domain_topology_level x86_topology[] = { 479 472 SDTL_INIT(tl_smt_mask, cpu_smt_flags, SMT), 480 473 #ifdef CONFIG_SCHED_CLUSTER ··· 489 496 * PKG domain since the NUMA domains will auto-magically create the 490 497 * right spanning domains based on the SLIT. 491 498 */ 492 - if (x86_has_numa_in_package) { 499 + if (topology_num_nodes_per_package() > 1) { 493 500 unsigned int pkgdom = ARRAY_SIZE(x86_topology) - 2; 494 501 495 502 memset(&x86_topology[pkgdom], 0, sizeof(x86_topology[pkgdom])); ··· 506 513 } 507 514 508 515 #ifdef CONFIG_NUMA 509 - static int sched_avg_remote_distance; 510 - static int avg_remote_numa_distance(void) 516 + /* 517 + * Test if the on-trace cluster at (N,N) is symmetric. 518 + * Uses upper triangle iteration to avoid obvious duplicates. 
519 + */ 520 + static bool slit_cluster_symmetric(int N) 511 521 { 512 - int i, j; 513 - int distance, nr_remote, total_distance; 522 + int u = topology_num_nodes_per_package(); 514 523 515 - if (sched_avg_remote_distance > 0) 516 - return sched_avg_remote_distance; 517 - 518 - nr_remote = 0; 519 - total_distance = 0; 520 - for_each_node_state(i, N_CPU) { 521 - for_each_node_state(j, N_CPU) { 522 - distance = node_distance(i, j); 523 - 524 - if (distance >= REMOTE_DISTANCE) { 525 - nr_remote++; 526 - total_distance += distance; 527 - } 524 + for (int k = 0; k < u; k++) { 525 + for (int l = k; l < u; l++) { 526 + if (node_distance(N + k, N + l) != 527 + node_distance(N + l, N + k)) 528 + return false; 528 529 } 529 530 } 530 - if (nr_remote) 531 - sched_avg_remote_distance = total_distance / nr_remote; 532 - else 533 - sched_avg_remote_distance = REMOTE_DISTANCE; 534 531 535 - return sched_avg_remote_distance; 532 + return true; 533 + } 534 + 535 + /* 536 + * Return the package-id of the cluster, or ~0 if indeterminate. 537 + * Each node in the on-trace cluster should have the same package-id. 538 + */ 539 + static u32 slit_cluster_package(int N) 540 + { 541 + int u = topology_num_nodes_per_package(); 542 + u32 pkg_id = ~0; 543 + 544 + for (int n = 0; n < u; n++) { 545 + const struct cpumask *cpus = cpumask_of_node(N + n); 546 + int cpu; 547 + 548 + for_each_cpu(cpu, cpus) { 549 + u32 id = topology_logical_package_id(cpu); 550 + 551 + if (pkg_id == ~0) 552 + pkg_id = id; 553 + if (pkg_id != id) 554 + return ~0; 555 + } 556 + } 557 + 558 + return pkg_id; 559 + } 560 + 561 + /* 562 + * Validate the SLIT table is of the form expected for SNC, specifically: 563 + * 564 + * - each on-trace cluster should be symmetric, 565 + * - each on-trace cluster should have a unique package-id. 566 + * 567 + * If you NUMA_EMU on top of SNC, you get to keep the pieces. 
568 + */ 569 + static bool slit_validate(void) 570 + { 571 + int u = topology_num_nodes_per_package(); 572 + u32 pkg_id, prev_pkg_id = ~0; 573 + 574 + for (int pkg = 0; pkg < topology_max_packages(); pkg++) { 575 + int n = pkg * u; 576 + 577 + /* 578 + * Ensure the on-trace cluster is symmetric and each cluster 579 + * has a different package id. 580 + */ 581 + if (!slit_cluster_symmetric(n)) 582 + return false; 583 + pkg_id = slit_cluster_package(n); 584 + if (pkg_id == ~0) 585 + return false; 586 + if (pkg && pkg_id == prev_pkg_id) 587 + return false; 588 + 589 + prev_pkg_id = pkg_id; 590 + } 591 + 592 + return true; 593 + } 594 + 595 + /* 596 + * Compute a sanitized SLIT table for SNC; notably SNC-3 can end up with 597 + * asymmetric off-trace clusters, reflecting physical assymmetries. However 598 + * this leads to 'unfortunate' sched_domain configurations. 599 + * 600 + * For example dual socket GNR with SNC-3: 601 + * 602 + * node distances: 603 + * node 0 1 2 3 4 5 604 + * 0: 10 15 17 21 28 26 605 + * 1: 15 10 15 23 26 23 606 + * 2: 17 15 10 26 23 21 607 + * 3: 21 28 26 10 15 17 608 + * 4: 23 26 23 15 10 15 609 + * 5: 26 23 21 17 15 10 610 + * 611 + * Fix things up by averaging out the off-trace clusters; resulting in: 612 + * 613 + * node 0 1 2 3 4 5 614 + * 0: 10 15 17 24 24 24 615 + * 1: 15 10 15 24 24 24 616 + * 2: 17 15 10 24 24 24 617 + * 3: 24 24 24 10 15 17 618 + * 4: 24 24 24 15 10 15 619 + * 5: 24 24 24 17 15 10 620 + */ 621 + static int slit_cluster_distance(int i, int j) 622 + { 623 + static int slit_valid = -1; 624 + int u = topology_num_nodes_per_package(); 625 + long d = 0; 626 + int x, y; 627 + 628 + if (slit_valid < 0) { 629 + slit_valid = slit_validate(); 630 + if (!slit_valid) 631 + pr_err(FW_BUG "SLIT table doesn't have the expected form for SNC -- fixup disabled!\n"); 632 + else 633 + pr_info("Fixing up SNC SLIT table.\n"); 634 + } 635 + 636 + /* 637 + * Is this a unit cluster on the trace? 
638 + */ 639 + if ((i / u) == (j / u) || !slit_valid) 640 + return node_distance(i, j); 641 + 642 + /* 643 + * Off-trace cluster. 644 + * 645 + * Notably average out the symmetric pair of off-trace clusters to 646 + * ensure the resulting SLIT table is symmetric. 647 + */ 648 + x = i - (i % u); 649 + y = j - (j % u); 650 + 651 + for (i = x; i < x + u; i++) { 652 + for (j = y; j < y + u; j++) { 653 + d += node_distance(i, j); 654 + d += node_distance(j, i); 655 + } 656 + } 657 + 658 + return d / (2*u*u); 536 659 } 537 660 538 661 int arch_sched_node_distance(int from, int to) ··· 658 549 switch (boot_cpu_data.x86_vfm) { 659 550 case INTEL_GRANITERAPIDS_X: 660 551 case INTEL_ATOM_DARKMONT_X: 661 - 662 - if (!x86_has_numa_in_package || topology_max_packages() == 1 || 663 - d < REMOTE_DISTANCE) 552 + if (topology_max_packages() == 1 || 553 + topology_num_nodes_per_package() < 3) 664 554 return d; 665 555 666 556 /* 667 - * With SNC enabled, there could be too many levels of remote 668 - * NUMA node distances, creating NUMA domain levels 669 - * including local nodes and partial remote nodes. 670 - * 671 - * Trim finer distance tuning for NUMA nodes in remote package 672 - * for the purpose of building sched domains. Group NUMA nodes 673 - * in the remote package in the same sched group. 674 - * Simplify NUMA domains and avoid extra NUMA levels including 675 - * different remote NUMA nodes and local nodes. 676 - * 677 - * GNR and CWF don't expect systems with more than 2 packages 678 - * and more than 2 hops between packages. Single average remote 679 - * distance won't be appropriate if there are more than 2 680 - * packages as average distance to different remote packages 681 - * could be different. 557 + * Handle SNC-3 asymmetries. 
682 558 */ 683 - WARN_ONCE(topology_max_packages() > 2, 684 - "sched: Expect only up to 2 packages for GNR or CWF, " 685 - "but saw %d packages when building sched domains.", 686 - topology_max_packages()); 687 - 688 - d = avg_remote_numa_distance(); 559 + return slit_cluster_distance(from, to); 689 560 } 690 561 return d; 691 562 } ··· 695 606 o = &cpu_data(i); 696 607 697 608 if (match_pkg(c, o) && !topology_same_node(c, o)) 698 - x86_has_numa_in_package = true; 609 + WARN_ON_ONCE(topology_num_nodes_per_package() == 1); 699 610 700 611 if ((i == cpu) || (has_smt && match_smt(c, o))) 701 612 link_mask(topology_sibling_cpumask, cpu, i);
+8
arch/x86/mm/numa.c
··· 48 48 [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE 49 49 }; 50 50 51 + nodemask_t numa_phys_nodes_parsed __initdata; 52 + 51 53 int numa_cpu_node(int cpu) 52 54 { 53 55 u32 apicid = early_per_cpu(x86_cpu_to_apicid, cpu); ··· 57 55 if (apicid != BAD_APICID) 58 56 return __apicid_to_node[apicid]; 59 57 return NUMA_NO_NODE; 58 + } 59 + 60 + int __init num_phys_nodes(void) 61 + { 62 + return bitmap_weight(numa_phys_nodes_parsed.bits, MAX_NUMNODES); 60 63 } 61 64 62 65 cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; ··· 217 210 0LLU, PFN_PHYS(max_pfn) - 1); 218 211 219 212 node_set(0, numa_nodes_parsed); 213 + node_set(0, numa_phys_nodes_parsed); 220 214 numa_add_memblk(0, 0, PFN_PHYS(max_pfn)); 221 215 222 216 return 0;
+2
arch/x86/mm/srat.c
··· 57 57 } 58 58 set_apicid_to_node(apic_id, node); 59 59 node_set(node, numa_nodes_parsed); 60 + node_set(node, numa_phys_nodes_parsed); 60 61 pr_debug("SRAT: PXM %u -> APIC 0x%04x -> Node %u\n", pxm, apic_id, node); 61 62 } 62 63 ··· 98 97 99 98 set_apicid_to_node(apic_id, node); 100 99 node_set(node, numa_nodes_parsed); 100 + node_set(node, numa_phys_nodes_parsed); 101 101 pr_debug("SRAT: PXM %u -> APIC 0x%02x -> Node %u\n", pxm, apic_id, node); 102 102 } 103 103