Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'mm-hotfixes-stable-2025-07-11-16-16' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
"19 hotfixes. A whopping 16 are cc:stable and the remainder address
post-6.15 issues or aren't considered necessary for -stable kernels.

14 are for MM. Three gdb-script fixes and a kallsyms build fix"

* tag 'mm-hotfixes-stable-2025-07-11-16-16' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
Revert "sched/numa: add statistics of numa balance task"
mm: fix the inaccurate memory statistics issue for users
mm/damon: fix divide by zero in damon_get_intervals_score()
samples/damon: fix damon sample mtier for start failure
samples/damon: fix damon sample wsse for start failure
samples/damon: fix damon sample prcl for start failure
kasan: remove kasan_find_vm_area() to prevent possible deadlock
scripts: gdb: vfs: support external dentry names
mm/migrate: fix do_pages_stat in compat mode
mm/damon/core: handle damon_call_control as normal under kdmond deactivation
mm/rmap: fix potential out-of-bounds page table access during batched unmap
mm/hugetlb: don't crash when allocating a folio if there are no resv
scripts/gdb: de-reference per-CPU MCE interrupts
scripts/gdb: fix interrupts.py after maple tree conversion
maple_tree: fix mt_destroy_walk() on root leaf node
mm/vmalloc: leave lazy MMU mode on PTE mapping error
scripts/gdb: fix interrupts display after MCP on x86
lib/alloc_tag: do not acquire non-existent lock in alloc_tag_top_users()
kallsyms: fix build without execinfo

+399 -130
-6
Documentation/admin-guide/cgroup-v2.rst
··· 1732 1732 numa_hint_faults (npn) 1733 1733 Number of NUMA hinting faults. 1734 1734 1735 - numa_task_migrated (npn) 1736 - Number of task migration by NUMA balancing. 1737 - 1738 - numa_task_swapped (npn) 1739 - Number of task swap by NUMA balancing. 1740 - 1741 1735 pgdemote_kswapd 1742 1736 Number of pages demoted by kswapd. 1743 1737
+7 -7
fs/proc/task_mmu.c
··· 36 36 unsigned long text, lib, swap, anon, file, shmem; 37 37 unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss; 38 38 39 - anon = get_mm_counter(mm, MM_ANONPAGES); 40 - file = get_mm_counter(mm, MM_FILEPAGES); 41 - shmem = get_mm_counter(mm, MM_SHMEMPAGES); 39 + anon = get_mm_counter_sum(mm, MM_ANONPAGES); 40 + file = get_mm_counter_sum(mm, MM_FILEPAGES); 41 + shmem = get_mm_counter_sum(mm, MM_SHMEMPAGES); 42 42 43 43 /* 44 44 * Note: to minimize their overhead, mm maintains hiwater_vm and ··· 59 59 text = min(text, mm->exec_vm << PAGE_SHIFT); 60 60 lib = (mm->exec_vm << PAGE_SHIFT) - text; 61 61 62 - swap = get_mm_counter(mm, MM_SWAPENTS); 62 + swap = get_mm_counter_sum(mm, MM_SWAPENTS); 63 63 SEQ_PUT_DEC("VmPeak:\t", hiwater_vm); 64 64 SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm); 65 65 SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm); ··· 92 92 unsigned long *shared, unsigned long *text, 93 93 unsigned long *data, unsigned long *resident) 94 94 { 95 - *shared = get_mm_counter(mm, MM_FILEPAGES) + 96 - get_mm_counter(mm, MM_SHMEMPAGES); 95 + *shared = get_mm_counter_sum(mm, MM_FILEPAGES) + 96 + get_mm_counter_sum(mm, MM_SHMEMPAGES); 97 97 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) 98 98 >> PAGE_SHIFT; 99 99 *data = mm->data_vm + mm->stack_vm; 100 - *resident = *shared + get_mm_counter(mm, MM_ANONPAGES); 100 + *resident = *shared + get_mm_counter_sum(mm, MM_ANONPAGES); 101 101 return mm->total_vm; 102 102 } 103 103
+5
include/linux/mm.h
··· 2568 2568 return percpu_counter_read_positive(&mm->rss_stat[member]); 2569 2569 } 2570 2570 2571 + static inline unsigned long get_mm_counter_sum(struct mm_struct *mm, int member) 2572 + { 2573 + return percpu_counter_sum_positive(&mm->rss_stat[member]); 2574 + } 2575 + 2571 2576 void mm_trace_rss_stat(struct mm_struct *mm, int member); 2572 2577 2573 2578 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
-4
include/linux/sched.h
··· 548 548 u64 nr_failed_migrations_running; 549 549 u64 nr_failed_migrations_hot; 550 550 u64 nr_forced_migrations; 551 - #ifdef CONFIG_NUMA_BALANCING 552 - u64 numa_task_migrated; 553 - u64 numa_task_swapped; 554 - #endif 555 551 556 552 u64 nr_wakeups; 557 553 u64 nr_wakeups_sync;
-2
include/linux/vm_event_item.h
··· 66 66 NUMA_HINT_FAULTS, 67 67 NUMA_HINT_FAULTS_LOCAL, 68 68 NUMA_PAGE_MIGRATE, 69 - NUMA_TASK_MIGRATE, 70 - NUMA_TASK_SWAP, 71 69 #endif 72 70 #ifdef CONFIG_MIGRATION 73 71 PGMIGRATE_SUCCESS, PGMIGRATE_FAIL,
+2 -7
kernel/sched/core.c
··· 3362 3362 #ifdef CONFIG_NUMA_BALANCING 3363 3363 static void __migrate_swap_task(struct task_struct *p, int cpu) 3364 3364 { 3365 - __schedstat_inc(p->stats.numa_task_swapped); 3366 - count_vm_numa_event(NUMA_TASK_SWAP); 3367 - count_memcg_event_mm(p->mm, NUMA_TASK_SWAP); 3368 - 3369 3365 if (task_on_rq_queued(p)) { 3370 3366 struct rq *src_rq, *dst_rq; 3371 3367 struct rq_flags srf, drf; ··· 7935 7939 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) 7936 7940 return -EINVAL; 7937 7941 7938 - __schedstat_inc(p->stats.numa_task_migrated); 7939 - count_vm_numa_event(NUMA_TASK_MIGRATE); 7940 - count_memcg_event_mm(p->mm, NUMA_TASK_MIGRATE); 7942 + /* TODO: This is not properly updating schedstats */ 7943 + 7941 7944 trace_sched_move_numa(p, curr_cpu, target_cpu); 7942 7945 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 7943 7946 }
-4
kernel/sched/debug.c
··· 1210 1210 P_SCHEDSTAT(nr_failed_migrations_running); 1211 1211 P_SCHEDSTAT(nr_failed_migrations_hot); 1212 1212 P_SCHEDSTAT(nr_forced_migrations); 1213 - #ifdef CONFIG_NUMA_BALANCING 1214 - P_SCHEDSTAT(numa_task_migrated); 1215 - P_SCHEDSTAT(numa_task_swapped); 1216 - #endif 1217 1213 P_SCHEDSTAT(nr_wakeups); 1218 1214 P_SCHEDSTAT(nr_wakeups_sync); 1219 1215 P_SCHEDSTAT(nr_wakeups_migrate);
+3
lib/alloc_tag.c
··· 135 135 struct codetag_bytes n; 136 136 unsigned int i, nr = 0; 137 137 138 + if (IS_ERR_OR_NULL(alloc_tag_cttype)) 139 + return 0; 140 + 138 141 if (can_sleep) 139 142 codetag_lock_module_list(alloc_tag_cttype, true); 140 143 else if (!codetag_trylock_module_list(alloc_tag_cttype))
+1
lib/maple_tree.c
··· 5319 5319 struct maple_enode *start; 5320 5320 5321 5321 if (mte_is_leaf(enode)) { 5322 + mte_set_node_dead(enode); 5322 5323 node->type = mte_node_type(enode); 5323 5324 goto free_leaf; 5324 5325 }
+4 -4
mm/damon/core.c
··· 1449 1449 } 1450 1450 } 1451 1451 target_access_events = max_access_events * goal_bp / 10000; 1452 + target_access_events = target_access_events ? : 1; 1452 1453 return access_events * 10000 / target_access_events; 1453 1454 } 1454 1455 ··· 2356 2355 * 2357 2356 * If there is a &struct damon_call_control request that registered via 2358 2357 * &damon_call() on @ctx, do or cancel the invocation of the function depending 2359 - * on @cancel. @cancel is set when the kdamond is deactivated by DAMOS 2360 - * watermarks, or the kdamond is already out of the main loop and therefore 2361 - * will be terminated. 2358 + * on @cancel. @cancel is set when the kdamond is already out of the main loop 2359 + * and therefore will be terminated. 2362 2360 */ 2363 2361 static void kdamond_call(struct damon_ctx *ctx, bool cancel) 2364 2362 { ··· 2405 2405 if (ctx->callback.after_wmarks_check && 2406 2406 ctx->callback.after_wmarks_check(ctx)) 2407 2407 break; 2408 - kdamond_call(ctx, true); 2408 + kdamond_call(ctx, false); 2409 2409 damos_walk_cancel(ctx); 2410 2410 } 2411 2411 return -EBUSY;
+6 -3
mm/hugetlb.c
··· 2340 2340 struct folio *folio; 2341 2341 2342 2342 spin_lock_irq(&hugetlb_lock); 2343 + if (!h->resv_huge_pages) { 2344 + spin_unlock_irq(&hugetlb_lock); 2345 + return NULL; 2346 + } 2347 + 2343 2348 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid, 2344 2349 nmask); 2345 - if (folio) { 2346 - VM_BUG_ON(!h->resv_huge_pages); 2350 + if (folio) 2347 2351 h->resv_huge_pages--; 2348 - } 2349 2352 2350 2353 spin_unlock_irq(&hugetlb_lock); 2351 2354 return folio;
+2 -43
mm/kasan/report.c
··· 370 370 sizeof(init_thread_union.stack)); 371 371 } 372 372 373 - /* 374 - * This function is invoked with report_lock (a raw_spinlock) held. A 375 - * PREEMPT_RT kernel cannot call find_vm_area() as it will acquire a sleeping 376 - * rt_spinlock. 377 - * 378 - * For !RT kernel, the PROVE_RAW_LOCK_NESTING config option will print a 379 - * lockdep warning for this raw_spinlock -> spinlock dependency. This config 380 - * option is enabled by default to ensure better test coverage to expose this 381 - * kind of RT kernel problem. This lockdep splat, however, can be suppressed 382 - * by using DEFINE_WAIT_OVERRIDE_MAP() if it serves a useful purpose and the 383 - * invalid PREEMPT_RT case has been taken care of. 384 - */ 385 - static inline struct vm_struct *kasan_find_vm_area(void *addr) 386 - { 387 - static DEFINE_WAIT_OVERRIDE_MAP(vmalloc_map, LD_WAIT_SLEEP); 388 - struct vm_struct *va; 389 - 390 - if (IS_ENABLED(CONFIG_PREEMPT_RT)) 391 - return NULL; 392 - 393 - /* 394 - * Suppress lockdep warning and fetch vmalloc area of the 395 - * offending address. 396 - */ 397 - lock_map_acquire_try(&vmalloc_map); 398 - va = find_vm_area(addr); 399 - lock_map_release(&vmalloc_map); 400 - return va; 401 - } 402 - 403 373 static void print_address_description(void *addr, u8 tag, 404 374 struct kasan_report_info *info) 405 375 { ··· 399 429 } 400 430 401 431 if (is_vmalloc_addr(addr)) { 402 - struct vm_struct *va = kasan_find_vm_area(addr); 403 - 404 - if (va) { 405 - pr_err("The buggy address belongs to the virtual mapping at\n" 406 - " [%px, %px) created by:\n" 407 - " %pS\n", 408 - va->addr, va->addr + va->size, va->caller); 409 - pr_err("\n"); 410 - 411 - page = vmalloc_to_page(addr); 412 - } else { 413 - pr_err("The buggy address %px belongs to a vmalloc virtual mapping\n", addr); 414 - } 432 + pr_err("The buggy address %px belongs to a vmalloc virtual mapping\n", addr); 433 + page = vmalloc_to_page(addr); 415 434 } 416 435 417 436 if (page) {
-2
mm/memcontrol.c
··· 474 474 NUMA_PAGE_MIGRATE, 475 475 NUMA_PTE_UPDATES, 476 476 NUMA_HINT_FAULTS, 477 - NUMA_TASK_MIGRATE, 478 - NUMA_TASK_SWAP, 479 477 #endif 480 478 }; 481 479
+8 -6
mm/migrate.c
··· 2399 2399 2400 2400 static int get_compat_pages_array(const void __user *chunk_pages[], 2401 2401 const void __user * __user *pages, 2402 + unsigned long chunk_offset, 2402 2403 unsigned long chunk_nr) 2403 2404 { 2404 2405 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages; ··· 2407 2406 int i; 2408 2407 2409 2408 for (i = 0; i < chunk_nr; i++) { 2410 - if (get_user(p, pages32 + i)) 2409 + if (get_user(p, pages32 + chunk_offset + i)) 2411 2410 return -EFAULT; 2412 2411 chunk_pages[i] = compat_ptr(p); 2413 2412 } ··· 2426 2425 #define DO_PAGES_STAT_CHUNK_NR 16UL 2427 2426 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; 2428 2427 int chunk_status[DO_PAGES_STAT_CHUNK_NR]; 2428 + unsigned long chunk_offset = 0; 2429 2429 2430 2430 while (nr_pages) { 2431 2431 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR); 2432 2432 2433 2433 if (in_compat_syscall()) { 2434 2434 if (get_compat_pages_array(chunk_pages, pages, 2435 - chunk_nr)) 2435 + chunk_offset, chunk_nr)) 2436 2436 break; 2437 2437 } else { 2438 - if (copy_from_user(chunk_pages, pages, 2438 + if (copy_from_user(chunk_pages, pages + chunk_offset, 2439 2439 chunk_nr * sizeof(*chunk_pages))) 2440 2440 break; 2441 2441 } 2442 2442 2443 2443 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); 2444 2444 2445 - if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status))) 2445 + if (copy_to_user(status + chunk_offset, chunk_status, 2446 + chunk_nr * sizeof(*status))) 2446 2447 break; 2447 2448 2448 - pages += chunk_nr; 2449 - status += chunk_nr; 2449 + chunk_offset += chunk_nr; 2450 2450 nr_pages -= chunk_nr; 2451 2451 } 2452 2452 return nr_pages ? -EFAULT : 0;
+28 -18
mm/rmap.c
··· 1845 1845 #endif 1846 1846 } 1847 1847 1848 - /* We support batch unmapping of PTEs for lazyfree large folios */ 1849 - static inline bool can_batch_unmap_folio_ptes(unsigned long addr, 1850 - struct folio *folio, pte_t *ptep) 1848 + static inline unsigned int folio_unmap_pte_batch(struct folio *folio, 1849 + struct page_vma_mapped_walk *pvmw, 1850 + enum ttu_flags flags, pte_t pte) 1851 1851 { 1852 1852 const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY; 1853 - int max_nr = folio_nr_pages(folio); 1854 - pte_t pte = ptep_get(ptep); 1853 + unsigned long end_addr, addr = pvmw->address; 1854 + struct vm_area_struct *vma = pvmw->vma; 1855 + unsigned int max_nr; 1855 1856 1857 + if (flags & TTU_HWPOISON) 1858 + return 1; 1859 + if (!folio_test_large(folio)) 1860 + return 1; 1861 + 1862 + /* We may only batch within a single VMA and a single page table. */ 1863 + end_addr = pmd_addr_end(addr, vma->vm_end); 1864 + max_nr = (end_addr - addr) >> PAGE_SHIFT; 1865 + 1866 + /* We only support lazyfree batching for now ... */ 1856 1867 if (!folio_test_anon(folio) || folio_test_swapbacked(folio)) 1857 - return false; 1868 + return 1; 1858 1869 if (pte_unused(pte)) 1859 - return false; 1860 - if (pte_pfn(pte) != folio_pfn(folio)) 1861 - return false; 1870 + return 1; 1862 1871 1863 - return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL, 1864 - NULL, NULL) == max_nr; 1872 + return folio_pte_batch(folio, addr, pvmw->pte, pte, max_nr, fpb_flags, 1873 + NULL, NULL, NULL); 1865 1874 } 1866 1875 1867 1876 /* ··· 2033 2024 if (pte_dirty(pteval)) 2034 2025 folio_mark_dirty(folio); 2035 2026 } else if (likely(pte_present(pteval))) { 2036 - if (folio_test_large(folio) && !(flags & TTU_HWPOISON) && 2037 - can_batch_unmap_folio_ptes(address, folio, pvmw.pte)) 2038 - nr_pages = folio_nr_pages(folio); 2027 + nr_pages = folio_unmap_pte_batch(folio, &pvmw, flags, pteval); 2039 2028 end_addr = address + nr_pages * PAGE_SIZE; 2040 2029 flush_cache_range(vma, address, end_addr); 2041 2030 ··· 2213 2206 hugetlb_remove_rmap(folio); 2214 2207 } else { 2215 2208 folio_remove_rmap_ptes(folio, subpage, nr_pages, vma); 2216 - folio_ref_sub(folio, nr_pages - 1); 2217 2209 } 2218 2210 if (vma->vm_flags & VM_LOCKED) 2219 2211 mlock_drain_local(); 2220 - folio_put(folio); 2221 - /* We have already batched the entire folio */ 2222 - if (nr_pages > 1) 2212 + folio_put_refs(folio, nr_pages); 2213 + 2214 + /* 2215 + * If we are sure that we batched the entire folio and cleared 2216 + * all PTEs, we can just optimize and stop right here. 2217 + */ 2218 + if (nr_pages == folio_nr_pages(folio)) 2223 2219 goto walk_done; 2224 2220 continue; 2225 2221 walk_abort:
+15 -7
mm/vmalloc.c
··· 514 514 unsigned long end, pgprot_t prot, struct page **pages, int *nr, 515 515 pgtbl_mod_mask *mask) 516 516 { 517 + int err = 0; 517 518 pte_t *pte; 518 519 519 520 /* ··· 531 530 do { 532 531 struct page *page = pages[*nr]; 533 532 534 - if (WARN_ON(!pte_none(ptep_get(pte)))) 535 - return -EBUSY; 536 - if (WARN_ON(!page)) 537 - return -ENOMEM; 538 - if (WARN_ON(!pfn_valid(page_to_pfn(page)))) 539 - return -EINVAL; 533 + if (WARN_ON(!pte_none(ptep_get(pte)))) { 534 + err = -EBUSY; 535 + break; 536 + } 537 + if (WARN_ON(!page)) { 538 + err = -ENOMEM; 539 + break; 540 + } 541 + if (WARN_ON(!pfn_valid(page_to_pfn(page)))) { 542 + err = -EINVAL; 543 + break; 544 + } 540 545 541 546 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); 542 547 (*nr)++; ··· 550 543 551 544 arch_leave_lazy_mmu_mode(); 552 545 *mask |= PGTBL_PTE_MODIFIED; 553 - return 0; 546 + 547 + return err; 554 548 } 555 549 556 550 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
-2
mm/vmstat.c
··· 1346 1346 "numa_hint_faults", 1347 1347 "numa_hint_faults_local", 1348 1348 "numa_pages_migrated", 1349 - "numa_task_migrated", 1350 - "numa_task_swapped", 1351 1349 #endif 1352 1350 #ifdef CONFIG_MIGRATION 1353 1351 "pgmigrate_success",
+6 -2
samples/damon/mtier.c
··· 164 164 if (enable == enabled) 165 165 return 0; 166 166 167 - if (enable) 168 - return damon_sample_mtier_start(); 167 + if (enable) { 168 + err = damon_sample_mtier_start(); 169 + if (err) 170 + enable = false; 171 + return err; 172 + } 169 173 damon_sample_mtier_stop(); 170 174 return 0; 171 175 }
+6 -2
samples/damon/prcl.c
··· 122 122 if (enable == enabled) 123 123 return 0; 124 124 125 - if (enable) 126 - return damon_sample_prcl_start(); 125 + if (enable) { 126 + err = damon_sample_prcl_start(); 127 + if (err) 128 + enable = false; 129 + return err; 130 + } 127 131 damon_sample_prcl_stop(); 128 132 return 0; 129 133 }
+6 -2
samples/damon/wsse.c
··· 102 102 if (enable == enabled) 103 103 return 0; 104 104 105 - if (enable) 106 - return damon_sample_wsse_start(); 105 + if (enable) { 106 + err = damon_sample_wsse_start(); 107 + if (err) 108 + enable = false; 109 + return err; 110 + } 107 111 damon_sample_wsse_stop(); 108 112 return 0; 109 113 }
+7
scripts/gdb/linux/constants.py.in
··· 20 20 #include <linux/of_fdt.h> 21 21 #include <linux/page_ext.h> 22 22 #include <linux/radix-tree.h> 23 + #include <linux/maple_tree.h> 23 24 #include <linux/slab.h> 24 25 #include <linux/threads.h> 25 26 #include <linux/vmalloc.h> ··· 93 92 LX_GDBPARSED(RADIX_TREE_MAP_SIZE) 94 93 LX_GDBPARSED(RADIX_TREE_MAP_SHIFT) 95 94 LX_GDBPARSED(RADIX_TREE_MAP_MASK) 95 + 96 + /* linux/maple_tree.h */ 97 + LX_VALUE(MAPLE_NODE_SLOTS) 98 + LX_VALUE(MAPLE_RANGE64_SLOTS) 99 + LX_VALUE(MAPLE_ARANGE64_SLOTS) 100 + LX_GDBPARSED(MAPLE_NODE_MASK) 96 101 97 102 /* linux/vmalloc.h */ 98 103 LX_VALUE(VM_IOREMAP)
+8 -8
scripts/gdb/linux/interrupts.py
··· 7 7 from linux import constants 8 8 from linux import cpus 9 9 from linux import utils 10 - from linux import radixtree 10 + from linux import mapletree 11 11 12 12 irq_desc_type = utils.CachedType("struct irq_desc") 13 13 ··· 23 23 def show_irq_desc(prec, irq): 24 24 text = "" 25 25 26 - desc = radixtree.lookup(gdb.parse_and_eval("&irq_desc_tree"), irq) 26 + desc = mapletree.mtree_load(gdb.parse_and_eval("&sparse_irqs"), irq) 27 27 if desc is None: 28 28 return text 29 29 30 - desc = desc.cast(irq_desc_type.get_type()) 31 - if desc is None: 30 + desc = desc.cast(irq_desc_type.get_type().pointer()) 31 + if desc == 0: 32 32 return text 33 33 34 34 if irq_settings_is_hidden(desc): ··· 110 110 pvar = gdb.parse_and_eval(var) 111 111 text = "%*s: " % (prec, pfx) 112 112 for cpu in cpus.each_online_cpu(): 113 - text += "%10u " % (cpus.per_cpu(pvar, cpu)) 113 + text += "%10u " % (cpus.per_cpu(pvar, cpu).dereference()) 114 114 text += " %s\n" % (desc) 115 115 return text 116 116 ··· 142 142 143 143 if constants.LX_CONFIG_X86_MCE: 144 144 text += x86_show_mce(prec, "&mce_exception_count", "MCE", "Machine check exceptions") 145 - text == x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls") 145 + text += x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls") 146 146 147 147 text += show_irq_err_count(prec) 148 148 ··· 221 221 gdb.write("CPU%-8d" % cpu) 222 222 gdb.write("\n") 223 223 224 - if utils.gdb_eval_or_none("&irq_desc_tree") is None: 225 - return 224 + if utils.gdb_eval_or_none("&sparse_irqs") is None: 225 + raise gdb.GdbError("Unable to find the sparse IRQ tree, is CONFIG_SPARSE_IRQ enabled?") 226 226 227 227 for irq in range(nr_irqs): 228 228 gdb.write(show_irq_desc(prec, irq))
+252
scripts/gdb/linux/mapletree.py
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + # 3 + # Maple tree helpers 4 + # 5 + # Copyright (c) 2025 Broadcom 6 + # 7 + # Authors: 8 + # Florian Fainelli <florian.fainelli@broadcom.com> 9 + 10 + import gdb 11 + 12 + from linux import utils 13 + from linux import constants 14 + from linux import xarray 15 + 16 + maple_tree_root_type = utils.CachedType("struct maple_tree") 17 + maple_node_type = utils.CachedType("struct maple_node") 18 + maple_enode_type = utils.CachedType("void") 19 + 20 + maple_dense = 0 21 + maple_leaf_64 = 1 22 + maple_range_64 = 2 23 + maple_arange_64 = 3 24 + 25 + class Mas(object): 26 + ma_active = 0 27 + ma_start = 1 28 + ma_root = 2 29 + ma_none = 3 30 + ma_pause = 4 31 + ma_overflow = 5 32 + ma_underflow = 6 33 + ma_error = 7 34 + 35 + def __init__(self, mt, first, end): 36 + if mt.type == maple_tree_root_type.get_type().pointer(): 37 + self.tree = mt.dereference() 38 + elif mt.type != maple_tree_root_type.get_type(): 39 + raise gdb.GdbError("must be {} not {}" 40 + .format(maple_tree_root_type.get_type().pointer(), mt.type)) 41 + self.tree = mt 42 + self.index = first 43 + self.last = end 44 + self.node = None 45 + self.status = self.ma_start 46 + self.min = 0 47 + self.max = -1 48 + 49 + def is_start(self): 50 + # mas_is_start() 51 + return self.status == self.ma_start 52 + 53 + def is_ptr(self): 54 + # mas_is_ptr() 55 + return self.status == self.ma_root 56 + 57 + def is_none(self): 58 + # mas_is_none() 59 + return self.status == self.ma_none 60 + 61 + def root(self): 62 + # mas_root() 63 + return self.tree['ma_root'].cast(maple_enode_type.get_type().pointer()) 64 + 65 + def start(self): 66 + # mas_start() 67 + if self.is_start() is False: 68 + return None 69 + 70 + self.min = 0 71 + self.max = ~0 72 + 73 + while True: 74 + self.depth = 0 75 + root = self.root() 76 + if xarray.xa_is_node(root): 77 + self.depth = 0 78 + self.status = self.ma_active 79 + self.node = mte_safe_root(root) 80 + self.offset = 0 81 + if mte_dead_node(self.node) is True: 82 + continue 83 + 84 + return None 85 + 86 + self.node = None 87 + # Empty tree 88 + if root is None: 89 + self.status = self.ma_none 90 + self.offset = constants.LX_MAPLE_NODE_SLOTS 91 + return None 92 + 93 + # Single entry tree 94 + self.status = self.ma_root 95 + self.offset = constants.LX_MAPLE_NODE_SLOTS 96 + 97 + if self.index != 0: 98 + return None 99 + 100 + return root 101 + 102 + return None 103 + 104 + def reset(self): 105 + # mas_reset() 106 + self.status = self.ma_start 107 + self.node = None 108 + 109 + def mte_safe_root(node): 110 + if node.type != maple_enode_type.get_type().pointer(): 111 + raise gdb.GdbError("{} must be {} not {}" 112 + .format(mte_safe_root.__name__, maple_enode_type.get_type().pointer(), node.type)) 113 + ulong_type = utils.get_ulong_type() 114 + indirect_ptr = node.cast(ulong_type) & ~0x2 115 + val = indirect_ptr.cast(maple_enode_type.get_type().pointer()) 116 + return val 117 + 118 + def mte_node_type(entry): 119 + ulong_type = utils.get_ulong_type() 120 + val = None 121 + if entry.type == maple_enode_type.get_type().pointer(): 122 + val = entry.cast(ulong_type) 123 + elif entry.type == ulong_type: 124 + val = entry 125 + else: 126 + raise gdb.GdbError("{} must be {} not {}" 127 + .format(mte_node_type.__name__, maple_enode_type.get_type().pointer(), entry.type)) 128 + return (val >> 0x3) & 0xf 129 + 130 + def ma_dead_node(node): 131 + if node.type != maple_node_type.get_type().pointer(): 132 + raise gdb.GdbError("{} must be {} not {}" 133 + .format(ma_dead_node.__name__, maple_node_type.get_type().pointer(), node.type)) 134 + ulong_type = utils.get_ulong_type() 135 + parent = node['parent'] 136 + indirect_ptr = node['parent'].cast(ulong_type) & ~constants.LX_MAPLE_NODE_MASK 137 + return indirect_ptr == node 138 + 139 + def mte_to_node(enode): 140 + ulong_type = utils.get_ulong_type() 141 + if enode.type == maple_enode_type.get_type().pointer(): 142 + indirect_ptr = enode.cast(ulong_type) 143 + elif enode.type == ulong_type: 144 + indirect_ptr = enode 145 + else: 146 + raise gdb.GdbError("{} must be {} not {}" 147 + .format(mte_to_node.__name__, maple_enode_type.get_type().pointer(), enode.type)) 148 + indirect_ptr = indirect_ptr & ~constants.LX_MAPLE_NODE_MASK 149 + return indirect_ptr.cast(maple_node_type.get_type().pointer()) 150 + 151 + def mte_dead_node(enode): 152 + if enode.type != maple_enode_type.get_type().pointer(): 153 + raise gdb.GdbError("{} must be {} not {}" 154 + .format(mte_dead_node.__name__, maple_enode_type.get_type().pointer(), enode.type)) 155 + node = mte_to_node(enode) 156 + return ma_dead_node(node) 157 + 158 + def ma_is_leaf(tp): 159 + result = tp < maple_range_64 160 + return tp < maple_range_64 161 + 162 + def mt_pivots(t): 163 + if t == maple_dense: 164 + return 0 165 + elif t == maple_leaf_64 or t == maple_range_64: 166 + return constants.LX_MAPLE_RANGE64_SLOTS - 1 167 + elif t == maple_arange_64: 168 + return constants.LX_MAPLE_ARANGE64_SLOTS - 1 169 + 170 + def ma_pivots(node, t): 171 + if node.type != maple_node_type.get_type().pointer(): 172 + raise gdb.GdbError("{}: must be {} not {}" 173 + .format(ma_pivots.__name__, maple_node_type.get_type().pointer(), node.type)) 174 + if t == maple_arange_64: 175 + return node['ma64']['pivot'] 176 + elif t == maple_leaf_64 or t == maple_range_64: 177 + return node['mr64']['pivot'] 178 + else: 179 + return None 180 + 181 + def ma_slots(node, tp): 182 + if node.type != maple_node_type.get_type().pointer(): 183 + raise gdb.GdbError("{}: must be {} not {}" 184 + .format(ma_slots.__name__, maple_node_type.get_type().pointer(), node.type)) 185 + if tp == maple_arange_64: 186 + return node['ma64']['slot'] 187 + elif tp == maple_range_64 or tp == maple_leaf_64: 188 + return node['mr64']['slot'] 189 + elif tp == maple_dense: 190 + return node['slot'] 191 + else: 192 + return None 193 + 194 + def mt_slot(mt, slots, offset): 195 + ulong_type = utils.get_ulong_type() 196 + return slots[offset].cast(ulong_type) 197 + 198 + def mtree_lookup_walk(mas): 199 + ulong_type = utils.get_ulong_type() 200 + n = mas.node 201 + 202 + while True: 203 + node = mte_to_node(n) 204 + tp = mte_node_type(n) 205 + pivots = ma_pivots(node, tp) 206 + end = mt_pivots(tp) 207 + offset = 0 208 + while True: 209 + if pivots[offset] >= mas.index: 210 + break 211 + if offset >= end: 212 + break 213 + offset += 1 214 + 215 + slots = ma_slots(node, tp) 216 + n = mt_slot(mas.tree, slots, offset) 217 + if ma_dead_node(node) is True: 218 + mas.reset() 219 + return None 220 + break 221 + 222 + if ma_is_leaf(tp) is True: 223 + break 224 + 225 + return n 226 + 227 + def mtree_load(mt, index): 228 + ulong_type = utils.get_ulong_type() 229 + # MT_STATE(...) 230 + mas = Mas(mt, index, index) 231 + entry = None 232 + 233 + while True: 234 + entry = mas.start() 235 + if mas.is_none(): 236 + return None 237 + 238 + if mas.is_ptr(): 239 + if index != 0: 240 + entry = None 241 + return entry 242 + 243 + entry = mtree_lookup_walk(mas) 244 + if entry is None and mas.is_start(): 245 + continue 246 + else: 247 + break 248 + 249 + if xarray.xa_is_zero(entry): 250 + return None 251 + 252 + return entry
+1 -1
scripts/gdb/linux/vfs.py
··· 22 22 if parent == d or parent == 0: 23 23 return "" 24 24 p = dentry_name(d['d_parent']) + "/" 25 - return p + d['d_shortname']['string'].string() 25 + return p + d['d_name']['name'].string() 26 26 27 27 class DentryName(gdb.Function): 28 28 """Return string of the full path of a dentry.
+28
scripts/gdb/linux/xarray.py
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + # 3 + # Xarray helpers 4 + # 5 + # Copyright (c) 2025 Broadcom 6 + # 7 + # Authors: 8 + # Florian Fainelli <florian.fainelli@broadcom.com> 9 + 10 + import gdb 11 + 12 + from linux import utils 13 + from linux import constants 14 + 15 + def xa_is_internal(entry): 16 + ulong_type = utils.get_ulong_type() 17 + return ((entry.cast(ulong_type) & 3) == 2) 18 + 19 + def xa_mk_internal(v): 20 + return ((v << 2) | 2) 21 + 22 + def xa_is_zero(entry): 23 + ulong_type = utils.get_ulong_type() 24 + return entry.cast(ulong_type) == xa_mk_internal(257) 25 + 26 + def xa_is_node(entry): 27 + ulong_type = utils.get_ulong_type() 28 + return xa_is_internal(entry) and (entry.cast(ulong_type) > 4096)
+4
tools/include/linux/kallsyms.h
··· 18 18 return NULL; 19 19 } 20 20 21 + #ifdef HAVE_BACKTRACE_SUPPORT 21 22 #include <execinfo.h> 22 23 #include <stdlib.h> 23 24 static inline void print_ip_sym(const char *loglvl, unsigned long ip) ··· 31 30 32 31 free(name); 33 32 } 33 + #else 34 + static inline void print_ip_sym(const char *loglvl, unsigned long ip) {} 35 + #endif 34 36 35 37 #endif