Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

scripts/gdb/aarch64: add aarch64 page operation helper commands and configs

1. Move page table debugging from mm.py to pgtable.py.

2. Add aarch64 kernel config options and memory-layout constants.

3. Add the following aarch64 page operation helper commands:
page_to_pfn, page_to_phys, pfn_to_page, page_address,
virt_to_phys, sym_to_pfn, pfn_to_kaddr, virt_to_page.

4. Only CONFIG_SPARSEMEM_VMEMMAP=y is supported for now; an illustrative
session is shown below.
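
   A hypothetical session (all values illustrative, assuming a 4K-page,
   48-bit-VA kernel with a 64-byte struct page and memstart_addr of
   0x40000000; lx-pfn_to_page and lx-pfn_to_kaddr take a decimal PFN,
   the other commands take a hex address):

     (gdb) lx-pfn_to_page 262144
     pfn_to_page(0x40000) = 0xfffffc0000000000
     (gdb) lx-page_to_pfn 0xfffffc0000000000
     page_to_pfn(0xfffffc0000000000) = 0x40000
     (gdb) lx-virt_to_phys 0xffff000000000000
     virt_to_phys(0xffff000000000000) = 0x40000000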

Link: https://lkml.kernel.org/r/20230808083020.22254-5-Kuan-Ying.Lee@mediatek.com
Signed-off-by: Kuan-Ying Lee <Kuan-Ying.Lee@mediatek.com>
Cc: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Cc: Chinwen Chang <chinwen.chang@mediatek.com>
Cc: Matthias Brugger <matthias.bgg@gmail.com>
Cc: Qun-Wei Lin <qun-wei.lin@mediatek.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Kuan-Ying Lee, committed by Andrew Morton (eb985b5d, 4d040cbc)

+617 -195
+23
scripts/gdb/linux/constants.py.in
···
 LX_CONFIG(CONFIG_X86_MCE)
 LX_CONFIG(CONFIG_X86_IO_APIC)
 LX_CONFIG(CONFIG_HAVE_KVM)
+LX_CONFIG(CONFIG_NUMA)
+LX_CONFIG(CONFIG_ARM64)
+LX_CONFIG(CONFIG_ARM64_4K_PAGES)
+LX_CONFIG(CONFIG_ARM64_16K_PAGES)
+LX_CONFIG(CONFIG_ARM64_64K_PAGES)
+if IS_BUILTIN(CONFIG_ARM64):
+    LX_VALUE(CONFIG_ARM64_PA_BITS)
+    LX_VALUE(CONFIG_ARM64_VA_BITS)
+    LX_VALUE(CONFIG_ARM64_PAGE_SHIFT)
+    LX_VALUE(CONFIG_ARCH_FORCE_MAX_ORDER)
+LX_CONFIG(CONFIG_SPARSEMEM)
+LX_CONFIG(CONFIG_SPARSEMEM_EXTREME)
+LX_CONFIG(CONFIG_SPARSEMEM_VMEMMAP)
+LX_CONFIG(CONFIG_KASAN)
+LX_CONFIG(CONFIG_KASAN_GENERIC)
+LX_CONFIG(CONFIG_KASAN_SW_TAGS)
+LX_CONFIG(CONFIG_KASAN_HW_TAGS)
+if IS_BUILTIN(CONFIG_KASAN_GENERIC) or IS_BUILTIN(CONFIG_KASAN_SW_TAGS):
+    LX_VALUE(CONFIG_KASAN_SHADOW_OFFSET)
+LX_CONFIG(CONFIG_VMAP_STACK)
+if IS_BUILTIN(CONFIG_NUMA):
+    LX_VALUE(CONFIG_NODES_SHIFT)
+LX_CONFIG(CONFIG_DEBUG_VIRTUAL)
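
Each LX_CONFIG() entry becomes an LX_CONFIG_* attribute of the generated
linux.constants module (truthy when the option is built in), and each
LX_VALUE() entry carries the option's numeric value, so helper scripts can
branch on the kernel configuration at runtime. A minimal sketch of the
consuming side, mirroring the SECTION_SIZE_BITS selection in mm.py below:

    from linux import constants

    # arm64 sparsemem sections are larger with 64K pages (2^29 vs 2^27 bytes)
    if constants.LX_CONFIG_ARM64_64K_PAGES:
        SECTION_SIZE_BITS = 29
    else:
        SECTION_SIZE_BITS = 27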
+370 -194
scripts/gdb/linux/mm.py
···
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: GPL-2.0
 #
-# gdb helper commands and functions for Linux kernel debugging
-#
-#  routines to introspect page table
+# Copyright (c) 2023 MediaTek Inc.
 #
 # Authors:
-#  Dmitrii Bundin <dmitrii.bundin.a@gmail.com>
+#  Kuan-Ying Lee <Kuan-Ying.Lee@mediatek.com>
 #

 import gdb
+import math
+from linux import utils, constants

-from linux import utils
+def DIV_ROUND_UP(n,d):
+    return ((n) + (d) - 1) // (d)

-PHYSICAL_ADDRESS_MASK = gdb.parse_and_eval('0xfffffffffffff')
-
-
-def page_mask(level=1):
-    # 4KB
-    if level == 1:
-        return gdb.parse_and_eval('(u64) ~0xfff')
-    # 2MB
-    elif level == 2:
-        return gdb.parse_and_eval('(u64) ~0x1fffff')
-    # 1GB
-    elif level == 3:
-        return gdb.parse_and_eval('(u64) ~0x3fffffff')
+def test_bit(nr, addr):
+    if addr.dereference() & (0x1 << nr):
+        return True
     else:
-        raise Exception(f'Unknown page level: {level}')
+        return False

+class page_ops():
+    ops = None
+    def __init__(self):
+        if not constants.LX_CONFIG_SPARSEMEM_VMEMMAP:
+            raise gdb.GdbError('Only support CONFIG_SPARSEMEM_VMEMMAP now')
+        if constants.LX_CONFIG_ARM64 and utils.is_target_arch('aarch64'):
+            self.ops = aarch64_page_ops()
+        else:
+            raise gdb.GdbError('Only support aarch64 now')

-#page_offset_base in case CONFIG_DYNAMIC_MEMORY_LAYOUT is disabled
-POB_NO_DYNAMIC_MEM_LAYOUT = '0xffff888000000000'
-def _page_offset_base():
-    pob_symbol = gdb.lookup_global_symbol('page_offset_base')
-    pob = pob_symbol.name if pob_symbol else POB_NO_DYNAMIC_MEM_LAYOUT
-    return gdb.parse_and_eval(pob)
+class aarch64_page_ops():
+    def __init__(self):
+        self.SUBSECTION_SHIFT = 21
+        self.SEBSECTION_SIZE = 1 << self.SUBSECTION_SHIFT
+        self.MODULES_VSIZE = 128 * 1024 * 1024

+        if constants.LX_CONFIG_ARM64_64K_PAGES:
+            self.SECTION_SIZE_BITS = 29
+        else:
+            self.SECTION_SIZE_BITS = 27
+        self.MAX_PHYSMEM_BITS = constants.LX_CONFIG_ARM64_VA_BITS

-def is_bit_defined_tupled(data, offset):
-    return offset, bool(data >> offset & 1)
+        self.PAGE_SHIFT = constants.LX_CONFIG_ARM64_PAGE_SHIFT
+        self.PAGE_SIZE = 1 << self.PAGE_SHIFT
+        self.PAGE_MASK = (~(self.PAGE_SIZE - 1)) & ((1 << 64) - 1)

-def content_tupled(data, bit_start, bit_end):
-    return (bit_start, bit_end), data >> bit_start & ((1 << (1 + bit_end - bit_start)) - 1)
+        self.VA_BITS = constants.LX_CONFIG_ARM64_VA_BITS
+        if self.VA_BITS > 48:
+            self.VA_BITS_MIN = 48
+            self.vabits_actual = gdb.parse_and_eval('vabits_actual')
+        else:
+            self.VA_BITS_MIN = self.VA_BITS
+            self.vabits_actual = self.VA_BITS
+        self.kimage_voffset = gdb.parse_and_eval('kimage_voffset') & ((1 << 64) - 1)

-def entry_va(level, phys_addr, translating_va):
-    def start_bit(level):
-        if level == 5:
-            return 48
-        elif level == 4:
-            return 39
-        elif level == 3:
-            return 30
-        elif level == 2:
-            return 21
-        elif level == 1:
-            return 12
+        self.SECTIONS_SHIFT = self.MAX_PHYSMEM_BITS - self.SECTION_SIZE_BITS
+
+        if str(constants.LX_CONFIG_ARCH_FORCE_MAX_ORDER).isdigit():
+            self.MAX_ORDER = constants.LX_CONFIG_ARCH_FORCE_MAX_ORDER
+        else:
+            self.MAX_ORDER = 11
+
+        self.MAX_ORDER_NR_PAGES = 1 << (self.MAX_ORDER - 1)
+        self.PFN_SECTION_SHIFT = self.SECTION_SIZE_BITS - self.PAGE_SHIFT
+        self.NR_MEM_SECTIONS = 1 << self.SECTIONS_SHIFT
+        self.PAGES_PER_SECTION = 1 << self.PFN_SECTION_SHIFT
+        self.PAGE_SECTION_MASK = (~(self.PAGES_PER_SECTION - 1)) & ((1 << 64) - 1)
+
+        if constants.LX_CONFIG_SPARSEMEM_EXTREME:
+            self.SECTIONS_PER_ROOT = self.PAGE_SIZE // gdb.lookup_type("struct mem_section").sizeof
+        else:
+            self.SECTIONS_PER_ROOT = 1
+
+        self.NR_SECTION_ROOTS = DIV_ROUND_UP(self.NR_MEM_SECTIONS, self.SECTIONS_PER_ROOT)
+        self.SECTION_ROOT_MASK = self.SECTIONS_PER_ROOT - 1
+        self.SUBSECTION_SHIFT = 21
+        self.SEBSECTION_SIZE = 1 << self.SUBSECTION_SHIFT
+        self.PFN_SUBSECTION_SHIFT = self.SUBSECTION_SHIFT - self.PAGE_SHIFT
+        self.PAGES_PER_SUBSECTION = 1 << self.PFN_SUBSECTION_SHIFT
+
+        self.SECTION_HAS_MEM_MAP = 1 << int(gdb.parse_and_eval('SECTION_HAS_MEM_MAP_BIT'))
+        self.SECTION_IS_EARLY = 1 << int(gdb.parse_and_eval('SECTION_IS_EARLY_BIT'))
+
+        self.struct_page_size = utils.get_page_type().sizeof
+        self.STRUCT_PAGE_MAX_SHIFT = (int)(math.log(self.struct_page_size, 2))
+
+        self.PAGE_OFFSET = self._PAGE_OFFSET(self.VA_BITS)
+        self.MODULES_VADDR = self._PAGE_END(self.VA_BITS_MIN)
+        self.MODULES_END = self.MODULES_VADDR + self.MODULES_VSIZE
+
+        self.VMEMMAP_SHIFT = (self.PAGE_SHIFT - self.STRUCT_PAGE_MAX_SHIFT)
+        self.VMEMMAP_SIZE = ((self._PAGE_END(self.VA_BITS_MIN) - self.PAGE_OFFSET) >> self.VMEMMAP_SHIFT)
+        self.VMEMMAP_START = (-(1 << (self.VA_BITS - self.VMEMMAP_SHIFT))) & 0xffffffffffffffff
+        self.VMEMMAP_END = self.VMEMMAP_START + self.VMEMMAP_SIZE
+
+        self.VMALLOC_START = self.MODULES_END
+        self.VMALLOC_END = self.VMEMMAP_START - 256 * 1024 * 1024
+
+        self.memstart_addr = gdb.parse_and_eval("memstart_addr")
+        self.PHYS_OFFSET = self.memstart_addr
+        self.vmemmap = gdb.Value(self.VMEMMAP_START).cast(utils.get_page_type().pointer()) - (self.memstart_addr >> self.PAGE_SHIFT)
+
+        self.KERNEL_START = gdb.parse_and_eval("_text")
+        self.KERNEL_END = gdb.parse_and_eval("_end")
+
+        if constants.LX_CONFIG_KASAN_GENERIC or constants.LX_CONFIG_KASAN_SW_TAGS:
+            if constants.LX_CONFIG_KASAN_GENERIC:
+                self.KASAN_SHADOW_SCALE_SHIFT = 3
            else:
-            raise Exception(f'Unknown level {level}')
-
-    entry_offset = ((translating_va >> start_bit(level)) & 511) * 8
-    entry_va = _page_offset_base() + phys_addr + entry_offset
-    return entry_va
-
-class Cr3():
-    def __init__(self, cr3, page_levels):
-        self.cr3 = cr3
-        self.page_levels = page_levels
-        self.page_level_write_through = is_bit_defined_tupled(cr3, 3)
-        self.page_level_cache_disabled = is_bit_defined_tupled(cr3, 4)
-        self.next_entry_physical_address = cr3 & PHYSICAL_ADDRESS_MASK & page_mask()
-
-    def next_entry(self, va):
-        next_level = self.page_levels
-        return PageHierarchyEntry(entry_va(next_level, self.next_entry_physical_address, va), next_level)
-
-    def mk_string(self):
-        return f"""\
-cr3:
-    {'cr3 binary data': <30} {hex(self.cr3)}
-    {'next entry physical address': <30} {hex(self.next_entry_physical_address)}
-    ---
-    {'bit' : <4} {self.page_level_write_through[0]: <10} {'page level write through': <30} {self.page_level_write_through[1]}
-    {'bit' : <4} {self.page_level_cache_disabled[0]: <10} {'page level cache disabled': <30} {self.page_level_cache_disabled[1]}
-"""
-
-
-class PageHierarchyEntry():
-    def __init__(self, address, level):
-        data = int.from_bytes(
-            memoryview(gdb.selected_inferior().read_memory(address, 8)),
-            "little"
-        )
-        if level == 1:
-            self.is_page = True
-            self.entry_present = is_bit_defined_tupled(data, 0)
-            self.read_write = is_bit_defined_tupled(data, 1)
-            self.user_access_allowed = is_bit_defined_tupled(data, 2)
-            self.page_level_write_through = is_bit_defined_tupled(data, 3)
-            self.page_level_cache_disabled = is_bit_defined_tupled(data, 4)
-            self.entry_was_accessed = is_bit_defined_tupled(data, 5)
-            self.dirty = is_bit_defined_tupled(data, 6)
-            self.pat = is_bit_defined_tupled(data, 7)
-            self.global_translation = is_bit_defined_tupled(data, 8)
-            self.page_physical_address = data & PHYSICAL_ADDRESS_MASK & page_mask(level)
-            self.next_entry_physical_address = None
-            self.hlat_restart_with_ordinary = is_bit_defined_tupled(data, 11)
-            self.protection_key = content_tupled(data, 59, 62)
-            self.executed_disable = is_bit_defined_tupled(data, 63)
+                self.KASAN_SHADOW_SCALE_SHIFT = 4
+            self.KASAN_SHADOW_OFFSET = constants.LX_CONFIG_KASAN_SHADOW_OFFSET
+            self.KASAN_SHADOW_END = (1 << (64 - self.KASAN_SHADOW_SCALE_SHIFT)) + self.KASAN_SHADOW_OFFSET
+            self.PAGE_END = self.KASAN_SHADOW_END - (1 << (self.vabits_actual - self.KASAN_SHADOW_SCALE_SHIFT))
        else:
-            page_size = is_bit_defined_tupled(data, 7)
-            page_size_bit = page_size[1]
-            self.is_page = page_size_bit
-            self.entry_present = is_bit_defined_tupled(data, 0)
-            self.read_write = is_bit_defined_tupled(data, 1)
-            self.user_access_allowed = is_bit_defined_tupled(data, 2)
-            self.page_level_write_through = is_bit_defined_tupled(data, 3)
-            self.page_level_cache_disabled = is_bit_defined_tupled(data, 4)
-            self.entry_was_accessed = is_bit_defined_tupled(data, 5)
-            self.page_size = page_size
-            self.dirty = is_bit_defined_tupled(
-                data, 6) if page_size_bit else None
-            self.global_translation = is_bit_defined_tupled(
-                data, 8) if page_size_bit else None
-            self.pat = is_bit_defined_tupled(
-                data, 12) if page_size_bit else None
-            self.page_physical_address = data & PHYSICAL_ADDRESS_MASK & page_mask(level) if page_size_bit else None
-            self.next_entry_physical_address = None if page_size_bit else data & PHYSICAL_ADDRESS_MASK & page_mask()
-            self.hlat_restart_with_ordinary = is_bit_defined_tupled(data, 11)
-            self.protection_key = content_tupled(data, 59, 62) if page_size_bit else None
-            self.executed_disable = is_bit_defined_tupled(data, 63)
-        self.address = address
-        self.page_entry_binary_data = data
-        self.page_hierarchy_level = level
+            self.PAGE_END = self._PAGE_END(self.VA_BITS_MIN)

-    def next_entry(self, va):
-        if self.is_page or not self.entry_present[1]:
-            return None
-
-        next_level = self.page_hierarchy_level - 1
-        return PageHierarchyEntry(entry_va(next_level, self.next_entry_physical_address, va), next_level)
-
-
-    def mk_string(self):
-        if not self.entry_present[1]:
-            return f"""\
-level {self.page_hierarchy_level}:
-    {'entry address': <30} {hex(self.address)}
-    {'page entry binary data': <30} {hex(self.page_entry_binary_data)}
-    ---
-    PAGE ENTRY IS NOT PRESENT!
-"""
-        elif self.is_page:
-            def page_size_line(ps_bit, ps, level):
-                return "" if level == 1 else f"{'bit': <3} {ps_bit: <5} {'page size': <30} {ps}"
-
-            return f"""\
-level {self.page_hierarchy_level}:
-    {'entry address': <30} {hex(self.address)}
-    {'page entry binary data': <30} {hex(self.page_entry_binary_data)}
-    {'page size': <30} {'1GB' if self.page_hierarchy_level == 3 else '2MB' if self.page_hierarchy_level == 2 else '4KB' if self.page_hierarchy_level == 1 else 'Unknown page size for level:' + self.page_hierarchy_level}
-    {'page physical address': <30} {hex(self.page_physical_address)}
-    ---
-    {'bit': <4} {self.entry_present[0]: <10} {'entry present': <30} {self.entry_present[1]}
-    {'bit': <4} {self.read_write[0]: <10} {'read/write access allowed': <30} {self.read_write[1]}
-    {'bit': <4} {self.user_access_allowed[0]: <10} {'user access allowed': <30} {self.user_access_allowed[1]}
-    {'bit': <4} {self.page_level_write_through[0]: <10} {'page level write through': <30} {self.page_level_write_through[1]}
-    {'bit': <4} {self.page_level_cache_disabled[0]: <10} {'page level cache disabled': <30} {self.page_level_cache_disabled[1]}
-    {'bit': <4} {self.entry_was_accessed[0]: <10} {'entry has been accessed': <30} {self.entry_was_accessed[1]}
-    {"" if self.page_hierarchy_level == 1 else f"{'bit': <4} {self.page_size[0]: <10} {'page size': <30} {self.page_size[1]}"}
-    {'bit': <4} {self.dirty[0]: <10} {'page dirty': <30} {self.dirty[1]}
-    {'bit': <4} {self.global_translation[0]: <10} {'global translation': <30} {self.global_translation[1]}
-    {'bit': <4} {self.hlat_restart_with_ordinary[0]: <10} {'restart to ordinary': <30} {self.hlat_restart_with_ordinary[1]}
-    {'bit': <4} {self.pat[0]: <10} {'pat': <30} {self.pat[1]}
-    {'bits': <4} {str(self.protection_key[0]): <10} {'protection key': <30} {self.protection_key[1]}
-    {'bit': <4} {self.executed_disable[0]: <10} {'execute disable': <30} {self.executed_disable[1]}
-"""
+        if constants.LX_CONFIG_NUMA and constants.LX_CONFIG_NODES_SHIFT:
+            self.NODE_SHIFT = constants.LX_CONFIG_NODES_SHIFT
        else:
-            return f"""\
-level {self.page_hierarchy_level}:
-    {'entry address': <30} {hex(self.address)}
-    {'page entry binary data': <30} {hex(self.page_entry_binary_data)}
-    {'next entry physical address': <30} {hex(self.next_entry_physical_address)}
-    ---
-    {'bit': <4} {self.entry_present[0]: <10} {'entry present': <30} {self.entry_present[1]}
-    {'bit': <4} {self.read_write[0]: <10} {'read/write access allowed': <30} {self.read_write[1]}
-    {'bit': <4} {self.user_access_allowed[0]: <10} {'user access allowed': <30} {self.user_access_allowed[1]}
-    {'bit': <4} {self.page_level_write_through[0]: <10} {'page level write through': <30} {self.page_level_write_through[1]}
-    {'bit': <4} {self.page_level_cache_disabled[0]: <10} {'page level cache disabled': <30} {self.page_level_cache_disabled[1]}
-    {'bit': <4} {self.entry_was_accessed[0]: <10} {'entry has been accessed': <30} {self.entry_was_accessed[1]}
-    {'bit': <4} {self.page_size[0]: <10} {'page size': <30} {self.page_size[1]}
-    {'bit': <4} {self.hlat_restart_with_ordinary[0]: <10} {'restart to ordinary': <30} {self.hlat_restart_with_ordinary[1]}
-    {'bit': <4} {self.executed_disable[0]: <10} {'execute disable': <30} {self.executed_disable[1]}
-"""
+            self.NODE_SHIFT = 0

+        self.MAX_NUMNODES = 1 << self.NODE_SHIFT

-class TranslateVM(gdb.Command):
-    """Prints the entire paging structure used to translate a given virtual address.
+    def SECTION_NR_TO_ROOT(self, sec):
+        return sec // self.SECTIONS_PER_ROOT

-    Having an address space of the currently executed process translates the virtual address
-    and prints detailed information of all paging structure levels used for the transaltion.
-    Currently supported arch: x86"""
+    def __nr_to_section(self, nr):
+        root = self.SECTION_NR_TO_ROOT(nr)
+        mem_section = gdb.parse_and_eval("mem_section")
+        return mem_section[root][nr & self.SECTION_ROOT_MASK]
+
+    def pfn_to_section_nr(self, pfn):
+        return pfn >> self.PFN_SECTION_SHIFT
+
+    def section_nr_to_pfn(self, sec):
+        return sec << self.PFN_SECTION_SHIFT
+
+    def __pfn_to_section(self, pfn):
+        return self.__nr_to_section(self.pfn_to_section_nr(pfn))
+
+    def pfn_to_section(self, pfn):
+        return self.__pfn_to_section(pfn)
+
+    def subsection_map_index(self, pfn):
+        return (pfn & ~(self.PAGE_SECTION_MASK)) // self.PAGES_PER_SUBSECTION
+
+    def pfn_section_valid(self, ms, pfn):
+        if constants.LX_CONFIG_SPARSEMEM_VMEMMAP:
+            idx = self.subsection_map_index(pfn)
+            return test_bit(idx, ms['usage']['subsection_map'])
+        else:
+            return True
+
+    def valid_section(self, mem_section):
+        if mem_section != None and (mem_section['section_mem_map'] & self.SECTION_HAS_MEM_MAP):
+            return True
+        return False
+
+    def early_section(self, mem_section):
+        if mem_section != None and (mem_section['section_mem_map'] & self.SECTION_IS_EARLY):
+            return True
+        return False
+
+    def pfn_valid(self, pfn):
+        ms = None
+        if self.PHYS_PFN(self.PFN_PHYS(pfn)) != pfn:
+            return False
+        if self.pfn_to_section_nr(pfn) >= self.NR_MEM_SECTIONS:
+            return False
+        ms = self.__pfn_to_section(pfn)
+
+        if not self.valid_section(ms):
+            return False
+        return self.early_section(ms) or self.pfn_section_valid(ms, pfn)
+
+    def _PAGE_OFFSET(self, va):
+        return (-(1 << (va))) & 0xffffffffffffffff
+
+    def _PAGE_END(self, va):
+        return (-(1 << (va - 1))) & 0xffffffffffffffff
+
+    def kasan_reset_tag(self, addr):
+        if constants.LX_CONFIG_KASAN_SW_TAGS or constants.LX_CONFIG_KASAN_HW_TAGS:
+            return int(addr) | (0xff << 56)
+        else:
+            return addr
+
+    def __is_lm_address(self, addr):
+        if (addr - self.PAGE_OFFSET) < (self.PAGE_END - self.PAGE_OFFSET):
+            return True
+        else:
+            return False
+    def __lm_to_phys(self, addr):
+        return addr - self.PAGE_OFFSET + self.PHYS_OFFSET
+
+    def __kimg_to_phys(self, addr):
+        return addr - self.kimage_voffset
+
+    def __virt_to_phys_nodebug(self, va):
+        untagged_va = self.kasan_reset_tag(va)
+        if self.__is_lm_address(untagged_va):
+            return self.__lm_to_phys(untagged_va)
+        else:
+            return self.__kimg_to_phys(untagged_va)
+
+    def __virt_to_phys(self, va):
+        if constants.LX_CONFIG_DEBUG_VIRTUAL:
+            if not self.__is_lm_address(self.kasan_reset_tag(va)):
+                raise gdb.GdbError("Warning: virt_to_phys used for non-linear address: 0x%lx\n" % va)
+        return self.__virt_to_phys_nodebug(va)
+
+    def virt_to_phys(self, va):
+        return self.__virt_to_phys(va)
+
+    def PFN_PHYS(self, pfn):
+        return pfn << self.PAGE_SHIFT
+
+    def PHYS_PFN(self, phys):
+        return phys >> self.PAGE_SHIFT
+
+    def __phys_to_virt(self, pa):
+        return (pa - self.PHYS_OFFSET) | self.PAGE_OFFSET
+
+    def __phys_to_pfn(self, pa):
+        return self.PHYS_PFN(pa)
+
+    def __pfn_to_phys(self, pfn):
+        return self.PFN_PHYS(pfn)
+
+    def __pa_symbol_nodebug(self, x):
+        return self.__kimg_to_phys(x)
+
+    def __phys_addr_symbol(self, x):
+        if constants.LX_CONFIG_DEBUG_VIRTUAL:
+            if x < self.KERNEL_START or x > self.KERNEL_END:
+                raise gdb.GdbError("0x%x exceed kernel range" % x)
+        return self.__pa_symbol_nodebug(x)
+
+    def __pa_symbol(self, x):
+        return self.__phys_addr_symbol(x)
+
+    def __va(self, pa):
+        return self.__phys_to_virt(pa)
+
+    def pfn_to_kaddr(self, pfn):
+        return self.__va(pfn << self.PAGE_SHIFT)
+
+    def virt_to_pfn(self, va):
+        return self.__phys_to_pfn(self.__virt_to_phys(va))
+
+    def sym_to_pfn(self, x):
+        return self.__phys_to_pfn(self.__pa_symbol(x))
+
+    def page_to_pfn(self, page):
+        return int(page.cast(utils.get_page_type().pointer()) - self.vmemmap.cast(utils.get_page_type().pointer()))
+
+    def page_to_phys(self, page):
+        return self.__pfn_to_phys(self.page_to_pfn(page))
+
+    def pfn_to_page(self, pfn):
+        return (self.vmemmap + pfn).cast(utils.get_page_type().pointer())
+
+    def page_to_virt(self, page):
+        if constants.LX_CONFIG_DEBUG_VIRTUAL:
+            return self.__va(self.page_to_phys(page))
+        else:
+            __idx = int((page.cast(gdb.lookup_type("unsigned long")) - self.VMEMMAP_START).cast(utils.get_ulong_type())) // self.struct_page_size
+            return self.PAGE_OFFSET + (__idx * self.PAGE_SIZE)
+
+    def virt_to_page(self, va):
+        if constants.LX_CONFIG_DEBUG_VIRTUAL:
+            return self.pfn_to_page(self.virt_to_pfn(va))
+        else:
+            __idx = int(self.kasan_reset_tag(va) - self.PAGE_OFFSET) // self.PAGE_SIZE
+            addr = self.VMEMMAP_START + (__idx * self.struct_page_size)
+            return gdb.Value(addr).cast(utils.get_page_type().pointer())
+
+    def page_address(self, page):
+        return self.page_to_virt(page)
+
+    def folio_address(self, folio):
+        return self.page_address(folio['page'].address)
+
+class LxPFN2Page(gdb.Command):
+    """PFN to struct page"""

     def __init__(self):
-        super(TranslateVM, self).__init__('translate-vm', gdb.COMMAND_USER)
+        super(LxPFN2Page, self).__init__("lx-pfn_to_page", gdb.COMMAND_USER)

     def invoke(self, arg, from_tty):
-        if utils.is_target_arch("x86"):
-            vm_address = gdb.parse_and_eval(f'{arg}')
-            cr3_data = gdb.parse_and_eval('$cr3')
-            cr4 = gdb.parse_and_eval('$cr4')
-            page_levels = 5 if cr4 & (1 << 12) else 4
-            page_entry = Cr3(cr3_data, page_levels)
-            while page_entry:
-                gdb.write(page_entry.mk_string())
-                page_entry = page_entry.next_entry(vm_address)
-        else:
-            gdb.GdbError("Virtual address translation is not"
-                         "supported for this arch")
+        argv = gdb.string_to_argv(arg)
+        pfn = int(argv[0])
+        page = page_ops().ops.pfn_to_page(pfn)
+        gdb.write("pfn_to_page(0x%x) = 0x%x\n" % (pfn, page))

+LxPFN2Page()

-TranslateVM()
+class LxPage2PFN(gdb.Command):
+    """struct page to PFN"""
+
+    def __init__(self):
+        super(LxPage2PFN, self).__init__("lx-page_to_pfn", gdb.COMMAND_USER)
+
+    def invoke(self, arg, from_tty):
+        argv = gdb.string_to_argv(arg)
+        struct_page_addr = int(argv[0], 16)
+        page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
+        pfn = page_ops().ops.page_to_pfn(page)
+        gdb.write("page_to_pfn(0x%x) = 0x%x\n" % (page, pfn))
+
+LxPage2PFN()
+
+class LxPageAddress(gdb.Command):
+    """struct page to linear mapping address"""
+
+    def __init__(self):
+        super(LxPageAddress, self).__init__("lx-page_address", gdb.COMMAND_USER)
+
+    def invoke(self, arg, from_tty):
+        argv = gdb.string_to_argv(arg)
+        struct_page_addr = int(argv[0], 16)
+        page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
+        addr = page_ops().ops.page_address(page)
+        gdb.write("page_address(0x%x) = 0x%x\n" % (page, addr))
+
+LxPageAddress()
+
+class LxPage2Phys(gdb.Command):
+    """struct page to physical address"""
+
+    def __init__(self):
+        super(LxPage2Phys, self).__init__("lx-page_to_phys", gdb.COMMAND_USER)
+
+    def invoke(self, arg, from_tty):
+        argv = gdb.string_to_argv(arg)
+        struct_page_addr = int(argv[0], 16)
+        page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
+        phys_addr = page_ops().ops.page_to_phys(page)
+        gdb.write("page_to_phys(0x%x) = 0x%x\n" % (page, phys_addr))
+
+LxPage2Phys()
+
+class LxVirt2Phys(gdb.Command):
+    """virtual address to physical address"""
+
+    def __init__(self):
+        super(LxVirt2Phys, self).__init__("lx-virt_to_phys", gdb.COMMAND_USER)
+
+    def invoke(self, arg, from_tty):
+        argv = gdb.string_to_argv(arg)
+        linear_addr = int(argv[0], 16)
+        phys_addr = page_ops().ops.virt_to_phys(linear_addr)
+        gdb.write("virt_to_phys(0x%x) = 0x%x\n" % (linear_addr, phys_addr))
+
+LxVirt2Phys()
+
+class LxVirt2Page(gdb.Command):
+    """virtual address to struct page"""
+
+    def __init__(self):
+        super(LxVirt2Page, self).__init__("lx-virt_to_page", gdb.COMMAND_USER)
+
+    def invoke(self, arg, from_tty):
+        argv = gdb.string_to_argv(arg)
+        linear_addr = int(argv[0], 16)
+        page = page_ops().ops.virt_to_page(linear_addr)
+        gdb.write("virt_to_page(0x%x) = 0x%x\n" % (linear_addr, page))
+
+LxVirt2Page()
+
+class LxSym2PFN(gdb.Command):
+    """symbol address to PFN"""
+
+    def __init__(self):
+        super(LxSym2PFN, self).__init__("lx-sym_to_pfn", gdb.COMMAND_USER)
+
+    def invoke(self, arg, from_tty):
+        argv = gdb.string_to_argv(arg)
+        sym_addr = int(argv[0], 16)
+        pfn = page_ops().ops.sym_to_pfn(sym_addr)
+        gdb.write("sym_to_pfn(0x%x) = %d\n" % (sym_addr, pfn))
+
+LxSym2PFN()
+
+class LxPFN2Kaddr(gdb.Command):
+    """PFN to kernel address"""
+
+    def __init__(self):
+        super(LxPFN2Kaddr, self).__init__("lx-pfn_to_kaddr", gdb.COMMAND_USER)
+
+    def invoke(self, arg, from_tty):
+        argv = gdb.string_to_argv(arg)
+        pfn = int(argv[0])
+        kaddr = page_ops().ops.pfn_to_kaddr(pfn)
+        gdb.write("pfn_to_kaddr(%d) = 0x%x\n" % (pfn, kaddr))
+
+LxPFN2Kaddr()
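
The command classes above all delegate to page_ops().ops, so the same
conversions are available to other gdb Python code as well; a minimal
sketch (assuming an arm64 vmlinux is loaded and the linux helper modules
are importable; the PFN is hypothetical):

    import gdb
    from linux import mm

    ops = mm.page_ops().ops          # raises gdb.GdbError unless aarch64 + SPARSEMEM_VMEMMAP
    page = ops.pfn_to_page(0x40000)  # hypothetical PFN
    gdb.write("phys = 0x%x\n" % ops.page_to_phys(page))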
+222
scripts/gdb/linux/pgtable.py
···
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# gdb helper commands and functions for Linux kernel debugging
+#
+#  routines to introspect page table
+#
+# Authors:
+#  Dmitrii Bundin <dmitrii.bundin.a@gmail.com>
+#
+
+import gdb
+
+from linux import utils
+
+PHYSICAL_ADDRESS_MASK = gdb.parse_and_eval('0xfffffffffffff')
+
+
+def page_mask(level=1):
+    # 4KB
+    if level == 1:
+        return gdb.parse_and_eval('(u64) ~0xfff')
+    # 2MB
+    elif level == 2:
+        return gdb.parse_and_eval('(u64) ~0x1fffff')
+    # 1GB
+    elif level == 3:
+        return gdb.parse_and_eval('(u64) ~0x3fffffff')
+    else:
+        raise Exception(f'Unknown page level: {level}')
+
+
+#page_offset_base in case CONFIG_DYNAMIC_MEMORY_LAYOUT is disabled
+POB_NO_DYNAMIC_MEM_LAYOUT = '0xffff888000000000'
+def _page_offset_base():
+    pob_symbol = gdb.lookup_global_symbol('page_offset_base')
+    pob = pob_symbol.name if pob_symbol else POB_NO_DYNAMIC_MEM_LAYOUT
+    return gdb.parse_and_eval(pob)
+
+
+def is_bit_defined_tupled(data, offset):
+    return offset, bool(data >> offset & 1)
+
+def content_tupled(data, bit_start, bit_end):
+    return (bit_start, bit_end), data >> bit_start & ((1 << (1 + bit_end - bit_start)) - 1)
+
+def entry_va(level, phys_addr, translating_va):
+    def start_bit(level):
+        if level == 5:
+            return 48
+        elif level == 4:
+            return 39
+        elif level == 3:
+            return 30
+        elif level == 2:
+            return 21
+        elif level == 1:
+            return 12
+        else:
+            raise Exception(f'Unknown level {level}')
+
+    entry_offset = ((translating_va >> start_bit(level)) & 511) * 8
+    entry_va = _page_offset_base() + phys_addr + entry_offset
+    return entry_va
+
+class Cr3():
+    def __init__(self, cr3, page_levels):
+        self.cr3 = cr3
+        self.page_levels = page_levels
+        self.page_level_write_through = is_bit_defined_tupled(cr3, 3)
+        self.page_level_cache_disabled = is_bit_defined_tupled(cr3, 4)
+        self.next_entry_physical_address = cr3 & PHYSICAL_ADDRESS_MASK & page_mask()
+
+    def next_entry(self, va):
+        next_level = self.page_levels
+        return PageHierarchyEntry(entry_va(next_level, self.next_entry_physical_address, va), next_level)
+
+    def mk_string(self):
+        return f"""\
+cr3:
+    {'cr3 binary data': <30} {hex(self.cr3)}
+    {'next entry physical address': <30} {hex(self.next_entry_physical_address)}
+    ---
+    {'bit' : <4} {self.page_level_write_through[0]: <10} {'page level write through': <30} {self.page_level_write_through[1]}
+    {'bit' : <4} {self.page_level_cache_disabled[0]: <10} {'page level cache disabled': <30} {self.page_level_cache_disabled[1]}
+"""
+
+
+class PageHierarchyEntry():
+    def __init__(self, address, level):
+        data = int.from_bytes(
+            memoryview(gdb.selected_inferior().read_memory(address, 8)),
+            "little"
+        )
+        if level == 1:
+            self.is_page = True
+            self.entry_present = is_bit_defined_tupled(data, 0)
+            self.read_write = is_bit_defined_tupled(data, 1)
+            self.user_access_allowed = is_bit_defined_tupled(data, 2)
+            self.page_level_write_through = is_bit_defined_tupled(data, 3)
+            self.page_level_cache_disabled = is_bit_defined_tupled(data, 4)
+            self.entry_was_accessed = is_bit_defined_tupled(data, 5)
+            self.dirty = is_bit_defined_tupled(data, 6)
+            self.pat = is_bit_defined_tupled(data, 7)
+            self.global_translation = is_bit_defined_tupled(data, 8)
+            self.page_physical_address = data & PHYSICAL_ADDRESS_MASK & page_mask(level)
+            self.next_entry_physical_address = None
+            self.hlat_restart_with_ordinary = is_bit_defined_tupled(data, 11)
+            self.protection_key = content_tupled(data, 59, 62)
+            self.executed_disable = is_bit_defined_tupled(data, 63)
+        else:
+            page_size = is_bit_defined_tupled(data, 7)
+            page_size_bit = page_size[1]
+            self.is_page = page_size_bit
+            self.entry_present = is_bit_defined_tupled(data, 0)
+            self.read_write = is_bit_defined_tupled(data, 1)
+            self.user_access_allowed = is_bit_defined_tupled(data, 2)
+            self.page_level_write_through = is_bit_defined_tupled(data, 3)
+            self.page_level_cache_disabled = is_bit_defined_tupled(data, 4)
+            self.entry_was_accessed = is_bit_defined_tupled(data, 5)
+            self.page_size = page_size
+            self.dirty = is_bit_defined_tupled(
+                data, 6) if page_size_bit else None
+            self.global_translation = is_bit_defined_tupled(
+                data, 8) if page_size_bit else None
+            self.pat = is_bit_defined_tupled(
+                data, 12) if page_size_bit else None
+            self.page_physical_address = data & PHYSICAL_ADDRESS_MASK & page_mask(level) if page_size_bit else None
+            self.next_entry_physical_address = None if page_size_bit else data & PHYSICAL_ADDRESS_MASK & page_mask()
+            self.hlat_restart_with_ordinary = is_bit_defined_tupled(data, 11)
+            self.protection_key = content_tupled(data, 59, 62) if page_size_bit else None
+            self.executed_disable = is_bit_defined_tupled(data, 63)
+        self.address = address
+        self.page_entry_binary_data = data
+        self.page_hierarchy_level = level
+
+    def next_entry(self, va):
+        if self.is_page or not self.entry_present[1]:
+            return None
+
+        next_level = self.page_hierarchy_level - 1
+        return PageHierarchyEntry(entry_va(next_level, self.next_entry_physical_address, va), next_level)
+
+
+    def mk_string(self):
+        if not self.entry_present[1]:
+            return f"""\
+level {self.page_hierarchy_level}:
+    {'entry address': <30} {hex(self.address)}
+    {'page entry binary data': <30} {hex(self.page_entry_binary_data)}
+    ---
+    PAGE ENTRY IS NOT PRESENT!
+"""
+        elif self.is_page:
+            def page_size_line(ps_bit, ps, level):
+                return "" if level == 1 else f"{'bit': <3} {ps_bit: <5} {'page size': <30} {ps}"
+
+            return f"""\
+level {self.page_hierarchy_level}:
+    {'entry address': <30} {hex(self.address)}
+    {'page entry binary data': <30} {hex(self.page_entry_binary_data)}
+    {'page size': <30} {'1GB' if self.page_hierarchy_level == 3 else '2MB' if self.page_hierarchy_level == 2 else '4KB' if self.page_hierarchy_level == 1 else 'Unknown page size for level:' + self.page_hierarchy_level}
+    {'page physical address': <30} {hex(self.page_physical_address)}
+    ---
+    {'bit': <4} {self.entry_present[0]: <10} {'entry present': <30} {self.entry_present[1]}
+    {'bit': <4} {self.read_write[0]: <10} {'read/write access allowed': <30} {self.read_write[1]}
+    {'bit': <4} {self.user_access_allowed[0]: <10} {'user access allowed': <30} {self.user_access_allowed[1]}
+    {'bit': <4} {self.page_level_write_through[0]: <10} {'page level write through': <30} {self.page_level_write_through[1]}
+    {'bit': <4} {self.page_level_cache_disabled[0]: <10} {'page level cache disabled': <30} {self.page_level_cache_disabled[1]}
+    {'bit': <4} {self.entry_was_accessed[0]: <10} {'entry has been accessed': <30} {self.entry_was_accessed[1]}
+    {"" if self.page_hierarchy_level == 1 else f"{'bit': <4} {self.page_size[0]: <10} {'page size': <30} {self.page_size[1]}"}
+    {'bit': <4} {self.dirty[0]: <10} {'page dirty': <30} {self.dirty[1]}
+    {'bit': <4} {self.global_translation[0]: <10} {'global translation': <30} {self.global_translation[1]}
+    {'bit': <4} {self.hlat_restart_with_ordinary[0]: <10} {'restart to ordinary': <30} {self.hlat_restart_with_ordinary[1]}
+    {'bit': <4} {self.pat[0]: <10} {'pat': <30} {self.pat[1]}
+    {'bits': <4} {str(self.protection_key[0]): <10} {'protection key': <30} {self.protection_key[1]}
+    {'bit': <4} {self.executed_disable[0]: <10} {'execute disable': <30} {self.executed_disable[1]}
+"""
+        else:
+            return f"""\
+level {self.page_hierarchy_level}:
+    {'entry address': <30} {hex(self.address)}
+    {'page entry binary data': <30} {hex(self.page_entry_binary_data)}
+    {'next entry physical address': <30} {hex(self.next_entry_physical_address)}
+    ---
+    {'bit': <4} {self.entry_present[0]: <10} {'entry present': <30} {self.entry_present[1]}
+    {'bit': <4} {self.read_write[0]: <10} {'read/write access allowed': <30} {self.read_write[1]}
+    {'bit': <4} {self.user_access_allowed[0]: <10} {'user access allowed': <30} {self.user_access_allowed[1]}
+    {'bit': <4} {self.page_level_write_through[0]: <10} {'page level write through': <30} {self.page_level_write_through[1]}
+    {'bit': <4} {self.page_level_cache_disabled[0]: <10} {'page level cache disabled': <30} {self.page_level_cache_disabled[1]}
+    {'bit': <4} {self.entry_was_accessed[0]: <10} {'entry has been accessed': <30} {self.entry_was_accessed[1]}
+    {'bit': <4} {self.page_size[0]: <10} {'page size': <30} {self.page_size[1]}
+    {'bit': <4} {self.hlat_restart_with_ordinary[0]: <10} {'restart to ordinary': <30} {self.hlat_restart_with_ordinary[1]}
+    {'bit': <4} {self.executed_disable[0]: <10} {'execute disable': <30} {self.executed_disable[1]}
+"""
+
+
+class TranslateVM(gdb.Command):
+    """Prints the entire paging structure used to translate a given virtual address.
+
+    Having an address space of the currently executed process translates the virtual address
+    and prints detailed information of all paging structure levels used for the transaltion.
+    Currently supported arch: x86"""
+
+    def __init__(self):
+        super(TranslateVM, self).__init__('translate-vm', gdb.COMMAND_USER)
+
+    def invoke(self, arg, from_tty):
+        if utils.is_target_arch("x86"):
+            vm_address = gdb.parse_and_eval(f'{arg}')
+            cr3_data = gdb.parse_and_eval('$cr3')
+            cr4 = gdb.parse_and_eval('$cr4')
+            page_levels = 5 if cr4 & (1 << 12) else 4
+            page_entry = Cr3(cr3_data, page_levels)
+            while page_entry:
+                gdb.write(page_entry.mk_string())
+                page_entry = page_entry.next_entry(vm_address)
+        else:
+            gdb.GdbError("Virtual address translation is not"
+                         "supported for this arch")
+
+
+TranslateVM()
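
The move leaves the command's behavior unchanged: on an x86 target it is
still invoked with a virtual-address expression, e.g. (a hypothetical
invocation):

    (gdb) translate-vm &init_task

which walks from CR3 down the 4- or 5-level hierarchy (5 when CR4 bit 12,
LA57, is set) and prints mk_string() for every entry visited.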
+2 -1
scripts/gdb/vmlinux-gdb.py
···
 import linux.genpd
 import linux.device
 import linux.vfs
-import linux.mm
+import linux.pgtable
 import linux.radixtree
 import linux.interrupts
+import linux.mm