qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio
/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#include "exec/tb-context.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#endif
#include "sysemu/cpus.h"

/* Allow translation results to be seen - the slowdown should be negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
               cpu adjustments. Required for maintaining the correct
               icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @searched_pc is not in translated code, no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)atomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}
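
/*
 * Usage sketch (illustrative only; do_one_unit() and the env fields are
 * hypothetical): a helper implementing an interruptible instruction
 * writes its progress back to the CPU state first, then polls
 * cpu_loop_exit_requested() to decide whether to bail out to the main
 * loop:
 *
 *     while (env->count) {
 *         do_one_unit(env);               // one unit of guest work
 *         env->count--;                   // progress now visible in env
 *         if (env->count && cpu_loop_exit_requested(cs)) {
 *             cpu_loop_exit_restore(cs, ra);   // ra captured via GETPC()
 *         }
 *     }
 */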

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB for all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
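
/*
 * Setup sketch (illustrative; the secure memory region and the
 * two-address-space layout are assumptions, loosely modelled on targets
 * with a secure and a non-secure view of memory). Note that
 * cpu->num_ases must be set before the first call:
 *
 *     cs->num_ases = 2;
 *     cpu_address_space_init(cs, 0, "cpu-memory", get_system_memory());
 *     cpu_address_space_init(cs, 1, "cpu-secure-memory", secure_mr);
 */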
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
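
/*
 * Call-site sketch (illustrative; walk_page_table() and its outputs are
 * hypothetical): a target's tlb_fill handler typically ends a successful
 * page table walk by installing the translation it found:
 *
 *     if (walk_page_table(env, vaddr, access_type, mmu_idx,
 *                         &paddr, &prot, &attrs)) {
 *         tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                                 paddr & TARGET_PAGE_MASK, attrs,
 *                                 prot, mmu_idx, TARGET_PAGE_SIZE);
 *     }
 */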
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
#endif
/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr. Raise an exception if the
 * page does not satisfy @access_type. Raise an exception if the
 * access (@addr, @size) hits a watchpoint. For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}
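
/*
 * Usage sketch (illustrative): a helper that must not fault halfway
 * through a multi-byte store can probe the whole destination up front;
 * probe_write() either raises the exception itself, returns NULL for an
 * I/O-backed page, or returns a host pointer safe for direct access:
 *
 *     void *host = probe_write(env, addr, len, mmu_idx, GETPC());
 *     if (host) {
 *         memset(host, 0, len);    // RAM-backed: write directly
 *     }
 */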
/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
 * the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);
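
/*
 * Non-faulting probe sketch (illustrative): first-fault loads can ask
 * whether an access would succeed without raising the exception
 * themselves; TLB_INVALID_MASK in the result signals the suppressed
 * fault:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
 *                                    true, &host, ra);  // ra from GETPC()
 *     if (flags & TLB_INVALID_MASK) {
 *         // would have faulted: record it and stop, don't touch host
 *     }
 */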
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};

extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}
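
/*
 * Lookup sketch (illustrative, simplified from the real hash-table
 * compare): a cached TB is only reusable if its compile flags match the
 * current execution mode and it has not been invalidated:
 *
 *     uint32_t cf_mask = curr_cflags() & CF_HASH_MASK;
 *     if ((tb_cflags(tb) & CF_HASH_MASK) == cf_mask &&
 *         !(tb_cflags(tb) & CF_INVALID)) {
 *         // tb may be executed or chained to
 *     }
 */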
/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2
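
/*
 * GETPC() sketch (illustrative; helper_foo_load is hypothetical): the
 * return address must be captured in the outermost helper called from
 * generated code, then threaded down so faults unwind to the right
 * guest instruction:
 *
 *     uint64_t helper_foo_load(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t ra = GETPC();    // capture here, not in callees
 *         return cpu_ldq_data_ra(env, addr, ra);
 *     }
 */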
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

/**
 * get_page_addr_code() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                target_ulong addr)
{
    return addr;
}

/**
 * get_page_addr_code_hostp() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 *
 * If @hostp is non-NULL, sets *@hostp to the host address where @addr's
 * content is kept.
 */
static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
                                                      target_ulong addr,
                                                      void **hostp)
{
    if (hostp) {
        *hostp = g2h(addr);
    }
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/**
 * get_page_addr_code() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);

/**
 * get_page_addr_code_hostp() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

/* vl.c */
extern int singlestep;

#endif
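
/*
 * Translator-side sketch (illustrative): get_page_addr_code_hostp()
 * resolves the code page and its host mapping in one step; -1 means the
 * PC is not RAM-backed and must be executed without caching:
 *
 *     void *host;
 *     tb_page_addr_t phys = get_page_addr_code_hostp(env, pc, &host);
 *     if (phys == -1) {
 *         // e.g. execution from MMIO: generate a one-insn, uncached TB
 *     }
 */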