at v2.6.17 2128 lines 72 kB view raw
1#ifndef _SPARC64_HYPERVISOR_H 2#define _SPARC64_HYPERVISOR_H 3 4/* Sun4v hypervisor interfaces and defines. 5 * 6 * Hypervisor calls are made via traps to software traps number 0x80 7 * and above. Registers %o0 to %o5 serve as argument, status, and 8 * return value registers. 9 * 10 * There are two kinds of these traps. First there are the normal 11 * "fast traps" which use software trap 0x80 and encode the function 12 * to invoke by number in register %o5. Argument and return value 13 * handling is as follows: 14 * 15 * ----------------------------------------------- 16 * | %o5 | function number | undefined | 17 * | %o0 | argument 0 | return status | 18 * | %o1 | argument 1 | return value 1 | 19 * | %o2 | argument 2 | return value 2 | 20 * | %o3 | argument 3 | return value 3 | 21 * | %o4 | argument 4 | return value 4 | 22 * ----------------------------------------------- 23 * 24 * The second type are "hyper-fast traps" which encode the function 25 * number in the software trap number itself. So these use trap 26 * numbers > 0x80. The register usage for hyper-fast traps is as 27 * follows: 28 * 29 * ----------------------------------------------- 30 * | %o0 | argument 0 | return status | 31 * | %o1 | argument 1 | return value 1 | 32 * | %o2 | argument 2 | return value 2 | 33 * | %o3 | argument 3 | return value 3 | 34 * | %o4 | argument 4 | return value 4 | 35 * ----------------------------------------------- 36 * 37 * Registers providing explicit arguments to the hypervisor calls 38 * are volatile across the call. Upon return their values are 39 * undefined unless explicitly specified as containing a particular 40 * return value by the specific call. The return status is always 41 * returned in register %o0, zero indicates a successful execution of 42 * the hypervisor call and other values indicate an error status as 43 * defined below. 
So, for example, if a hyper-fast trap takes 44 * arguments 0, 1, and 2, then %o0, %o1, and %o2 are volatile across 45 * the call and %o3, %o4, and %o5 would be preserved. 46 * 47 * If the hypervisor trap is invalid, or the fast trap function number 48 * is invalid, HV_EBADTRAP will be returned in %o0. Also, all 64-bits 49 * of the argument and return values are significant. 50 */ 51 52/* Trap numbers. */ 53#define HV_FAST_TRAP 0x80 54#define HV_MMU_MAP_ADDR_TRAP 0x83 55#define HV_MMU_UNMAP_ADDR_TRAP 0x84 56#define HV_TTRACE_ADDENTRY_TRAP 0x85 57#define HV_CORE_TRAP 0xff 58 59/* Error codes. */ 60#define HV_EOK 0 /* Successful return */ 61#define HV_ENOCPU 1 /* Invalid CPU id */ 62#define HV_ENORADDR 2 /* Invalid real address */ 63#define HV_ENOINTR 3 /* Invalid interrupt id */ 64#define HV_EBADPGSZ 4 /* Invalid pagesize encoding */ 65#define HV_EBADTSB 5 /* Invalid TSB description */ 66#define HV_EINVAL 6 /* Invalid argument */ 67#define HV_EBADTRAP 7 /* Invalid function number */ 68#define HV_EBADALIGN 8 /* Invalid address alignment */ 69#define HV_EWOULDBLOCK 9 /* Cannot complete w/o blocking */ 70#define HV_ENOACCESS 10 /* No access to resource */ 71#define HV_EIO 11 /* I/O error */ 72#define HV_ECPUERROR 12 /* CPU in error state */ 73#define HV_ENOTSUPPORTED 13 /* Function not supported */ 74#define HV_ENOMAP 14 /* No mapping found */ 75#define HV_ETOOMANY 15 /* Too many items specified */ 76 77/* mach_exit() 78 * TRAP: HV_FAST_TRAP 79 * FUNCTION: HV_FAST_MACH_EXIT 80 * ARG0: exit code 81 * ERRORS: This service does not return. 82 * 83 * Stop all CPUs in the virtual domain and place them into the stopped 84 * state. The 64-bit exit code may be passed to a service entity as 85 * the domain's exit status. On systems without a service entity, the 86 * domain will undergo a reset, and the boot firmware will be 87 * reloaded. 88 * 89 * This function will never return to the guest that invokes it. 
90 * 91 * Note: By convention an exit code of zero denotes a successful exit by 92 * the guest code. A non-zero exit code denotes a guest specific 93 * error indication. 94 * 95 */ 96#define HV_FAST_MACH_EXIT 0x00 97 98/* Domain services. */ 99 100/* mach_desc() 101 * TRAP: HV_FAST_TRAP 102 * FUNCTION: HV_FAST_MACH_DESC 103 * ARG0: buffer 104 * ARG1: length 105 * RET0: status 106 * RET1: length 107 * ERRORS: HV_EBADALIGN Buffer is badly aligned 108 * HV_ENORADDR Buffer is to an illegal real address. 109 * HV_EINVAL Buffer length is too small for complete 110 * machine description. 111 * 112 * Copy the most current machine description into the buffer indicated 113 * by the real address in ARG0. The buffer provided must be 16 byte 114 * aligned. Upon success or HV_EINVAL, this service returns the 115 * actual size of the machine description in the RET1 return value. 116 * 117 * Note: A method of determining the appropriate buffer size for the 118 * machine description is to first call this service with a buffer 119 * length of 0 bytes. 120 */ 121#define HV_FAST_MACH_DESC 0x01 122 123/* mach_exit() 124 * TRAP: HV_FAST_TRAP 125 * FUNCTION: HV_FAST_MACH_SIR 126 * ERRORS: This service does not return. 127 * 128 * Perform a software initiated reset of the virtual machine domain. 129 * All CPUs are captured as soon as possible, all hardware devices are 130 * returned to the entry default state, and the domain is restarted at 131 * the SIR (trap type 0x04) real trap table (RTBA) entry point on one 132 * of the CPUs. The single CPU restarted is selected as determined by 133 * platform specific policy. Memory is preserved across this 134 * operation. 
135 */ 136#define HV_FAST_MACH_SIR 0x02 137 138/* mach_set_soft_state() 139 * TRAP: HV_FAST_TRAP 140 * FUNCTION: HV_FAST_MACH_SET_SOFT_STATE 141 * ARG0: software state 142 * ARG1: software state description pointer 143 * RET0: status 144 * ERRORS: EINVAL software state not valid or software state 145 * description is not NULL terminated 146 * ENORADDR software state description pointer is not a 147 * valid real address 148 * EBADALIGNED software state description is not correctly 149 * aligned 150 * 151 * This allows the guest to report it's soft state to the hypervisor. There 152 * are two primary components to this state. The first part states whether 153 * the guest software is running or not. The second containts optional 154 * details specific to the software. 155 * 156 * The software state argument is defined below in HV_SOFT_STATE_*, and 157 * indicates whether the guest is operating normally or in a transitional 158 * state. 159 * 160 * The software state description argument is a real address of a data buffer 161 * of size 32-bytes aligned on a 32-byte boundary. It is treated as a NULL 162 * terminated 7-bit ASCII string of up to 31 characters not including the 163 * NULL termination. 164 */ 165#define HV_FAST_MACH_SET_SOFT_STATE 0x03 166#define HV_SOFT_STATE_NORMAL 0x01 167#define HV_SOFT_STATE_TRANSITION 0x02 168 169/* mach_get_soft_state() 170 * TRAP: HV_FAST_TRAP 171 * FUNCTION: HV_FAST_MACH_GET_SOFT_STATE 172 * ARG0: software state description pointer 173 * RET0: status 174 * RET1: software state 175 * ERRORS: ENORADDR software state description pointer is not a 176 * valid real address 177 * EBADALIGNED software state description is not correctly 178 * aligned 179 * 180 * Retrieve the current value of the guest's software state. The rules 181 * for the software state pointer are the same as for mach_set_soft_state() 182 * above. 183 */ 184#define HV_FAST_MACH_GET_SOFT_STATE 0x04 185 186/* CPU services. 
187 * 188 * CPUs represent devices that can execute software threads. A single 189 * chip that contains multiple cores or strands is represented as 190 * multiple CPUs with unique CPU identifiers. CPUs are exported to 191 * OBP via the machine description (and to the OS via the OBP device 192 * tree). CPUs are always in one of three states: stopped, running, 193 * or error. 194 * 195 * A CPU ID is a pre-assigned 16-bit value that uniquely identifies a 196 * CPU within a logical domain. Operations that are to be performed 197 * on multiple CPUs specify them via a CPU list. A CPU list is an 198 * array in real memory, of which each 16-bit word is a CPU ID. CPU 199 * lists are passed through the API as two arguments. The first is 200 * the number of entries (16-bit words) in the CPU list, and the 201 * second is the (real address) pointer to the CPU ID list. 202 */ 203 204/* cpu_start() 205 * TRAP: HV_FAST_TRAP 206 * FUNCTION: HV_FAST_CPU_START 207 * ARG0: CPU ID 208 * ARG1: PC 209 * ARG1: RTBA 210 * ARG1: target ARG0 211 * RET0: status 212 * ERRORS: ENOCPU Invalid CPU ID 213 * EINVAL Target CPU ID is not in the stopped state 214 * ENORADDR Invalid PC or RTBA real address 215 * EBADALIGN Unaligned PC or unaligned RTBA 216 * EWOULDBLOCK Starting resources are not available 217 * 218 * Start CPU with given CPU ID with PC in %pc and with a real trap 219 * base address value of RTBA. The indicated CPU must be in the 220 * stopped state. The supplied RTBA must be aligned on a 256 byte 221 * boundary. On successful completion, the specified CPU will be in 222 * the running state and will be supplied with "target ARG0" in %o0 223 * and RTBA in %tba. 
224 */ 225#define HV_FAST_CPU_START 0x10 226 227/* cpu_stop() 228 * TRAP: HV_FAST_TRAP 229 * FUNCTION: HV_FAST_CPU_STOP 230 * ARG0: CPU ID 231 * RET0: status 232 * ERRORS: ENOCPU Invalid CPU ID 233 * EINVAL Target CPU ID is the current cpu 234 * EINVAL Target CPU ID is not in the running state 235 * EWOULDBLOCK Stopping resources are not available 236 * ENOTSUPPORTED Not supported on this platform 237 * 238 * The specified CPU is stopped. The indicated CPU must be in the 239 * running state. On completion, it will be in the stopped state. It 240 * is not legal to stop the current CPU. 241 * 242 * Note: As this service cannot be used to stop the current cpu, this service 243 * may not be used to stop the last running CPU in a domain. To stop 244 * and exit a running domain, a guest must use the mach_exit() service. 245 */ 246#define HV_FAST_CPU_STOP 0x11 247 248/* cpu_yield() 249 * TRAP: HV_FAST_TRAP 250 * FUNCTION: HV_FAST_CPU_YIELD 251 * RET0: status 252 * ERRORS: No possible error. 253 * 254 * Suspend execution on the current CPU. Execution will resume when 255 * an interrupt (device, %stick_compare, or cross-call) is targeted to 256 * the CPU. On some CPUs, this API may be used by the hypervisor to 257 * save power by disabling hardware strands. 258 */ 259#define HV_FAST_CPU_YIELD 0x12 260 261#ifndef __ASSEMBLY__ 262extern unsigned long sun4v_cpu_yield(void); 263#endif 264 265/* cpu_qconf() 266 * TRAP: HV_FAST_TRAP 267 * FUNCTION: HV_FAST_CPU_QCONF 268 * ARG0: queue 269 * ARG1: base real address 270 * ARG2: number of entries 271 * RET0: status 272 * ERRORS: ENORADDR Invalid base real address 273 * EINVAL Invalid queue or number of entries is less 274 * than 2 or too large. 275 * EBADALIGN Base real address is not correctly aligned 276 * for size. 277 * 278 * Configure the given queue to be placed at the given base real 279 * address, with the given number of entries. The number of entries 280 * must be a power of 2. 
The base real address must be aligned 281 * exactly to match the queue size. Each queue entry is 64 bytes 282 * long, so for example a 32 entry queue must be aligned on a 2048 283 * byte real address boundary. 284 * 285 * The specified queue is unconfigured if the number of entries is given 286 * as zero. 287 * 288 * For the current version of this API service, the argument queue is defined 289 * as follows: 290 * 291 * queue description 292 * ----- ------------------------- 293 * 0x3c cpu mondo queue 294 * 0x3d device mondo queue 295 * 0x3e resumable error queue 296 * 0x3f non-resumable error queue 297 * 298 * Note: The maximum number of entries for each queue for a specific cpu may 299 * be determined from the machine description. 300 */ 301#define HV_FAST_CPU_QCONF 0x14 302#define HV_CPU_QUEUE_CPU_MONDO 0x3c 303#define HV_CPU_QUEUE_DEVICE_MONDO 0x3d 304#define HV_CPU_QUEUE_RES_ERROR 0x3e 305#define HV_CPU_QUEUE_NONRES_ERROR 0x3f 306 307#ifndef __ASSEMBLY__ 308extern unsigned long sun4v_cpu_qconf(unsigned long type, 309 unsigned long queue_paddr, 310 unsigned long num_queue_entries); 311#endif 312 313/* cpu_qinfo() 314 * TRAP: HV_FAST_TRAP 315 * FUNCTION: HV_FAST_CPU_QINFO 316 * ARG0: queue 317 * RET0: status 318 * RET1: base real address 319 * RET1: number of entries 320 * ERRORS: EINVAL Invalid queue 321 * 322 * Return the configuration info for the given queue. The base real 323 * address and number of entries of the defined queue are returned. 324 * The queue argument values are the same as for cpu_qconf() above. 325 * 326 * If the specified queue is a valid queue number, but no queue has 327 * been defined, the number of entries will be set to zero and the 328 * base real address returned is undefined. 
329 */ 330#define HV_FAST_CPU_QINFO 0x15 331 332/* cpu_mondo_send() 333 * TRAP: HV_FAST_TRAP 334 * FUNCTION: HV_FAST_CPU_MONDO_SEND 335 * ARG0-1: CPU list 336 * ARG2: data real address 337 * RET0: status 338 * ERRORS: EBADALIGN Mondo data is not 64-byte aligned or CPU list 339 * is not 2-byte aligned. 340 * ENORADDR Invalid data mondo address, or invalid cpu list 341 * address. 342 * ENOCPU Invalid cpu in CPU list 343 * EWOULDBLOCK Some or all of the listed CPUs did not receive 344 * the mondo 345 * ECPUERROR One or more of the listed CPUs are in error 346 * state, use HV_FAST_CPU_STATE to see which ones 347 * EINVAL CPU list includes caller's CPU ID 348 * 349 * Send a mondo interrupt to the CPUs in the given CPU list with the 350 * 64-bytes at the given data real address. The data must be 64-byte 351 * aligned. The mondo data will be delivered to the cpu_mondo queues 352 * of the recipient CPUs. 353 * 354 * In all cases, error or not, the CPUs in the CPU list to which the 355 * mondo has been successfully delivered will be indicated by having 356 * their entry in CPU list updated with the value 0xffff. 357 */ 358#define HV_FAST_CPU_MONDO_SEND 0x42 359 360#ifndef __ASSEMBLY__ 361extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count, unsigned long cpu_list_pa, unsigned long mondo_block_pa); 362#endif 363 364/* cpu_myid() 365 * TRAP: HV_FAST_TRAP 366 * FUNCTION: HV_FAST_CPU_MYID 367 * RET0: status 368 * RET1: CPU ID 369 * ERRORS: No errors defined. 370 * 371 * Return the hypervisor ID handle for the current CPU. Use by a 372 * virtual CPU to discover it's own identity. 373 */ 374#define HV_FAST_CPU_MYID 0x16 375 376/* cpu_state() 377 * TRAP: HV_FAST_TRAP 378 * FUNCTION: HV_FAST_CPU_STATE 379 * ARG0: CPU ID 380 * RET0: status 381 * RET1: state 382 * ERRORS: ENOCPU Invalid CPU ID 383 * 384 * Retrieve the current state of the CPU with the given CPU ID. 
385 */ 386#define HV_FAST_CPU_STATE 0x17 387#define HV_CPU_STATE_STOPPED 0x01 388#define HV_CPU_STATE_RUNNING 0x02 389#define HV_CPU_STATE_ERROR 0x03 390 391#ifndef __ASSEMBLY__ 392extern long sun4v_cpu_state(unsigned long cpuid); 393#endif 394 395/* cpu_set_rtba() 396 * TRAP: HV_FAST_TRAP 397 * FUNCTION: HV_FAST_CPU_SET_RTBA 398 * ARG0: RTBA 399 * RET0: status 400 * RET1: previous RTBA 401 * ERRORS: ENORADDR Invalid RTBA real address 402 * EBADALIGN RTBA is incorrectly aligned for a trap table 403 * 404 * Set the real trap base address of the local cpu to the given RTBA. 405 * The supplied RTBA must be aligned on a 256 byte boundary. Upon 406 * success the previous value of the RTBA is returned in RET1. 407 * 408 * Note: This service does not affect %tba 409 */ 410#define HV_FAST_CPU_SET_RTBA 0x18 411 412/* cpu_set_rtba() 413 * TRAP: HV_FAST_TRAP 414 * FUNCTION: HV_FAST_CPU_GET_RTBA 415 * RET0: status 416 * RET1: previous RTBA 417 * ERRORS: No possible error. 418 * 419 * Returns the current value of RTBA in RET1. 420 */ 421#define HV_FAST_CPU_GET_RTBA 0x19 422 423/* MMU services. 424 * 425 * Layout of a TSB description for mmu_tsb_ctx{,non}0() calls. 426 */ 427#ifndef __ASSEMBLY__ 428struct hv_tsb_descr { 429 unsigned short pgsz_idx; 430 unsigned short assoc; 431 unsigned int num_ttes; /* in TTEs */ 432 unsigned int ctx_idx; 433 unsigned int pgsz_mask; 434 unsigned long tsb_base; 435 unsigned long resv; 436}; 437#endif 438#define HV_TSB_DESCR_PGSZ_IDX_OFFSET 0x00 439#define HV_TSB_DESCR_ASSOC_OFFSET 0x02 440#define HV_TSB_DESCR_NUM_TTES_OFFSET 0x04 441#define HV_TSB_DESCR_CTX_IDX_OFFSET 0x08 442#define HV_TSB_DESCR_PGSZ_MASK_OFFSET 0x0c 443#define HV_TSB_DESCR_TSB_BASE_OFFSET 0x10 444#define HV_TSB_DESCR_RESV_OFFSET 0x18 445 446/* Page size bitmask. 
*/ 447#define HV_PGSZ_MASK_8K (1 << 0) 448#define HV_PGSZ_MASK_64K (1 << 1) 449#define HV_PGSZ_MASK_512K (1 << 2) 450#define HV_PGSZ_MASK_4MB (1 << 3) 451#define HV_PGSZ_MASK_32MB (1 << 4) 452#define HV_PGSZ_MASK_256MB (1 << 5) 453#define HV_PGSZ_MASK_2GB (1 << 6) 454#define HV_PGSZ_MASK_16GB (1 << 7) 455 456/* Page size index. The value given in the TSB descriptor must correspond 457 * to the smallest page size specified in the pgsz_mask page size bitmask. 458 */ 459#define HV_PGSZ_IDX_8K 0 460#define HV_PGSZ_IDX_64K 1 461#define HV_PGSZ_IDX_512K 2 462#define HV_PGSZ_IDX_4MB 3 463#define HV_PGSZ_IDX_32MB 4 464#define HV_PGSZ_IDX_256MB 5 465#define HV_PGSZ_IDX_2GB 6 466#define HV_PGSZ_IDX_16GB 7 467 468/* MMU fault status area. 469 * 470 * MMU related faults have their status and fault address information 471 * placed into a memory region made available by privileged code. Each 472 * virtual processor must make a mmu_fault_area_conf() call to tell the 473 * hypervisor where that processor's fault status should be stored. 474 * 475 * The fault status block is a multiple of 64-bytes and must be aligned 476 * on a 64-byte boundary. 
477 */ 478#ifndef __ASSEMBLY__ 479struct hv_fault_status { 480 unsigned long i_fault_type; 481 unsigned long i_fault_addr; 482 unsigned long i_fault_ctx; 483 unsigned long i_reserved[5]; 484 unsigned long d_fault_type; 485 unsigned long d_fault_addr; 486 unsigned long d_fault_ctx; 487 unsigned long d_reserved[5]; 488}; 489#endif 490#define HV_FAULT_I_TYPE_OFFSET 0x00 491#define HV_FAULT_I_ADDR_OFFSET 0x08 492#define HV_FAULT_I_CTX_OFFSET 0x10 493#define HV_FAULT_D_TYPE_OFFSET 0x40 494#define HV_FAULT_D_ADDR_OFFSET 0x48 495#define HV_FAULT_D_CTX_OFFSET 0x50 496 497#define HV_FAULT_TYPE_FAST_MISS 1 498#define HV_FAULT_TYPE_FAST_PROT 2 499#define HV_FAULT_TYPE_MMU_MISS 3 500#define HV_FAULT_TYPE_INV_RA 4 501#define HV_FAULT_TYPE_PRIV_VIOL 5 502#define HV_FAULT_TYPE_PROT_VIOL 6 503#define HV_FAULT_TYPE_NFO 7 504#define HV_FAULT_TYPE_NFO_SEFF 8 505#define HV_FAULT_TYPE_INV_VA 9 506#define HV_FAULT_TYPE_INV_ASI 10 507#define HV_FAULT_TYPE_NC_ATOMIC 11 508#define HV_FAULT_TYPE_PRIV_ACT 12 509#define HV_FAULT_TYPE_RESV1 13 510#define HV_FAULT_TYPE_UNALIGNED 14 511#define HV_FAULT_TYPE_INV_PGSZ 15 512/* Values 16 --> -2 are reserved. */ 513#define HV_FAULT_TYPE_MULTIPLE -1 514 515/* Flags argument for mmu_{map,unmap}_addr(), mmu_demap_{page,context,all}(), 516 * and mmu_{map,unmap}_perm_addr(). 517 */ 518#define HV_MMU_DMMU 0x01 519#define HV_MMU_IMMU 0x02 520#define HV_MMU_ALL (HV_MMU_DMMU | HV_MMU_IMMU) 521 522/* mmu_map_addr() 523 * TRAP: HV_MMU_MAP_ADDR_TRAP 524 * ARG0: virtual address 525 * ARG1: mmu context 526 * ARG2: TTE 527 * ARG3: flags (HV_MMU_{IMMU,DMMU}) 528 * ERRORS: EINVAL Invalid virtual address, mmu context, or flags 529 * EBADPGSZ Invalid page size value 530 * ENORADDR Invalid real address in TTE 531 * 532 * Create a non-permanent mapping using the given TTE, virtual 533 * address, and mmu context. The flags argument determines which 534 * (data, or instruction, or both) TLB the mapping gets loaded into. 
535 * 536 * The behavior is undefined if the valid bit is clear in the TTE. 537 * 538 * Note: This API call is for privileged code to specify temporary translation 539 * mappings without the need to create and manage a TSB. 540 */ 541 542/* mmu_unmap_addr() 543 * TRAP: HV_MMU_UNMAP_ADDR_TRAP 544 * ARG0: virtual address 545 * ARG1: mmu context 546 * ARG2: flags (HV_MMU_{IMMU,DMMU}) 547 * ERRORS: EINVAL Invalid virtual address, mmu context, or flags 548 * 549 * Demaps the given virtual address in the given mmu context on this 550 * CPU. This function is intended to be used to demap pages mapped 551 * with mmu_map_addr. This service is equivalent to invoking 552 * mmu_demap_page() with only the current CPU in the CPU list. The 553 * flags argument determines which (data, or instruction, or both) TLB 554 * the mapping gets unmapped from. 555 * 556 * Attempting to perform an unmap operation for a previously defined 557 * permanent mapping will have undefined results. 558 */ 559 560/* mmu_tsb_ctx0() 561 * TRAP: HV_FAST_TRAP 562 * FUNCTION: HV_FAST_MMU_TSB_CTX0 563 * ARG0: number of TSB descriptions 564 * ARG1: TSB descriptions pointer 565 * RET0: status 566 * ERRORS: ENORADDR Invalid TSB descriptions pointer or 567 * TSB base within a descriptor 568 * EBADALIGN TSB descriptions pointer is not aligned 569 * to an 8-byte boundary, or TSB base 570 * within a descriptor is not aligned for 571 * the given TSB size 572 * EBADPGSZ Invalid page size in a TSB descriptor 573 * EBADTSB Invalid associativity or size in a TSB 574 * descriptor 575 * EINVAL Invalid number of TSB descriptions, or 576 * invalid context index in a TSB 577 * descriptor, or index page size not 578 * equal to smallest page size in page 579 * size bitmask field. 580 * 581 * Configures the TSBs for the current CPU for virtual addresses with 582 * context zero. The TSB descriptions pointer is a pointer to an 583 * array of the given number of TSB descriptions. 
584 * 585 * Note: The maximum number of TSBs available to a virtual CPU is given by the 586 * mmu-max-#tsbs property of the cpu's corresponding "cpu" node in the 587 * machine description. 588 */ 589#define HV_FAST_MMU_TSB_CTX0 0x20 590 591/* mmu_tsb_ctxnon0() 592 * TRAP: HV_FAST_TRAP 593 * FUNCTION: HV_FAST_MMU_TSB_CTXNON0 594 * ARG0: number of TSB descriptions 595 * ARG1: TSB descriptions pointer 596 * RET0: status 597 * ERRORS: Same as for mmu_tsb_ctx0() above. 598 * 599 * Configures the TSBs for the current CPU for virtual addresses with 600 * non-zero contexts. The TSB descriptions pointer is a pointer to an 601 * array of the given number of TSB descriptions. 602 * 603 * Note: A maximum of 16 TSBs may be specified in the TSB description list. 604 */ 605#define HV_FAST_MMU_TSB_CTXNON0 0x21 606 607/* mmu_demap_page() 608 * TRAP: HV_FAST_TRAP 609 * FUNCTION: HV_FAST_MMU_DEMAP_PAGE 610 * ARG0: reserved, must be zero 611 * ARG1: reserved, must be zero 612 * ARG2: virtual address 613 * ARG3: mmu context 614 * ARG4: flags (HV_MMU_{IMMU,DMMU}) 615 * RET0: status 616 * ERRORS: EINVAL Invalid virutal address, context, or 617 * flags value 618 * ENOTSUPPORTED ARG0 or ARG1 is non-zero 619 * 620 * Demaps any page mapping of the given virtual address in the given 621 * mmu context for the current virtual CPU. Any virtually tagged 622 * caches are guaranteed to be kept consistent. The flags argument 623 * determines which TLB (instruction, or data, or both) participate in 624 * the operation. 625 * 626 * ARG0 and ARG1 are both reserved and must be set to zero. 
627 */ 628#define HV_FAST_MMU_DEMAP_PAGE 0x22 629 630/* mmu_demap_ctx() 631 * TRAP: HV_FAST_TRAP 632 * FUNCTION: HV_FAST_MMU_DEMAP_CTX 633 * ARG0: reserved, must be zero 634 * ARG1: reserved, must be zero 635 * ARG2: mmu context 636 * ARG3: flags (HV_MMU_{IMMU,DMMU}) 637 * RET0: status 638 * ERRORS: EINVAL Invalid context or flags value 639 * ENOTSUPPORTED ARG0 or ARG1 is non-zero 640 * 641 * Demaps all non-permanent virtual page mappings previously specified 642 * for the given context for the current virtual CPU. Any virtual 643 * tagged caches are guaranteed to be kept consistent. The flags 644 * argument determines which TLB (instruction, or data, or both) 645 * participate in the operation. 646 * 647 * ARG0 and ARG1 are both reserved and must be set to zero. 648 */ 649#define HV_FAST_MMU_DEMAP_CTX 0x23 650 651/* mmu_demap_all() 652 * TRAP: HV_FAST_TRAP 653 * FUNCTION: HV_FAST_MMU_DEMAP_ALL 654 * ARG0: reserved, must be zero 655 * ARG1: reserved, must be zero 656 * ARG2: flags (HV_MMU_{IMMU,DMMU}) 657 * RET0: status 658 * ERRORS: EINVAL Invalid flags value 659 * ENOTSUPPORTED ARG0 or ARG1 is non-zero 660 * 661 * Demaps all non-permanent virtual page mappings previously specified 662 * for the current virtual CPU. Any virtual tagged caches are 663 * guaranteed to be kept consistent. The flags argument determines 664 * which TLB (instruction, or data, or both) participate in the 665 * operation. 666 * 667 * ARG0 and ARG1 are both reserved and must be set to zero. 
668 */ 669#define HV_FAST_MMU_DEMAP_ALL 0x24 670 671/* mmu_map_perm_addr() 672 * TRAP: HV_FAST_TRAP 673 * FUNCTION: HV_FAST_MMU_MAP_PERM_ADDR 674 * ARG0: virtual address 675 * ARG1: reserved, must be zero 676 * ARG2: TTE 677 * ARG3: flags (HV_MMU_{IMMU,DMMU}) 678 * RET0: status 679 * ERRORS: EINVAL Invalid virutal address or flags value 680 * EBADPGSZ Invalid page size value 681 * ENORADDR Invalid real address in TTE 682 * ETOOMANY Too many mappings (max of 8 reached) 683 * 684 * Create a permanent mapping using the given TTE and virtual address 685 * for context 0 on the calling virtual CPU. A maximum of 8 such 686 * permanent mappings may be specified by privileged code. Mappings 687 * may be removed with mmu_unmap_perm_addr(). 688 * 689 * The behavior is undefined if a TTE with the valid bit clear is given. 690 * 691 * Note: This call is used to specify address space mappings for which 692 * privileged code does not expect to receive misses. For example, 693 * this mechanism can be used to map kernel nucleus code and data. 694 */ 695#define HV_FAST_MMU_MAP_PERM_ADDR 0x25 696 697/* mmu_fault_area_conf() 698 * TRAP: HV_FAST_TRAP 699 * FUNCTION: HV_FAST_MMU_FAULT_AREA_CONF 700 * ARG0: real address 701 * RET0: status 702 * RET1: previous mmu fault area real address 703 * ERRORS: ENORADDR Invalid real address 704 * EBADALIGN Invalid alignment for fault area 705 * 706 * Configure the MMU fault status area for the calling CPU. A 64-byte 707 * aligned real address specifies where MMU fault status information 708 * is placed. The return value is the previously specified area, or 0 709 * for the first invocation. Specifying a fault area at real address 710 * 0 is not allowed. 711 */ 712#define HV_FAST_MMU_FAULT_AREA_CONF 0x26 713 714/* mmu_enable() 715 * TRAP: HV_FAST_TRAP 716 * FUNCTION: HV_FAST_MMU_ENABLE 717 * ARG0: enable flag 718 * ARG1: return target address 719 * RET0: status 720 * ERRORS: ENORADDR Invalid real address when disabling 721 * translation. 
722 * EBADALIGN The return target address is not 723 * aligned to an instruction. 724 * EINVAL The enable flag request the current 725 * operating mode (e.g. disable if already 726 * disabled) 727 * 728 * Enable or disable virtual address translation for the calling CPU 729 * within the virtual machine domain. If the enable flag is zero, 730 * translation is disabled, any non-zero value will enable 731 * translation. 732 * 733 * When this function returns, the newly selected translation mode 734 * will be active. If the mmu is being enabled, then the return 735 * target address is a virtual address else it is a real address. 736 * 737 * Upon successful completion, control will be returned to the given 738 * return target address (ie. the cpu will jump to that address). On 739 * failure, the previous mmu mode remains and the trap simply returns 740 * as normal with the appropriate error code in RET0. 741 */ 742#define HV_FAST_MMU_ENABLE 0x27 743 744/* mmu_unmap_perm_addr() 745 * TRAP: HV_FAST_TRAP 746 * FUNCTION: HV_FAST_MMU_UNMAP_PERM_ADDR 747 * ARG0: virtual address 748 * ARG1: reserved, must be zero 749 * ARG2: flags (HV_MMU_{IMMU,DMMU}) 750 * RET0: status 751 * ERRORS: EINVAL Invalid virutal address or flags value 752 * ENOMAP Specified mapping was not found 753 * 754 * Demaps any permanent page mapping (established via 755 * mmu_map_perm_addr()) at the given virtual address for context 0 on 756 * the current virtual CPU. Any virtual tagged caches are guaranteed 757 * to be kept consistent. 
758 */ 759#define HV_FAST_MMU_UNMAP_PERM_ADDR 0x28 760 761/* mmu_tsb_ctx0_info() 762 * TRAP: HV_FAST_TRAP 763 * FUNCTION: HV_FAST_MMU_TSB_CTX0_INFO 764 * ARG0: max TSBs 765 * ARG1: buffer pointer 766 * RET0: status 767 * RET1: number of TSBs 768 * ERRORS: EINVAL Supplied buffer is too small 769 * EBADALIGN The buffer pointer is badly aligned 770 * ENORADDR Invalid real address for buffer pointer 771 * 772 * Return the TSB configuration as previous defined by mmu_tsb_ctx0() 773 * into the provided buffer. The size of the buffer is given in ARG1 774 * in terms of the number of TSB description entries. 775 * 776 * Upon return, RET1 always contains the number of TSB descriptions 777 * previously configured. If zero TSBs were configured, EOK is 778 * returned with RET1 containing 0. 779 */ 780#define HV_FAST_MMU_TSB_CTX0_INFO 0x29 781 782/* mmu_tsb_ctxnon0_info() 783 * TRAP: HV_FAST_TRAP 784 * FUNCTION: HV_FAST_MMU_TSB_CTXNON0_INFO 785 * ARG0: max TSBs 786 * ARG1: buffer pointer 787 * RET0: status 788 * RET1: number of TSBs 789 * ERRORS: EINVAL Supplied buffer is too small 790 * EBADALIGN The buffer pointer is badly aligned 791 * ENORADDR Invalid real address for buffer pointer 792 * 793 * Return the TSB configuration as previous defined by 794 * mmu_tsb_ctxnon0() into the provided buffer. The size of the buffer 795 * is given in ARG1 in terms of the number of TSB description entries. 796 * 797 * Upon return, RET1 always contains the number of TSB descriptions 798 * previously configured. If zero TSBs were configured, EOK is 799 * returned with RET1 containing 0. 800 */ 801#define HV_FAST_MMU_TSB_CTXNON0_INFO 0x2a 802 803/* mmu_fault_area_info() 804 * TRAP: HV_FAST_TRAP 805 * FUNCTION: HV_FAST_MMU_FAULT_AREA_INFO 806 * RET0: status 807 * RET1: fault area real address 808 * ERRORS: No errors defined. 809 * 810 * Return the currently defined MMU fault status area for the current 811 * CPU. 
The real address of the fault status area is returned in 812 * RET1, or 0 is returned in RET1 if no fault status area is defined. 813 * 814 * Note: mmu_fault_area_conf() may be called with the return value (RET1) 815 * from this service if there is a need to save and restore the fault 816 * area for a cpu. 817 */ 818#define HV_FAST_MMU_FAULT_AREA_INFO 0x2b 819 820/* Cache and Memory services. */ 821 822/* mem_scrub() 823 * TRAP: HV_FAST_TRAP 824 * FUNCTION: HV_FAST_MEM_SCRUB 825 * ARG0: real address 826 * ARG1: length 827 * RET0: status 828 * RET1: length scrubbed 829 * ERRORS: ENORADDR Invalid real address 830 * EBADALIGN Start address or length are not correctly 831 * aligned 832 * EINVAL Length is zero 833 * 834 * Zero the memory contents in the range real address to real address 835 * plus length minus 1. Also, valid ECC will be generated for that 836 * memory address range. Scrubbing is started at the given real 837 * address, but may not scrub the entire given length. The actual 838 * length scrubbed will be returned in RET1. 839 * 840 * The real address and length must be aligned on an 8K boundary, or 841 * contain the start address and length from a sun4v error report. 842 * 843 * Note: There are two uses for this function. The first use is to block clear 844 * and initialize memory and the second is to scrub an u ncorrectable 845 * error reported via a resumable or non-resumable trap. The second 846 * use requires the arguments to be equal to the real address and length 847 * provided in a sun4v memory error report. 
848 */ 849#define HV_FAST_MEM_SCRUB 0x31 850 851/* mem_sync() 852 * TRAP: HV_FAST_TRAP 853 * FUNCTION: HV_FAST_MEM_SYNC 854 * ARG0: real address 855 * ARG1: length 856 * RET0: status 857 * RET1: length synced 858 * ERRORS: ENORADDR Invalid real address 859 * EBADALIGN Start address or length are not correctly 860 * aligned 861 * EINVAL Length is zero 862 * 863 * Force the next access within the real address to real address plus 864 * length minus 1 to be fetches from main system memory. Less than 865 * the given length may be synced, the actual amount synced is 866 * returned in RET1. The real address and length must be aligned on 867 * an 8K boundary. 868 */ 869#define HV_FAST_MEM_SYNC 0x32 870 871/* Time of day services. 872 * 873 * The hypervisor maintains the time of day on a per-domain basis. 874 * Changing the time of day in one domain does not affect the time of 875 * day on any other domain. 876 * 877 * Time is described by a single unsigned 64-bit word which is the 878 * number of seconds since the UNIX Epoch (00:00:00 UTC, January 1, 879 * 1970). 880 */ 881 882/* tod_get() 883 * TRAP: HV_FAST_TRAP 884 * FUNCTION: HV_FAST_TOD_GET 885 * RET0: status 886 * RET1: TOD 887 * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable 888 * ENOTSUPPORTED If TOD not supported on this platform 889 * 890 * Return the current time of day. May block if TOD access is 891 * temporarily not possible. 892 */ 893#define HV_FAST_TOD_GET 0x50 894 895/* tod_set() 896 * TRAP: HV_FAST_TRAP 897 * FUNCTION: HV_FAST_TOD_SET 898 * ARG0: TOD 899 * RET0: status 900 * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable 901 * ENOTSUPPORTED If TOD not supported on this platform 902 * 903 * The current time of day is set to the value specified in ARG0. May 904 * block if TOD access is temporarily not possible. 
 */
#define HV_FAST_TOD_SET			0x51

/* Console services */

/* con_getchar()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CONS_GETCHAR
 * RET0:	status
 * RET1:	character
 * ERRORS:	EWOULDBLOCK	No character available.
 *
 * Returns a character from the console device.  If no character is
 * available then an EWOULDBLOCK error is returned.  If a character is
 * available, then the returned status is EOK and the character value
 * is in RET1.
 *
 * A virtual BREAK is represented by the 64-bit value -1.
 *
 * A virtual HUP signal is represented by the 64-bit value -2.
 */
#define HV_FAST_CONS_GETCHAR		0x60

/* con_putchar()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CONS_PUTCHAR
 * ARG0:	character
 * RET0:	status
 * ERRORS:	EINVAL		Illegal character
 *		EWOULDBLOCK	Output buffer currently full, would block
 *
 * Send a character to the console device.  Only character values
 * between 0 and 255 may be used.  Values outside this range are
 * invalid except for the 64-bit value -1 which is used to send a
 * virtual BREAK.
 */
#define HV_FAST_CONS_PUTCHAR		0x61

/* Trap trace services.
 *
 * The hypervisor provides a trap tracing capability for privileged
 * code running on each virtual CPU.  Privileged code provides a
 * round-robin trap trace queue within which the hypervisor writes
 * 64-byte entries detailing hyperprivileged traps taken on behalf of
 * privileged code.  This is provided as a debugging capability for
 * privileged code.
 *
 * The trap trace control structure is 64-bytes long and placed at the
 * start (offset 0) of the trap trace buffer, and is described as
 * follows:
 */
#ifndef __ASSEMBLY__
/* Control block at the head of the trap trace buffer.  Owned and
 * updated by the hypervisor; read-only to the guest.
 */
struct hv_trap_trace_control {
	unsigned long		head_offset;	/* Most recently completed entry */
	unsigned long		tail_offset;	/* Next entry to be written */
	unsigned long		__reserved[0x30 / sizeof(unsigned long)];
};
#endif
#define HV_TRAP_TRACE_CTRL_HEAD_OFFSET	0x00
#define HV_TRAP_TRACE_CTRL_TAIL_OFFSET	0x08

/* The head offset is the offset of the most recently completed entry
 * in the trap-trace buffer.  The tail offset is the offset of the
 * next entry to be written.  The control structure is owned and
 * modified by the hypervisor.  A guest may not modify the control
 * structure contents.  Attempts to do so will result in undefined
 * behavior for the guest.
 *
 * Each trap trace buffer entry is laid out as follows:
 */
#ifndef __ASSEMBLY__
struct hv_trap_trace_entry {
	unsigned char	type;		/* Hypervisor or guest entry?	*/
	unsigned char	hpstate;	/* Hyper-privileged state	*/
	unsigned char	tl;		/* Trap level			*/
	unsigned char	gl;		/* Global register level	*/
	unsigned short	tt;		/* Trap type			*/
	unsigned short	tag;		/* Extended trap identifier	*/
	unsigned long	tstate;		/* Trap state			*/
	unsigned long	tick;		/* Tick				*/
	unsigned long	tpc;		/* Trap PC			*/
	unsigned long	f1;		/* Entry specific		*/
	unsigned long	f2;		/* Entry specific		*/
	unsigned long	f3;		/* Entry specific		*/
	unsigned long	f4;		/* Entry specific		*/
};
#endif
#define HV_TRAP_TRACE_ENTRY_TYPE	0x00
#define HV_TRAP_TRACE_ENTRY_HPSTATE	0x01
#define HV_TRAP_TRACE_ENTRY_TL		0x02
#define HV_TRAP_TRACE_ENTRY_GL		0x03
#define HV_TRAP_TRACE_ENTRY_TT		0x04
#define HV_TRAP_TRACE_ENTRY_TAG		0x06
#define HV_TRAP_TRACE_ENTRY_TSTATE	0x08
#define HV_TRAP_TRACE_ENTRY_TICK	0x10
#define HV_TRAP_TRACE_ENTRY_TPC		0x18
#define HV_TRAP_TRACE_ENTRY_F1		0x20
#define HV_TRAP_TRACE_ENTRY_F2		0x28
#define HV_TRAP_TRACE_ENTRY_F3		0x30
#define HV_TRAP_TRACE_ENTRY_F4		0x38

/* The type field is encoded as follows. */
#define HV_TRAP_TYPE_UNDEF		0x00 /* Entry content undefined     */
#define HV_TRAP_TYPE_HV			0x01 /* Hypervisor trap entry       */
#define HV_TRAP_TYPE_GUEST		0xff /* Added via ttrace_addentry() */

/* ttrace_buf_conf()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_TTRACE_BUF_CONF
 * ARG0:	real address
 * ARG1:	number of entries
 * RET0:	status
 * RET1:	number of entries
 * ERRORS:	ENORADDR	Invalid real address
 *		EINVAL		Size is too small
 *		EBADALIGN	Real address not aligned on 64-byte boundary
 *
 * Requests hypervisor trap tracing and declares a virtual CPU's trap
 * trace buffer to the hypervisor.  The real address supplies the real
 * base address of the trap trace queue and must be 64-byte aligned.
1025 * Specifying a value of 0 for the number of entries disables trap 1026 * tracing for the calling virtual CPU. The buffer allocated must be 1027 * sized for a power of two number of 64-byte trap trace entries plus 1028 * an initial 64-byte control structure. 1029 * 1030 * This may be invoked any number of times so that a virtual CPU may 1031 * relocate a trap trace buffer or create "snapshots" of information. 1032 * 1033 * If the real address is illegal or badly aligned, then trap tracing 1034 * is disabled and an error is returned. 1035 * 1036 * Upon failure with EINVAL, this service call returns in RET1 the 1037 * minimum number of buffer entries required. Upon other failures 1038 * RET1 is undefined. 1039 */ 1040#define HV_FAST_TTRACE_BUF_CONF 0x90 1041 1042/* ttrace_buf_info() 1043 * TRAP: HV_FAST_TRAP 1044 * FUNCTION: HV_FAST_TTRACE_BUF_INFO 1045 * RET0: status 1046 * RET1: real address 1047 * RET2: size 1048 * ERRORS: None defined. 1049 * 1050 * Returns the size and location of the previously declared trap-trace 1051 * buffer. In the event that no buffer was previously defined, or the 1052 * buffer is disabled, this call will return a size of zero bytes. 1053 */ 1054#define HV_FAST_TTRACE_BUF_INFO 0x91 1055 1056/* ttrace_enable() 1057 * TRAP: HV_FAST_TRAP 1058 * FUNCTION: HV_FAST_TTRACE_ENABLE 1059 * ARG0: enable 1060 * RET0: status 1061 * RET1: previous enable state 1062 * ERRORS: EINVAL No trap trace buffer currently defined 1063 * 1064 * Enable or disable trap tracing, and return the previous enabled 1065 * state in RET1. Future systems may define various flags for the 1066 * enable argument (ARG0), for the moment a guest should pass 1067 * "(uint64_t) -1" to enable, and "(uint64_t) 0" to disable all 1068 * tracing - which will ensure future compatability. 
 */
#define HV_FAST_TTRACE_ENABLE		0x92

/* ttrace_freeze()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_TTRACE_FREEZE
 * ARG0:	freeze
 * RET0:	status
 * RET1:	previous freeze state
 * ERRORS:	EINVAL		No trap trace buffer currently defined
 *
 * Freeze or unfreeze trap tracing, returning the previous freeze
 * state in RET1.  A guest should pass a non-zero value to freeze and
 * a zero value to unfreeze all tracing.  The returned previous state
 * is 0 for not frozen and 1 for frozen.
 */
#define HV_FAST_TTRACE_FREEZE		0x93

/* ttrace_addentry()
 * TRAP:	HV_TTRACE_ADDENTRY_TRAP
 * ARG0:	tag (16-bits)
 * ARG1:	data word 0
 * ARG2:	data word 1
 * ARG3:	data word 2
 * ARG4:	data word 3
 * RET0:	status
 * ERRORS:	EINVAL		No trap trace buffer currently defined
 *
 * Add an entry to the trap trace buffer.  Upon return only ARG0/RET0
 * is modified - none of the other registers holding arguments are
 * volatile across this hypervisor service.
 */

/* Core dump services.
 *
 * Since the hypervisor virtualizes and thus obscures a lot of the
 * physical machine layout and state, traditional OS crash dumps can
 * be difficult to diagnose especially when the problem is a
 * configuration error of some sort.
 *
 * The dump services provide an opaque buffer into which the
 * hypervisor can place its internal state in order to assist in
 * debugging such situations.  The contents are opaque and extremely
 * platform and hypervisor implementation specific.  The guest, during
 * a core dump, requests that the hypervisor update any information in
 * the dump buffer in preparation to being dumped as part of the
 * domain's memory image.
 */

/* dump_buf_update()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_DUMP_BUF_UPDATE
 * ARG0:	real address
 * ARG1:	size
 * RET0:	status
 * RET1:	required size of dump buffer
 * ERRORS:	ENORADDR	Invalid real address
 *		EBADALIGN	Real address is not aligned on a 64-byte
 *				boundary
 *		EINVAL		Size is non-zero but less than minimum size
 *				required
 *		ENOTSUPPORTED	Operation not supported on current logical
 *				domain
 *
 * Declare a domain dump buffer to the hypervisor.  The real address
 * provided for the domain dump buffer must be 64-byte aligned.  The
 * size specifies the size of the dump buffer and may be larger than
 * the minimum size specified in the machine description.  The
 * hypervisor will fill the dump buffer with opaque data.
 *
 * Note: A guest may elect to include dump buffer contents as part of a crash
 *       dump to assist with debugging.  This function may be called any number
 *       of times so that a guest may relocate a dump buffer, or create
 *       "snapshots" of any dump-buffer information.  Each call to
 *       dump_buf_update() atomically declares the new dump buffer to the
 *       hypervisor.
 *
 * A specified size of 0 unconfigures the dump buffer.  If the real
 * address is illegal or badly aligned, then any currently active dump
 * buffer is disabled and an error is returned.
 *
 * In the event that the call fails with EINVAL, RET1 contains the
 * minimum size required by the hypervisor for a valid dump buffer.
 */
#define HV_FAST_DUMP_BUF_UPDATE		0x94

/* dump_buf_info()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_DUMP_BUF_INFO
 * RET0:	status
 * RET1:	real address of current dump buffer
 * RET2:	size of current dump buffer
 * ERRORS:	No errors defined.
 *
 * Return the currently configured dump buffer description.
A
 * returned size of 0 bytes indicates an undefined dump buffer.  In
 * this case the return address in RET1 is undefined.
 */
#define HV_FAST_DUMP_BUF_INFO		0x95

/* Device interrupt services.
 *
 * Device interrupts are allocated to system bus bridges by the hypervisor,
 * and described to OBP in the machine description.  OBP then describes
 * these interrupts to the OS via properties in the device tree.
 *
 * Terminology:
 *
 *	cpuid		Unique opaque value which represents a target cpu.
 *
 *	devhandle	Device handle.  It uniquely identifies a device, and
 *			consists of the lower 28-bits of the hi-cell of the
 *			first entry of the device's "reg" property in the
 *			OBP device tree.
 *
 *	devino		Device interrupt number.  Specifies the relative
 *			interrupt number within the device.  The unique
 *			combination of devhandle and devino are used to
 *			identify a specific device interrupt.
 *
 *			Note: The devino value is the same as the values in the
 *			      "interrupts" property or "interrupt-map" property
 *			      in the OBP device tree for that device.
 *
 *	sysino		System interrupt number.  A 64-bit unsigned integer
 *			representing a unique interrupt within a virtual
 *			machine.
 *
 *	intr_state	A flag representing the interrupt state for a given
 *			sysino.  The state values are defined below.
 *
 *	intr_enabled	A flag representing the 'enabled' state for a given
 *			sysino.  The enable values are defined below.
1202 */ 1203 1204#define HV_INTR_STATE_IDLE 0 /* Nothing pending */ 1205#define HV_INTR_STATE_RECEIVED 1 /* Interrupt received by hardware */ 1206#define HV_INTR_STATE_DELIVERED 2 /* Interrupt delivered to queue */ 1207 1208#define HV_INTR_DISABLED 0 /* sysino not enabled */ 1209#define HV_INTR_ENABLED 1 /* sysino enabled */ 1210 1211/* intr_devino_to_sysino() 1212 * TRAP: HV_FAST_TRAP 1213 * FUNCTION: HV_FAST_INTR_DEVINO2SYSINO 1214 * ARG0: devhandle 1215 * ARG1: devino 1216 * RET0: status 1217 * RET1: sysino 1218 * ERRORS: EINVAL Invalid devhandle/devino 1219 * 1220 * Converts a device specific interrupt number of the given 1221 * devhandle/devino into a system specific ino (sysino). 1222 */ 1223#define HV_FAST_INTR_DEVINO2SYSINO 0xa0 1224 1225#ifndef __ASSEMBLY__ 1226extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle, 1227 unsigned long devino); 1228#endif 1229 1230/* intr_getenabled() 1231 * TRAP: HV_FAST_TRAP 1232 * FUNCTION: HV_FAST_INTR_GETENABLED 1233 * ARG0: sysino 1234 * RET0: status 1235 * RET1: intr_enabled (HV_INTR_{DISABLED,ENABLED}) 1236 * ERRORS: EINVAL Invalid sysino 1237 * 1238 * Returns interrupt enabled state in RET1 for the interrupt defined 1239 * by the given sysino. 1240 */ 1241#define HV_FAST_INTR_GETENABLED 0xa1 1242 1243#ifndef __ASSEMBLY__ 1244extern unsigned long sun4v_intr_getenabled(unsigned long sysino); 1245#endif 1246 1247/* intr_setenabled() 1248 * TRAP: HV_FAST_TRAP 1249 * FUNCTION: HV_FAST_INTR_SETENABLED 1250 * ARG0: sysino 1251 * ARG1: intr_enabled (HV_INTR_{DISABLED,ENABLED}) 1252 * RET0: status 1253 * ERRORS: EINVAL Invalid sysino or intr_enabled value 1254 * 1255 * Set the 'enabled' state of the interrupt sysino. 
1256 */ 1257#define HV_FAST_INTR_SETENABLED 0xa2 1258 1259#ifndef __ASSEMBLY__ 1260extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled); 1261#endif 1262 1263/* intr_getstate() 1264 * TRAP: HV_FAST_TRAP 1265 * FUNCTION: HV_FAST_INTR_GETSTATE 1266 * ARG0: sysino 1267 * RET0: status 1268 * RET1: intr_state (HV_INTR_STATE_*) 1269 * ERRORS: EINVAL Invalid sysino 1270 * 1271 * Returns current state of the interrupt defined by the given sysino. 1272 */ 1273#define HV_FAST_INTR_GETSTATE 0xa3 1274 1275#ifndef __ASSEMBLY__ 1276extern unsigned long sun4v_intr_getstate(unsigned long sysino); 1277#endif 1278 1279/* intr_setstate() 1280 * TRAP: HV_FAST_TRAP 1281 * FUNCTION: HV_FAST_INTR_SETSTATE 1282 * ARG0: sysino 1283 * ARG1: intr_state (HV_INTR_STATE_*) 1284 * RET0: status 1285 * ERRORS: EINVAL Invalid sysino or intr_state value 1286 * 1287 * Sets the current state of the interrupt described by the given sysino 1288 * value. 1289 * 1290 * Note: Setting the state to HV_INTR_STATE_IDLE clears any pending 1291 * interrupt for sysino. 1292 */ 1293#define HV_FAST_INTR_SETSTATE 0xa4 1294 1295#ifndef __ASSEMBLY__ 1296extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state); 1297#endif 1298 1299/* intr_gettarget() 1300 * TRAP: HV_FAST_TRAP 1301 * FUNCTION: HV_FAST_INTR_GETTARGET 1302 * ARG0: sysino 1303 * RET0: status 1304 * RET1: cpuid 1305 * ERRORS: EINVAL Invalid sysino 1306 * 1307 * Returns CPU that is the current target of the interrupt defined by 1308 * the given sysino. The CPU value returned is undefined if the target 1309 * has not been set via intr_settarget(). 
 */
#define HV_FAST_INTR_GETTARGET		0xa5

#ifndef __ASSEMBLY__
extern unsigned long sun4v_intr_gettarget(unsigned long sysino);
#endif

/* intr_settarget()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_INTR_SETTARGET
 * ARG0:	sysino
 * ARG1:	cpuid
 * RET0:	status
 * ERRORS:	EINVAL		Invalid sysino
 *		ENOCPU		Invalid cpuid
 *
 * Set the target CPU for the interrupt defined by the given sysino.
 */
#define HV_FAST_INTR_SETTARGET		0xa6

#ifndef __ASSEMBLY__
extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
#endif

/* PCI IO services.
 *
 * See the terminology descriptions in the device interrupt services
 * section above as those apply here too.  Here are terminology
 * definitions specific to these PCI IO services:
 *
 *	tsbnum		TSB number.  Identifies which io-tsb is used.
 *			For this version of the specification, tsbnum
 *			must be zero.
 *
 *	tsbindex	TSB index.  Identifies which entry in the TSB
 *			is used.  The first entry is zero.
 *
 *	tsbid		A 64-bit aligned data structure which contains
 *			a tsbnum and a tsbindex.  Bits 63:32 contain the
 *			tsbnum and bits 31:00 contain the tsbindex.
 *
 *			Use the HV_PCI_TSBID() macro to construct such
 *			values.
 *
 *	io_attributes	IO attributes for IOMMU mappings.  One or more
 *			of the attribute bits are stored in a 64-bit
 *			value.  The values are defined below.
 *
 *	r_addr		64-bit real address
 *
 *	pci_device	PCI device address.  A PCI device address identifies
 *			a specific device on a specific PCI bus segment.
 *			A PCI device address is a 32-bit unsigned integer
 *			with the following format:
 *
 *			00000000.bbbbbbbb.dddddfff.00000000
 *
 *			Use the HV_PCI_DEVICE_BUILD() macro to construct
 *			such values.
1369 * 1370 * pci_config_offset 1371 * PCI configureation space offset. For conventional 1372 * PCI a value between 0 and 255. For extended 1373 * configuration space, a value between 0 and 4095. 1374 * 1375 * Note: For PCI configuration space accesses, the offset 1376 * must be aligned to the access size. 1377 * 1378 * error_flag A return value which specifies if the action succeeded 1379 * or failed. 0 means no error, non-0 means some error 1380 * occurred while performing the service. 1381 * 1382 * io_sync_direction 1383 * Direction definition for pci_dma_sync(), defined 1384 * below in HV_PCI_SYNC_*. 1385 * 1386 * io_page_list A list of io_page_addresses, an io_page_address is 1387 * a real address. 1388 * 1389 * io_page_list_p A pointer to an io_page_list. 1390 * 1391 * "size based byte swap" - Some functions do size based byte swapping 1392 * which allows sw to access pointers and 1393 * counters in native form when the processor 1394 * operates in a different endianness than the 1395 * IO bus. Size-based byte swapping converts a 1396 * multi-byte field between big-endian and 1397 * little-endian format. 
 */

#define HV_PCI_MAP_ATTR_READ		0x01
#define HV_PCI_MAP_ATTR_WRITE		0x02

#define HV_PCI_DEVICE_BUILD(b,d,f)	\
	((((b) & 0xff) << 16) | \
	 (((d) & 0x1f) << 11) | \
	 (((f) & 0x07) <<  8))

#define HV_PCI_TSBID(__tsb_num, __tsb_index) \
	((((u64)(__tsb_num)) << 32UL) | ((u64)(__tsb_index)))

#define HV_PCI_SYNC_FOR_DEVICE		0x01
#define HV_PCI_SYNC_FOR_CPU		0x02

/* pci_iommu_map()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_IOMMU_MAP
 * ARG0:	devhandle
 * ARG1:	tsbid
 * ARG2:	#ttes
 * ARG3:	io_attributes
 * ARG4:	io_page_list_p
 * RET0:	status
 * RET1:	#ttes mapped
 * ERRORS:	EINVAL		Invalid devhandle/tsbnum/tsbindex/io_attributes
 *		EBADALIGN	Improperly aligned real address
 *		ENORADDR	Invalid real address
 *
 * Create IOMMU mappings in the sun4v device defined by the given
 * devhandle.  The mappings are created in the TSB defined by the
 * tsbnum component of the given tsbid.  The first mapping is created
 * in the TSB index defined by the tsbindex component of the given tsbid.
 * The call creates up to #ttes mappings, the first one at tsbnum, tsbindex,
 * the second at tsbnum, tsbindex + 1, etc.
 *
 * All mappings are created with the attributes defined by the io_attributes
 * argument.  The page mapping addresses are described in the io_page_list
 * defined by the given io_page_list_p, which is a pointer to the io_page_list.
 * The first entry in the io_page_list is the address for the first iotte, the
 * 2nd for the 2nd iotte, and so on.
 *
 * Each io_page_address in the io_page_list must be appropriately aligned.
 * #ttes must be greater than zero.  For this version of the spec, the tsbnum
 * component of the given tsbid must be zero.
 *
 * Returns the actual number of mappings created, which may be less than
 * or equal to the argument #ttes.
If the function returns a value which 1447 * is less than the #ttes, the caller may continus to call the function with 1448 * an updated tsbid, #ttes, io_page_list_p arguments until all pages are 1449 * mapped. 1450 * 1451 * Note: This function does not imply an iotte cache flush. The guest must 1452 * demap an entry before re-mapping it. 1453 */ 1454#define HV_FAST_PCI_IOMMU_MAP 0xb0 1455 1456/* pci_iommu_demap() 1457 * TRAP: HV_FAST_TRAP 1458 * FUNCTION: HV_FAST_PCI_IOMMU_DEMAP 1459 * ARG0: devhandle 1460 * ARG1: tsbid 1461 * ARG2: #ttes 1462 * RET0: status 1463 * RET1: #ttes demapped 1464 * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex 1465 * 1466 * Demap and flush IOMMU mappings in the device defined by the given 1467 * devhandle. Demaps up to #ttes entries in the TSB defined by the tsbnum 1468 * component of the given tsbid, starting at the TSB index defined by the 1469 * tsbindex component of the given tsbid. 1470 * 1471 * For this version of the spec, the tsbnum of the given tsbid must be zero. 1472 * #ttes must be greater than zero. 1473 * 1474 * Returns the actual number of ttes demapped, which may be less than or equal 1475 * to the argument #ttes. If #ttes demapped is less than #ttes, the caller 1476 * may continue to call this function with updated tsbid and #ttes arguments 1477 * until all pages are demapped. 1478 * 1479 * Note: Entries do not have to be mapped to be demapped. A demap of an 1480 * unmapped page will flush the entry from the tte cache. 1481 */ 1482#define HV_FAST_PCI_IOMMU_DEMAP 0xb1 1483 1484/* pci_iommu_getmap() 1485 * TRAP: HV_FAST_TRAP 1486 * FUNCTION: HV_FAST_PCI_IOMMU_GETMAP 1487 * ARG0: devhandle 1488 * ARG1: tsbid 1489 * RET0: status 1490 * RET1: io_attributes 1491 * RET2: real address 1492 * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex 1493 * ENOMAP Mapping is not valid, no translation exists 1494 * 1495 * Read and return the mapping in the device described by the given devhandle 1496 * and tsbid. 
If successful, the io_attributes shall be returned in RET1 1497 * and the page address of the mapping shall be returned in RET2. 1498 * 1499 * For this version of the spec, the tsbnum component of the given tsbid 1500 * must be zero. 1501 */ 1502#define HV_FAST_PCI_IOMMU_GETMAP 0xb2 1503 1504/* pci_iommu_getbypass() 1505 * TRAP: HV_FAST_TRAP 1506 * FUNCTION: HV_FAST_PCI_IOMMU_GETBYPASS 1507 * ARG0: devhandle 1508 * ARG1: real address 1509 * ARG2: io_attributes 1510 * RET0: status 1511 * RET1: io_addr 1512 * ERRORS: EINVAL Invalid devhandle/io_attributes 1513 * ENORADDR Invalid real address 1514 * ENOTSUPPORTED Function not supported in this implementation. 1515 * 1516 * Create a "special" mapping in the device described by the given devhandle, 1517 * for the given real address and attributes. Return the IO address in RET1 1518 * if successful. 1519 */ 1520#define HV_FAST_PCI_IOMMU_GETBYPASS 0xb3 1521 1522/* pci_config_get() 1523 * TRAP: HV_FAST_TRAP 1524 * FUNCTION: HV_FAST_PCI_CONFIG_GET 1525 * ARG0: devhandle 1526 * ARG1: pci_device 1527 * ARG2: pci_config_offset 1528 * ARG3: size 1529 * RET0: status 1530 * RET1: error_flag 1531 * RET2: data 1532 * ERRORS: EINVAL Invalid devhandle/pci_device/offset/size 1533 * EBADALIGN pci_config_offset not size aligned 1534 * ENOACCESS Access to this offset is not permitted 1535 * 1536 * Read PCI configuration space for the adapter described by the given 1537 * devhandle. Read size (1, 2, or 4) bytes of data from the given 1538 * pci_device, at pci_config_offset from the beginning of the device's 1539 * configuration space. If there was no error, RET1 is set to zero and 1540 * RET2 is set to the data read. Insignificant bits in RET2 are not 1541 * guarenteed to have any specific value and therefore must be ignored. 1542 * 1543 * The data returned in RET2 is size based byte swapped. 1544 * 1545 * If an error occurs during the read, set RET1 to a non-zero value. The 1546 * given pci_config_offset must be 'size' aligned. 
1547 */ 1548#define HV_FAST_PCI_CONFIG_GET 0xb4 1549 1550/* pci_config_put() 1551 * TRAP: HV_FAST_TRAP 1552 * FUNCTION: HV_FAST_PCI_CONFIG_PUT 1553 * ARG0: devhandle 1554 * ARG1: pci_device 1555 * ARG2: pci_config_offset 1556 * ARG3: size 1557 * ARG4: data 1558 * RET0: status 1559 * RET1: error_flag 1560 * ERRORS: EINVAL Invalid devhandle/pci_device/offset/size 1561 * EBADALIGN pci_config_offset not size aligned 1562 * ENOACCESS Access to this offset is not permitted 1563 * 1564 * Write PCI configuration space for the adapter described by the given 1565 * devhandle. Write size (1, 2, or 4) bytes of data in a single operation, 1566 * at pci_config_offset from the beginning of the device's configuration 1567 * space. The data argument contains the data to be written to configuration 1568 * space. Prior to writing, the data is size based byte swapped. 1569 * 1570 * If an error occurs during the write access, do not generate an error 1571 * report, do set RET1 to a non-zero value. Otherwise RET1 is zero. 1572 * The given pci_config_offset must be 'size' aligned. 1573 * 1574 * This function is permitted to read from offset zero in the configuration 1575 * space described by the given pci_device if necessary to ensure that the 1576 * write access to config space completes. 1577 */ 1578#define HV_FAST_PCI_CONFIG_PUT 0xb5 1579 1580/* pci_peek() 1581 * TRAP: HV_FAST_TRAP 1582 * FUNCTION: HV_FAST_PCI_PEEK 1583 * ARG0: devhandle 1584 * ARG1: real address 1585 * ARG2: size 1586 * RET0: status 1587 * RET1: error_flag 1588 * RET2: data 1589 * ERRORS: EINVAL Invalid devhandle or size 1590 * EBADALIGN Improperly aligned real address 1591 * ENORADDR Bad real address 1592 * ENOACCESS Guest access prohibited 1593 * 1594 * Attempt to read the IO address given by the given devhandle, real address, 1595 * and size. Size must be 1, 2, 4, or 8. The read is performed as a single 1596 * access operation using the given size. 
If an error occurs when reading 1597 * from the given location, do not generate an error report, but return a 1598 * non-zero value in RET1. If the read was successful, return zero in RET1 1599 * and return the actual data read in RET2. The data returned is size based 1600 * byte swapped. 1601 * 1602 * Non-significant bits in RET2 are not guarenteed to have any specific value 1603 * and therefore must be ignored. If RET1 is returned as non-zero, the data 1604 * value is not guarenteed to have any specific value and should be ignored. 1605 * 1606 * The caller must have permission to read from the given devhandle, real 1607 * address, which must be an IO address. The argument real address must be a 1608 * size aligned address. 1609 * 1610 * The hypervisor implementation of this function must block access to any 1611 * IO address that the guest does not have explicit permission to access. 1612 */ 1613#define HV_FAST_PCI_PEEK 0xb6 1614 1615/* pci_poke() 1616 * TRAP: HV_FAST_TRAP 1617 * FUNCTION: HV_FAST_PCI_POKE 1618 * ARG0: devhandle 1619 * ARG1: real address 1620 * ARG2: size 1621 * ARG3: data 1622 * ARG4: pci_device 1623 * RET0: status 1624 * RET1: error_flag 1625 * ERRORS: EINVAL Invalid devhandle, size, or pci_device 1626 * EBADALIGN Improperly aligned real address 1627 * ENORADDR Bad real address 1628 * ENOACCESS Guest access prohibited 1629 * ENOTSUPPORTED Function is not supported by implementation 1630 * 1631 * Attempt to write data to the IO address given by the given devhandle, 1632 * real address, and size. Size must be 1, 2, 4, or 8. The write is 1633 * performed as a single access operation using the given size. Prior to 1634 * writing the data is size based swapped. 1635 * 1636 * If an error occurs when writing to the given location, do not generate an 1637 * error report, but return a non-zero value in RET1. If the write was 1638 * successful, return zero in RET1. 
1639 * 1640 * pci_device describes the configuration address of the device being 1641 * written to. The implementation may safely read from offset 0 with 1642 * the configuration space of the device described by devhandle and 1643 * pci_device in order to guarantee that the write portion of the operation 1644 * completes 1645 * 1646 * Any error that occurs due to the read shall be reported using the normal 1647 * error reporting mechanisms .. the read error is not suppressed. 1648 * 1649 * The caller must have permission to write to the given devhandle, real 1650 * address, which must be an IO address. The argument real address must be a 1651 * size aligned address. The caller must have permission to read from 1652 * the given devhandle, pci_device cofiguration space offset 0. 1653 * 1654 * The hypervisor implementation of this function must block access to any 1655 * IO address that the guest does not have explicit permission to access. 1656 */ 1657#define HV_FAST_PCI_POKE 0xb7 1658 1659/* pci_dma_sync() 1660 * TRAP: HV_FAST_TRAP 1661 * FUNCTION: HV_FAST_PCI_DMA_SYNC 1662 * ARG0: devhandle 1663 * ARG1: real address 1664 * ARG2: size 1665 * ARG3: io_sync_direction 1666 * RET0: status 1667 * RET1: #synced 1668 * ERRORS: EINVAL Invalid devhandle or io_sync_direction 1669 * ENORADDR Bad real address 1670 * 1671 * Synchronize a memory region described by the given real address and size, 1672 * for the device defined by the given devhandle using the direction(s) 1673 * defined by the given io_sync_direction. The argument size is the size of 1674 * the memory region in bytes. 1675 * 1676 * Return the actual number of bytes synchronized in the return value #synced, 1677 * which may be less than or equal to the argument size. If the return 1678 * value #synced is less than size, the caller must continue to call this 1679 * function with updated real address and size arguments until the entire 1680 * memory region is synchronized. 
1681 */ 1682#define HV_FAST_PCI_DMA_SYNC 0xb8 1683 1684/* PCI MSI services. */ 1685 1686#define HV_MSITYPE_MSI32 0x00 1687#define HV_MSITYPE_MSI64 0x01 1688 1689#define HV_MSIQSTATE_IDLE 0x00 1690#define HV_MSIQSTATE_ERROR 0x01 1691 1692#define HV_MSIQ_INVALID 0x00 1693#define HV_MSIQ_VALID 0x01 1694 1695#define HV_MSISTATE_IDLE 0x00 1696#define HV_MSISTATE_DELIVERED 0x01 1697 1698#define HV_MSIVALID_INVALID 0x00 1699#define HV_MSIVALID_VALID 0x01 1700 1701#define HV_PCIE_MSGTYPE_PME_MSG 0x18 1702#define HV_PCIE_MSGTYPE_PME_ACK_MSG 0x1b 1703#define HV_PCIE_MSGTYPE_CORR_MSG 0x30 1704#define HV_PCIE_MSGTYPE_NONFATAL_MSG 0x31 1705#define HV_PCIE_MSGTYPE_FATAL_MSG 0x33 1706 1707#define HV_MSG_INVALID 0x00 1708#define HV_MSG_VALID 0x01 1709 1710/* pci_msiq_conf() 1711 * TRAP: HV_FAST_TRAP 1712 * FUNCTION: HV_FAST_PCI_MSIQ_CONF 1713 * ARG0: devhandle 1714 * ARG1: msiqid 1715 * ARG2: real address 1716 * ARG3: number of entries 1717 * RET0: status 1718 * ERRORS: EINVAL Invalid devhandle, msiqid or nentries 1719 * EBADALIGN Improperly aligned real address 1720 * ENORADDR Bad real address 1721 * 1722 * Configure the MSI queue given by the devhandle and msiqid arguments, 1723 * and to be placed at the given real address and be of the given 1724 * number of entries. The real address must be aligned exactly to match 1725 * the queue size. Each queue entry is 64-bytes long, so f.e. a 32 entry 1726 * queue must be aligned on a 2048 byte real address boundary. The MSI-EQ 1727 * Head and Tail are initialized so that the MSI-EQ is 'empty'. 1728 * 1729 * Implementation Note: Certain implementations have fixed sized queues. In 1730 * that case, number of entries must contain the correct 1731 * value. 
 */
#define HV_FAST_PCI_MSIQ_CONF		0xc0

/* pci_msiq_info()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSIQ_INFO
 * ARG0:	devhandle
 * ARG1:	msiqid
 * RET0:	status
 * RET1:	real address
 * RET2:	number of entries
 * ERRORS:	EINVAL		Invalid devhandle or msiqid
 *
 * Return the configuration information for the MSI queue described
 * by the given devhandle and msiqid.  The base address of the queue
 * is returned in RET1 and the number of entries is returned in RET2.
 * If the queue is unconfigured, the real address is undefined and the
 * number of entries will be returned as zero.
 */
#define HV_FAST_PCI_MSIQ_INFO		0xc1

/* pci_msiq_getvalid()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSIQ_GETVALID
 * ARG0:	devhandle
 * ARG1:	msiqid
 * RET0:	status
 * RET1:	msiqvalid	(HV_MSIQ_VALID or HV_MSIQ_INVALID)
 * ERRORS:	EINVAL		Invalid devhandle or msiqid
 *
 * Get the valid state of the MSI-EQ described by the given devhandle and
 * msiqid.
 */
#define HV_FAST_PCI_MSIQ_GETVALID	0xc2

/* pci_msiq_setvalid()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSIQ_SETVALID
 * ARG0:	devhandle
 * ARG1:	msiqid
 * ARG2:	msiqvalid	(HV_MSIQ_VALID or HV_MSIQ_INVALID)
 * RET0:	status
 * ERRORS:	EINVAL		Invalid devhandle or msiqid or msiqvalid
 *				value or MSI EQ is uninitialized
 *
 * Set the valid state of the MSI-EQ described by the given devhandle and
 * msiqid to the given msiqvalid.
 */
#define HV_FAST_PCI_MSIQ_SETVALID	0xc3

/* pci_msiq_getstate()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSIQ_GETSTATE
 * ARG0:	devhandle
 * ARG1:	msiqid
 * RET0:	status
 * RET1:	msiqstate (HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR)
 * ERRORS:	EINVAL		Invalid devhandle or msiqid
 *
 * Get the state of the MSI-EQ described by the given devhandle and
 * msiqid.
 */
#define HV_FAST_PCI_MSIQ_GETSTATE	0xc4

/* pci_msiq_setstate()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSIQ_SETSTATE
 * ARG0:	devhandle
 * ARG1:	msiqid
 * ARG2:	msiqstate (HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR)
 * RET0:	status
 * ERRORS:	EINVAL		Invalid devhandle or msiqid or msiqstate
 *			value or MSI EQ is uninitialized
 *
 * Set the state of the MSI-EQ described by the given devhandle and
 * msiqid to the given msiqstate.
 */
#define HV_FAST_PCI_MSIQ_SETSTATE	0xc5

/* pci_msiq_gethead()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSIQ_GETHEAD
 * ARG0:	devhandle
 * ARG1:	msiqid
 * RET0:	status
 * RET1:	msiqhead
 * ERRORS:	EINVAL		Invalid devhandle or msiqid
 *
 * Get the current MSI EQ queue head for the MSI-EQ described by the
 * given devhandle and msiqid.
 */
#define HV_FAST_PCI_MSIQ_GETHEAD	0xc6

/* pci_msiq_sethead()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSIQ_SETHEAD
 * ARG0:	devhandle
 * ARG1:	msiqid
 * ARG2:	msiqhead
 * RET0:	status
 * ERRORS:	EINVAL		Invalid devhandle or msiqid or msiqhead,
 *			or MSI EQ is uninitialized
 *
 * Set the current MSI EQ queue head for the MSI-EQ described by the
 * given devhandle and msiqid.
1837 */ 1838#define HV_FAST_PCI_MSIQ_SETHEAD 0xc7 1839 1840/* pci_msiq_gettail() 1841 * TRAP: HV_FAST_TRAP 1842 * FUNCTION: HV_FAST_PCI_MSIQ_GETTAIL 1843 * ARG0: devhandle 1844 * ARG1: msiqid 1845 * RET0: status 1846 * RET1: msiqtail 1847 * ERRORS: EINVAL Invalid devhandle or msiqid 1848 * 1849 * Get the current MSI EQ queue tail for the MSI-EQ described by the 1850 * given devhandle and msiqid. 1851 */ 1852#define HV_FAST_PCI_MSIQ_GETTAIL 0xc8 1853 1854/* pci_msi_getvalid() 1855 * TRAP: HV_FAST_TRAP 1856 * FUNCTION: HV_FAST_PCI_MSI_GETVALID 1857 * ARG0: devhandle 1858 * ARG1: msinum 1859 * RET0: status 1860 * RET1: msivalidstate 1861 * ERRORS: EINVAL Invalid devhandle or msinum 1862 * 1863 * Get the current valid/enabled state for the MSI defined by the 1864 * given devhandle and msinum. 1865 */ 1866#define HV_FAST_PCI_MSI_GETVALID 0xc9 1867 1868/* pci_msi_setvalid() 1869 * TRAP: HV_FAST_TRAP 1870 * FUNCTION: HV_FAST_PCI_MSI_SETVALID 1871 * ARG0: devhandle 1872 * ARG1: msinum 1873 * ARG2: msivalidstate 1874 * RET0: status 1875 * ERRORS: EINVAL Invalid devhandle or msinum or msivalidstate 1876 * 1877 * Set the current valid/enabled state for the MSI defined by the 1878 * given devhandle and msinum. 1879 */ 1880#define HV_FAST_PCI_MSI_SETVALID 0xca 1881 1882/* pci_msi_getmsiq() 1883 * TRAP: HV_FAST_TRAP 1884 * FUNCTION: HV_FAST_PCI_MSI_GETMSIQ 1885 * ARG0: devhandle 1886 * ARG1: msinum 1887 * RET0: status 1888 * RET1: msiqid 1889 * ERRORS: EINVAL Invalid devhandle or msinum or MSI is unbound 1890 * 1891 * Get the MSI EQ that the MSI defined by the given devhandle and 1892 * msinum is bound to. 
1893 */ 1894#define HV_FAST_PCI_MSI_GETMSIQ 0xcb 1895 1896/* pci_msi_setmsiq() 1897 * TRAP: HV_FAST_TRAP 1898 * FUNCTION: HV_FAST_PCI_MSI_SETMSIQ 1899 * ARG0: devhandle 1900 * ARG1: msinum 1901 * ARG2: msitype 1902 * ARG3: msiqid 1903 * RET0: status 1904 * ERRORS: EINVAL Invalid devhandle or msinum or msiqid 1905 * 1906 * Set the MSI EQ that the MSI defined by the given devhandle and 1907 * msinum is bound to. 1908 */ 1909#define HV_FAST_PCI_MSI_SETMSIQ 0xcc 1910 1911/* pci_msi_getstate() 1912 * TRAP: HV_FAST_TRAP 1913 * FUNCTION: HV_FAST_PCI_MSI_GETSTATE 1914 * ARG0: devhandle 1915 * ARG1: msinum 1916 * RET0: status 1917 * RET1: msistate 1918 * ERRORS: EINVAL Invalid devhandle or msinum 1919 * 1920 * Get the state of the MSI defined by the given devhandle and msinum. 1921 * If not initialized, return HV_MSISTATE_IDLE. 1922 */ 1923#define HV_FAST_PCI_MSI_GETSTATE 0xcd 1924 1925/* pci_msi_setstate() 1926 * TRAP: HV_FAST_TRAP 1927 * FUNCTION: HV_FAST_PCI_MSI_SETSTATE 1928 * ARG0: devhandle 1929 * ARG1: msinum 1930 * ARG2: msistate 1931 * RET0: status 1932 * ERRORS: EINVAL Invalid devhandle or msinum or msistate 1933 * 1934 * Set the state of the MSI defined by the given devhandle and msinum. 1935 */ 1936#define HV_FAST_PCI_MSI_SETSTATE 0xce 1937 1938/* pci_msg_getmsiq() 1939 * TRAP: HV_FAST_TRAP 1940 * FUNCTION: HV_FAST_PCI_MSG_GETMSIQ 1941 * ARG0: devhandle 1942 * ARG1: msgtype 1943 * RET0: status 1944 * RET1: msiqid 1945 * ERRORS: EINVAL Invalid devhandle or msgtype 1946 * 1947 * Get the MSI EQ of the MSG defined by the given devhandle and msgtype. 1948 */ 1949#define HV_FAST_PCI_MSG_GETMSIQ 0xd0 1950 1951/* pci_msg_setmsiq() 1952 * TRAP: HV_FAST_TRAP 1953 * FUNCTION: HV_FAST_PCI_MSG_SETMSIQ 1954 * ARG0: devhandle 1955 * ARG1: msgtype 1956 * ARG2: msiqid 1957 * RET0: status 1958 * ERRORS: EINVAL Invalid devhandle, msgtype, or msiqid 1959 * 1960 * Set the MSI EQ of the MSG defined by the given devhandle and msgtype. 
1961 */ 1962#define HV_FAST_PCI_MSG_SETMSIQ 0xd1 1963 1964/* pci_msg_getvalid() 1965 * TRAP: HV_FAST_TRAP 1966 * FUNCTION: HV_FAST_PCI_MSG_GETVALID 1967 * ARG0: devhandle 1968 * ARG1: msgtype 1969 * RET0: status 1970 * RET1: msgvalidstate 1971 * ERRORS: EINVAL Invalid devhandle or msgtype 1972 * 1973 * Get the valid/enabled state of the MSG defined by the given 1974 * devhandle and msgtype. 1975 */ 1976#define HV_FAST_PCI_MSG_GETVALID 0xd2 1977 1978/* pci_msg_setvalid() 1979 * TRAP: HV_FAST_TRAP 1980 * FUNCTION: HV_FAST_PCI_MSG_SETVALID 1981 * ARG0: devhandle 1982 * ARG1: msgtype 1983 * ARG2: msgvalidstate 1984 * RET0: status 1985 * ERRORS: EINVAL Invalid devhandle or msgtype or msgvalidstate 1986 * 1987 * Set the valid/enabled state of the MSG defined by the given 1988 * devhandle and msgtype. 1989 */ 1990#define HV_FAST_PCI_MSG_SETVALID 0xd3 1991 1992/* Performance counter services. */ 1993 1994#define HV_PERF_JBUS_PERF_CTRL_REG 0x00 1995#define HV_PERF_JBUS_PERF_CNT_REG 0x01 1996#define HV_PERF_DRAM_PERF_CTRL_REG_0 0x02 1997#define HV_PERF_DRAM_PERF_CNT_REG_0 0x03 1998#define HV_PERF_DRAM_PERF_CTRL_REG_1 0x04 1999#define HV_PERF_DRAM_PERF_CNT_REG_1 0x05 2000#define HV_PERF_DRAM_PERF_CTRL_REG_2 0x06 2001#define HV_PERF_DRAM_PERF_CNT_REG_2 0x07 2002#define HV_PERF_DRAM_PERF_CTRL_REG_3 0x08 2003#define HV_PERF_DRAM_PERF_CNT_REG_3 0x09 2004 2005/* get_perfreg() 2006 * TRAP: HV_FAST_TRAP 2007 * FUNCTION: HV_FAST_GET_PERFREG 2008 * ARG0: performance reg number 2009 * RET0: status 2010 * RET1: performance reg value 2011 * ERRORS: EINVAL Invalid performance register number 2012 * ENOACCESS No access allowed to performance counters 2013 * 2014 * Read the value of the given DRAM/JBUS performance counter/control register. 
 */
#define HV_FAST_GET_PERFREG		0x100

/* set_perfreg()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_SET_PERFREG
 * ARG0:	performance reg number
 * ARG1:	performance reg value
 * RET0:	status
 * ERRORS:	EINVAL		Invalid performance register number
 *		ENOACCESS	No access allowed to performance counters
 *
 * Write the given performance reg value to the given DRAM/JBUS
 * performance counter/control register.
 */
#define HV_FAST_SET_PERFREG		0x101

/* MMU statistics services.
 *
 * The hypervisor maintains MMU statistics and privileged code provides
 * a buffer where these statistics can be collected.  It is continually
 * updated once configured.  The layout is as follows:
 */
#ifndef __ASSEMBLY__
/* Layout of the statistics buffer registered via mmustat_conf() below.
 *
 * The hypervisor updates this structure continually once configured.
 * Counters are grouped first by MMU ("immu" = instruction MMU,
 * "dmmu" = data MMU), then by context (context zero vs. non-zero
 * contexts), then by TTE page size (8K/64K/4MB/256MB), giving a TSB
 * "hits" count and a "ticks" total for each combination.  The
 * __reservedN pads preserve the fixed buffer layout defined by the
 * hypervisor API and must not be repurposed.
 */
struct hv_mmu_statistics {
	/* Instruction MMU, context zero. */
	unsigned long immu_tsb_hits_ctx0_8k_tte;
	unsigned long immu_tsb_ticks_ctx0_8k_tte;
	unsigned long immu_tsb_hits_ctx0_64k_tte;
	unsigned long immu_tsb_ticks_ctx0_64k_tte;
	unsigned long __reserved1[2];
	unsigned long immu_tsb_hits_ctx0_4mb_tte;
	unsigned long immu_tsb_ticks_ctx0_4mb_tte;
	unsigned long __reserved2[2];
	unsigned long immu_tsb_hits_ctx0_256mb_tte;
	unsigned long immu_tsb_ticks_ctx0_256mb_tte;
	unsigned long __reserved3[4];
	/* Instruction MMU, non-zero contexts. */
	unsigned long immu_tsb_hits_ctxnon0_8k_tte;
	unsigned long immu_tsb_ticks_ctxnon0_8k_tte;
	unsigned long immu_tsb_hits_ctxnon0_64k_tte;
	unsigned long immu_tsb_ticks_ctxnon0_64k_tte;
	unsigned long __reserved4[2];
	unsigned long immu_tsb_hits_ctxnon0_4mb_tte;
	unsigned long immu_tsb_ticks_ctxnon0_4mb_tte;
	unsigned long __reserved5[2];
	unsigned long immu_tsb_hits_ctxnon0_256mb_tte;
	unsigned long immu_tsb_ticks_ctxnon0_256mb_tte;
	unsigned long __reserved6[4];
	/* Data MMU, context zero. */
	unsigned long dmmu_tsb_hits_ctx0_8k_tte;
	unsigned long dmmu_tsb_ticks_ctx0_8k_tte;
	unsigned long dmmu_tsb_hits_ctx0_64k_tte;
	unsigned long dmmu_tsb_ticks_ctx0_64k_tte;
	unsigned long __reserved7[2];
	unsigned long dmmu_tsb_hits_ctx0_4mb_tte;
	unsigned long dmmu_tsb_ticks_ctx0_4mb_tte;
	unsigned long __reserved8[2];
	unsigned long dmmu_tsb_hits_ctx0_256mb_tte;
	unsigned long dmmu_tsb_ticks_ctx0_256mb_tte;
	unsigned long __reserved9[4];
	/* Data MMU, non-zero contexts. */
	unsigned long dmmu_tsb_hits_ctxnon0_8k_tte;
	unsigned long dmmu_tsb_ticks_ctxnon0_8k_tte;
	unsigned long dmmu_tsb_hits_ctxnon0_64k_tte;
	unsigned long dmmu_tsb_ticks_ctxnon0_64k_tte;
	unsigned long __reserved10[2];
	unsigned long dmmu_tsb_hits_ctxnon0_4mb_tte;
	unsigned long dmmu_tsb_ticks_ctxnon0_4mb_tte;
	unsigned long __reserved11[2];
	unsigned long dmmu_tsb_hits_ctxnon0_256mb_tte;
	unsigned long dmmu_tsb_ticks_ctxnon0_256mb_tte;
	unsigned long __reserved12[4];
};
#endif

/* mmustat_conf()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMUSTAT_CONF
 * ARG0:	real address
 * RET0:	status
 * RET1:	real address
 * ERRORS:	ENORADDR	Invalid real address
 *		EBADALIGN	Real address not aligned on 64-byte boundary
 *		EBADTRAP	API not supported on this processor
 *
 * Enable MMU statistic gathering using the buffer at the given real
 * address on the current virtual CPU.  The new buffer real address
 * is given in ARG0, and the previously specified buffer real address
 * is returned in RET1, or is returned as zero for the first invocation.
 *
 * If the passed in real address argument is zero, this will disable
 * MMU statistic collection on the current virtual CPU.  If an error is
 * returned then no statistics are collected.
 *
 * The buffer contents should be initialized to all zeros before being
 * given to the hypervisor or else the statistics will be meaningless.
2108 */ 2109#define HV_FAST_MMUSTAT_CONF 0x102 2110 2111/* mmustat_info() 2112 * TRAP: HV_FAST_TRAP 2113 * FUNCTION: HV_FAST_MMUSTAT_INFO 2114 * RET0: status 2115 * RET1: real address 2116 * ERRORS: EBADTRAP API not supported on this processor 2117 * 2118 * Return the current state and real address of the currently configured 2119 * MMU statistics buffer on the current virtual CPU. 2120 */ 2121#define HV_FAST_MMUSTAT_INFO 0x103 2122 2123/* Function numbers for HV_CORE_TRAP. */ 2124#define HV_CORE_VER 0x00 2125#define HV_CORE_PUTCHAR 0x01 2126#define HV_CORE_EXIT 0x02 2127 2128#endif /* !(_SPARC64_HYPERVISOR_H) */