Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm/arm64: KVM: add vgic.h header file

vgic.c is currently a mixture of generic vGIC emulation code and
functions specific to emulating a GICv2. To ease the addition of
GICv3 later, we create new header file vgic.h, which holds constants
and prototypes of commonly used functions.
Rename some identifiers to avoid name space clutter.
I removed the long-standing comment about using the kvm_io_bus API
to tackle the GIC register ranges, as it wouldn't be a win for us
anymore.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>

-------
As the diff isn't always obvious here (and to aid eventual rebases),
here is a list of high-level changes done to the code:
* moved definitions and prototypes from vgic.c to vgic.h:
- VGIC_ADDR_UNDEF
- ACCESS_{READ,WRITE}_*
- vgic_init()
- vgic_update_state()
- vgic_kick_vcpus()
- vgic_get_vmcr()
- vgic_set_vmcr()
- struct mmio_range {} (renamed to struct kvm_mmio_range)
* removed static keyword and exported prototypes in vgic.h:
- vgic_bitmap_get_reg()
- vgic_bitmap_set_irq_val()
- vgic_bitmap_get_shared_map()
- vgic_bytemap_get_reg()
- vgic_dist_irq_set_pending()
- vgic_dist_irq_clear_pending()
- vgic_cpu_irq_clear()
- vgic_reg_access()
- handle_mmio_raz_wi()
- vgic_handle_enable_reg()
- vgic_handle_set_pending_reg()
- vgic_handle_clear_pending_reg()
- vgic_handle_cfg_reg()
- vgic_unqueue_irqs()
- find_matching_range() (renamed to vgic_find_range)
- vgic_handle_mmio_range()
- vgic_update_state()
- vgic_get_vmcr()
- vgic_set_vmcr()
- vgic_queue_irq()
- vgic_kick_vcpus()
- vgic_init()
- vgic_v2_init_emulation()
- vgic_has_attr_regs()
- vgic_set_common_attr()
- vgic_get_common_attr()
- vgic_destroy()
- vgic_create()
* moved functions to vgic.h (static inline):
- mmio_data_read()
- mmio_data_write()
- is_in_range()

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>

authored by

Andre Przywara and committed by
Christoffer Dall
83215812 b60da146

+170 -101
+50 -101
virt/kvm/arm/vgic.c
··· 75 75 * inactive as long as the external input line is held high. 76 76 */ 77 77 78 - #define VGIC_ADDR_UNDEF (-1) 79 - #define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF) 78 + #include "vgic.h" 80 79 81 - #define PRODUCT_ID_KVM 0x4b /* ASCII code K */ 82 - #define IMPLEMENTER_ARM 0x43b 83 80 #define GICC_ARCH_VERSION_V2 0x2 84 81 85 - #define ACCESS_READ_VALUE (1 << 0) 86 - #define ACCESS_READ_RAZ (0 << 0) 87 - #define ACCESS_READ_MASK(x) ((x) & (1 << 0)) 88 - #define ACCESS_WRITE_IGNORED (0 << 1) 89 - #define ACCESS_WRITE_SETBIT (1 << 1) 90 - #define ACCESS_WRITE_CLEARBIT (2 << 1) 91 - #define ACCESS_WRITE_VALUE (3 << 1) 92 - #define ACCESS_WRITE_MASK(x) ((x) & (3 << 1)) 93 - 94 - static int vgic_init(struct kvm *kvm); 95 82 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu); 96 83 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu); 97 - static void vgic_update_state(struct kvm *kvm); 98 - static void vgic_kick_vcpus(struct kvm *kvm); 99 84 static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi); 100 85 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg); 101 86 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr); 102 87 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc); 103 - static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); 104 - static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); 105 88 106 89 static const struct vgic_ops *vgic_ops; 107 90 static const struct vgic_params *vgic; ··· 158 175 return (unsigned long *)val; 159 176 } 160 177 161 - static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, 162 - int cpuid, u32 offset) 178 + u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset) 163 179 { 164 180 offset >>= 2; 165 181 if (!offset) ··· 176 194 return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared); 177 195 } 178 196 179 - static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, 
int cpuid, 180 - int irq, int val) 197 + void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, 198 + int irq, int val) 181 199 { 182 200 unsigned long *reg; 183 201 ··· 199 217 return x->private + cpuid; 200 218 } 201 219 202 - static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x) 220 + unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x) 203 221 { 204 222 return x->shared; 205 223 } ··· 226 244 b->shared = NULL; 227 245 } 228 246 229 - static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset) 247 + u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset) 230 248 { 231 249 u32 *reg; 232 250 ··· 323 341 return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq); 324 342 } 325 343 326 - static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq) 344 + void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq) 327 345 { 328 346 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 329 347 330 348 vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1); 331 349 } 332 350 333 - static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq) 351 + void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq) 334 352 { 335 353 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 336 354 ··· 346 364 vcpu->arch.vgic_cpu.pending_shared); 347 365 } 348 366 349 - static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq) 367 + void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq) 350 368 { 351 369 if (irq < VGIC_NR_PRIVATE_IRQS) 352 370 clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu); ··· 360 378 return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq); 361 379 } 362 380 363 - static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask) 364 - { 365 - return le32_to_cpu(*((u32 *)mmio->data)) & mask; 366 - } 367 - 368 - static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value) 369 - { 370 - *((u32 *)mmio->data) = 
cpu_to_le32(value) & mask; 371 - } 372 - 373 381 /** 374 382 * vgic_reg_access - access vgic register 375 383 * @mmio: pointer to the data describing the mmio access ··· 371 399 * modes defined for vgic register access 372 400 * (read,raz,write-ignored,setbit,clearbit,write) 373 401 */ 374 - static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg, 375 - phys_addr_t offset, int mode) 402 + void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg, 403 + phys_addr_t offset, int mode) 376 404 { 377 405 int word_offset = (offset & 3) * 8; 378 406 u32 mask = (1UL << (mmio->len * 8)) - 1; ··· 456 484 return false; 457 485 } 458 486 459 - static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, 460 - struct kvm_exit_mmio *mmio, phys_addr_t offset) 487 + bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, 488 + phys_addr_t offset) 461 489 { 462 490 vgic_reg_access(mmio, NULL, offset, 463 491 ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); 464 492 return false; 465 493 } 466 494 467 - static bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, 468 - phys_addr_t offset, int vcpu_id, int access) 495 + bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, 496 + phys_addr_t offset, int vcpu_id, int access) 469 497 { 470 498 u32 *reg; 471 499 int mode = ACCESS_READ_VALUE | access; ··· 502 530 vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT); 503 531 } 504 532 505 - static bool vgic_handle_set_pending_reg(struct kvm *kvm, 506 - struct kvm_exit_mmio *mmio, 507 - phys_addr_t offset, int vcpu_id) 533 + bool vgic_handle_set_pending_reg(struct kvm *kvm, 534 + struct kvm_exit_mmio *mmio, 535 + phys_addr_t offset, int vcpu_id) 508 536 { 509 537 u32 *reg, orig; 510 538 u32 level_mask; ··· 539 567 return false; 540 568 } 541 569 542 - static bool vgic_handle_clear_pending_reg(struct kvm *kvm, 543 - struct kvm_exit_mmio *mmio, 544 - phys_addr_t offset, int vcpu_id) 570 + bool vgic_handle_clear_pending_reg(struct kvm *kvm, 571 + struct 
kvm_exit_mmio *mmio, 572 + phys_addr_t offset, int vcpu_id) 545 573 { 546 574 u32 *level_active; 547 575 u32 *reg, orig; ··· 713 741 * LSB is always 0. As such, we only keep the upper bit, and use the 714 742 * two above functions to compress/expand the bits 715 743 */ 716 - static bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio, 717 - phys_addr_t offset) 744 + bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio, 745 + phys_addr_t offset) 718 746 { 719 747 u32 val; 720 748 ··· 790 818 * to the distributor but the active state stays in the LRs, because we don't 791 819 * track the active state on the distributor side. 792 820 */ 793 - static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) 821 + void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) 794 822 { 795 823 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 796 824 int i; ··· 915 943 return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false); 916 944 } 917 945 918 - /* 919 - * I would have liked to use the kvm_bus_io_*() API instead, but it 920 - * cannot cope with banked registers (only the VM pointer is passed 921 - * around, and we need the vcpu). One of these days, someone please 922 - * fix it! 
923 - */ 924 - struct mmio_range { 925 - phys_addr_t base; 926 - unsigned long len; 927 - int bits_per_irq; 928 - bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, 929 - phys_addr_t offset); 930 - }; 931 - 932 - static const struct mmio_range vgic_dist_ranges[] = { 946 + static const struct kvm_mmio_range vgic_dist_ranges[] = { 933 947 { 934 948 .base = GIC_DIST_CTRL, 935 949 .len = 12, ··· 1000 1042 {} 1001 1043 }; 1002 1044 1003 - static const 1004 - struct mmio_range *find_matching_range(const struct mmio_range *ranges, 1045 + const 1046 + struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges, 1005 1047 struct kvm_exit_mmio *mmio, 1006 1048 phys_addr_t offset) 1007 1049 { 1008 - const struct mmio_range *r = ranges; 1050 + const struct kvm_mmio_range *r = ranges; 1009 1051 1010 1052 while (r->len) { 1011 1053 if (offset >= r->base && ··· 1018 1060 } 1019 1061 1020 1062 static bool vgic_validate_access(const struct vgic_dist *dist, 1021 - const struct mmio_range *range, 1063 + const struct kvm_mmio_range *range, 1022 1064 unsigned long offset) 1023 1065 { 1024 1066 int irq; ··· 1046 1088 static bool call_range_handler(struct kvm_vcpu *vcpu, 1047 1089 struct kvm_exit_mmio *mmio, 1048 1090 unsigned long offset, 1049 - const struct mmio_range *range) 1091 + const struct kvm_mmio_range *range) 1050 1092 { 1051 1093 u32 *data32 = (void *)mmio->data; 1052 1094 struct kvm_exit_mmio mmio32; ··· 1090 1132 * 1091 1133 * returns true if the MMIO access could be performed 1092 1134 */ 1093 - static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run, 1135 + bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run, 1094 1136 struct kvm_exit_mmio *mmio, 1095 - const struct mmio_range *ranges, 1137 + const struct kvm_mmio_range *ranges, 1096 1138 unsigned long mmio_base) 1097 1139 { 1098 - const struct mmio_range *range; 1140 + const struct kvm_mmio_range *range; 1099 1141 struct vgic_dist *dist = 
&vcpu->kvm->arch.vgic; 1100 1142 bool updated_state; 1101 1143 unsigned long offset; 1102 1144 1103 1145 offset = mmio->phys_addr - mmio_base; 1104 - range = find_matching_range(ranges, mmio, offset); 1146 + range = vgic_find_range(ranges, mmio, offset); 1105 1147 if (unlikely(!range || !range->handle_mmio)) { 1106 1148 pr_warn("Unhandled access %d %08llx %d\n", 1107 1149 mmio->is_write, mmio->phys_addr, mmio->len); ··· 1125 1167 vgic_kick_vcpus(vcpu->kvm); 1126 1168 1127 1169 return true; 1128 - } 1129 - 1130 - static inline bool is_in_range(phys_addr_t addr, unsigned long len, 1131 - phys_addr_t baseaddr, unsigned long size) 1132 - { 1133 - return (addr >= baseaddr) && (addr + len <= baseaddr + size); 1134 1170 } 1135 1171 1136 1172 static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, ··· 1253 1301 * Update the interrupt state and determine which CPUs have pending 1254 1302 * interrupts. Must be called with distributor lock held. 1255 1303 */ 1256 - static void vgic_update_state(struct kvm *kvm) 1304 + void vgic_update_state(struct kvm *kvm) 1257 1305 { 1258 1306 struct vgic_dist *dist = &kvm->arch.vgic; 1259 1307 struct kvm_vcpu *vcpu; ··· 1314 1362 vgic_ops->disable_underflow(vcpu); 1315 1363 } 1316 1364 1317 - static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) 1365 + void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) 1318 1366 { 1319 1367 vgic_ops->get_vmcr(vcpu, vmcr); 1320 1368 } 1321 1369 1322 - static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) 1370 + void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) 1323 1371 { 1324 1372 vgic_ops->set_vmcr(vcpu, vmcr); 1325 1373 } ··· 1369 1417 * Queue an interrupt to a CPU virtual interface. Return true on success, 1370 1418 * or false if it wasn't possible to queue it. 
1371 1419 */ 1372 - static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) 1420 + bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) 1373 1421 { 1374 1422 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1375 1423 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; ··· 1655 1703 return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); 1656 1704 } 1657 1705 1658 - static void vgic_kick_vcpus(struct kvm *kvm) 1706 + void vgic_kick_vcpus(struct kvm *kvm) 1659 1707 { 1660 1708 struct kvm_vcpu *vcpu; 1661 1709 int c; ··· 1908 1956 * Allocate and initialize the various data structures. Must be called 1909 1957 * with kvm->lock held! 1910 1958 */ 1911 - static int vgic_init(struct kvm *kvm) 1959 + int vgic_init(struct kvm *kvm) 1912 1960 { 1913 1961 struct vgic_dist *dist = &kvm->arch.vgic; 1914 1962 struct kvm_vcpu *vcpu; ··· 2048 2096 return ret; 2049 2097 } 2050 2098 2051 - static void vgic_v2_init_emulation(struct kvm *kvm) 2099 + void vgic_v2_init_emulation(struct kvm *kvm) 2052 2100 { 2053 2101 struct vgic_dist *dist = &kvm->arch.vgic; 2054 2102 ··· 2278 2326 * CPU Interface Register accesses - these are not accessed by the VM, but by 2279 2327 * user space for saving and restoring VGIC state. 
2280 2328 */ 2281 - static const struct mmio_range vgic_cpu_ranges[] = { 2329 + static const struct kvm_mmio_range vgic_cpu_ranges[] = { 2282 2330 { 2283 2331 .base = GIC_CPU_CTRL, 2284 2332 .len = 12, ··· 2305 2353 struct kvm_device_attr *attr, 2306 2354 u32 *reg, bool is_write) 2307 2355 { 2308 - const struct mmio_range *r = NULL, *ranges; 2356 + const struct kvm_mmio_range *r = NULL, *ranges; 2309 2357 phys_addr_t offset; 2310 2358 int ret, cpuid, c; 2311 2359 struct kvm_vcpu *vcpu, *tmp_vcpu; ··· 2346 2394 default: 2347 2395 BUG(); 2348 2396 } 2349 - r = find_matching_range(ranges, &mmio, offset); 2397 + r = vgic_find_range(ranges, &mmio, offset); 2350 2398 2351 2399 if (unlikely(!r || !r->handle_mmio)) { 2352 2400 ret = -ENXIO; ··· 2392 2440 return ret; 2393 2441 } 2394 2442 2395 - static int vgic_set_common_attr(struct kvm_device *dev, 2396 - struct kvm_device_attr *attr) 2443 + int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr) 2397 2444 { 2398 2445 int r; 2399 2446 ··· 2476 2525 return -ENXIO; 2477 2526 } 2478 2527 2479 - static int vgic_get_common_attr(struct kvm_device *dev, 2480 - struct kvm_device_attr *attr) 2528 + int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr) 2481 2529 { 2482 2530 int r = -ENXIO; 2483 2531 ··· 2531 2581 return -ENXIO; 2532 2582 } 2533 2583 2534 - static int vgic_has_attr_regs(const struct mmio_range *ranges, 2535 - phys_addr_t offset) 2584 + int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset) 2536 2585 { 2537 2586 struct kvm_exit_mmio dev_attr_mmio; 2538 2587 2539 2588 dev_attr_mmio.len = 4; 2540 - if (find_matching_range(ranges, &dev_attr_mmio, offset)) 2589 + if (vgic_find_range(ranges, &dev_attr_mmio, offset)) 2541 2590 return 0; 2542 2591 else 2543 2592 return -ENXIO; ··· 2571 2622 return -ENXIO; 2572 2623 } 2573 2624 2574 - static void vgic_destroy(struct kvm_device *dev) 2625 + void vgic_destroy(struct kvm_device *dev) 2575 2626 { 2576 2627 
kfree(dev); 2577 2628 } 2578 2629 2579 - static int vgic_create(struct kvm_device *dev, u32 type) 2630 + int vgic_create(struct kvm_device *dev, u32 type) 2580 2631 { 2581 2632 return kvm_vgic_create(dev->kvm, type); 2582 2633 }
+120
virt/kvm/arm/vgic.h
··· 1 + /* 2 + * Copyright (C) 2012-2014 ARM Ltd. 3 + * Author: Marc Zyngier <marc.zyngier@arm.com> 4 + * 5 + * Derived from virt/kvm/arm/vgic.c 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 + */ 19 + 20 + #ifndef __KVM_VGIC_H__ 21 + #define __KVM_VGIC_H__ 22 + 23 + #define VGIC_ADDR_UNDEF (-1) 24 + #define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF) 25 + 26 + #define PRODUCT_ID_KVM 0x4b /* ASCII code K */ 27 + #define IMPLEMENTER_ARM 0x43b 28 + 29 + #define ACCESS_READ_VALUE (1 << 0) 30 + #define ACCESS_READ_RAZ (0 << 0) 31 + #define ACCESS_READ_MASK(x) ((x) & (1 << 0)) 32 + #define ACCESS_WRITE_IGNORED (0 << 1) 33 + #define ACCESS_WRITE_SETBIT (1 << 1) 34 + #define ACCESS_WRITE_CLEARBIT (2 << 1) 35 + #define ACCESS_WRITE_VALUE (3 << 1) 36 + #define ACCESS_WRITE_MASK(x) ((x) & (3 << 1)) 37 + 38 + unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x); 39 + 40 + void vgic_update_state(struct kvm *kvm); 41 + int vgic_init_common_maps(struct kvm *kvm); 42 + 43 + u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset); 44 + u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset); 45 + 46 + void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq); 47 + void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq); 48 + void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq); 49 + void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, 
50 + int irq, int val); 51 + 52 + void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); 53 + void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); 54 + 55 + bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq); 56 + void vgic_unqueue_irqs(struct kvm_vcpu *vcpu); 57 + 58 + void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg, 59 + phys_addr_t offset, int mode); 60 + bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, 61 + phys_addr_t offset); 62 + 63 + static inline 64 + u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask) 65 + { 66 + return le32_to_cpu(*((u32 *)mmio->data)) & mask; 67 + } 68 + 69 + static inline 70 + void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value) 71 + { 72 + *((u32 *)mmio->data) = cpu_to_le32(value) & mask; 73 + } 74 + 75 + struct kvm_mmio_range { 76 + phys_addr_t base; 77 + unsigned long len; 78 + int bits_per_irq; 79 + bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, 80 + phys_addr_t offset); 81 + }; 82 + 83 + static inline bool is_in_range(phys_addr_t addr, unsigned long len, 84 + phys_addr_t baseaddr, unsigned long size) 85 + { 86 + return (addr >= baseaddr) && (addr + len <= baseaddr + size); 87 + } 88 + 89 + const 90 + struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges, 91 + struct kvm_exit_mmio *mmio, 92 + phys_addr_t offset); 93 + 94 + bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run, 95 + struct kvm_exit_mmio *mmio, 96 + const struct kvm_mmio_range *ranges, 97 + unsigned long mmio_base); 98 + 99 + bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, 100 + phys_addr_t offset, int vcpu_id, int access); 101 + 102 + bool vgic_handle_set_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, 103 + phys_addr_t offset, int vcpu_id); 104 + 105 + bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, 106 + phys_addr_t offset, int vcpu_id); 
107 + 108 + bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio, 109 + phys_addr_t offset); 110 + 111 + void vgic_kick_vcpus(struct kvm *kvm); 112 + 113 + int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset); 114 + int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr); 115 + int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr); 116 + 117 + int vgic_init(struct kvm *kvm); 118 + void vgic_v2_init_emulation(struct kvm *kvm); 119 + 120 + #endif