KVM: Per-vcpu inodes

Allocate a distinct inode for every vcpu in a VM. This has the following
benefits:

- the filp cachelines are no longer bounced when f_count is incremented on
every ioctl()
- the API and internal code are distinctly clearer; for example, on the
KVM_GET_REGS ioctl, there is no need to copy the vcpu number from
userspace and then copy the registers back; the vcpu identity is derived
from the fd used to make the call

Right now the performance benefits are completely theoretical since (a) we
don't support more than one vcpu per VM and (b) virtualization hardware
inefficiencies completely overwhelm any cacheline bouncing effects. But
both of these will change, and we need to prepare the API today.
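
As a minimal illustrative sketch of the resulting userspace API (not part of the
patch): KVM_CREATE_VCPU is issued on the VM fd and returns a new vcpu fd, and the
per-vcpu ioctls are then issued directly on that fd, so struct kvm_regs and friends
no longer carry a vcpu number. The vm_fd and the helper below are assumptions for
illustration only.

  /* Hypothetical usage sketch, not part of the patch. */
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  int get_vcpu0_regs(int vm_fd, struct kvm_regs *regs)
  {
          int vcpu_fd;

          /* KVM_CREATE_VCPU takes the vcpu slot and returns a vcpu fd */
          vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
          if (vcpu_fd < 0)
                  return vcpu_fd;

          /* the vcpu identity comes from the fd; no vcpu field is copied in */
          return ioctl(vcpu_fd, KVM_GET_REGS, regs);
  }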

Signed-off-by: Avi Kivity <avi@qumranet.com>

+172 -142
+2 -1
drivers/kvm/kvm.h
···
         int busy;
         unsigned long rmap_overflow;
         struct list_head vm_list;
+        struct file *filp;
 };
 
 struct kvm_stat {
···
         int (*vcpu_create)(struct kvm_vcpu *vcpu);
         void (*vcpu_free)(struct kvm_vcpu *vcpu);
 
-        struct kvm_vcpu *(*vcpu_load)(struct kvm_vcpu *vcpu);
+        void (*vcpu_load)(struct kvm_vcpu *vcpu);
         void (*vcpu_put)(struct kvm_vcpu *vcpu);
         void (*vcpu_decache)(struct kvm_vcpu *vcpu);
+151 -116
drivers/kvm/kvm_main.c
···
 
 #endif
 
+static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
+                           unsigned long arg);
+
 static struct inode *kvmfs_inode(struct file_operations *fops)
 {
         int error = -ENOMEM;
···
 }
 EXPORT_SYMBOL_GPL(kvm_write_guest);
 
-static int vcpu_slot(struct kvm_vcpu *vcpu)
-{
-        return vcpu - vcpu->kvm->vcpus;
-}
-
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
-static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot)
+static void vcpu_load(struct kvm_vcpu *vcpu)
 {
-        struct kvm_vcpu *vcpu = &kvm->vcpus[vcpu_slot];
+        mutex_lock(&vcpu->mutex);
+        kvm_arch_ops->vcpu_load(vcpu);
+}
+
+/*
+ * Switches to specified vcpu, until a matching vcpu_put(). Will return NULL
+ * if the slot is not populated.
+ */
+static struct kvm_vcpu *vcpu_load_slot(struct kvm *kvm, int slot)
+{
+        struct kvm_vcpu *vcpu = &kvm->vcpus[slot];
 
         mutex_lock(&vcpu->mutex);
-        if (unlikely(!vcpu->vmcs)) {
+        if (!vcpu->vmcs) {
                 mutex_unlock(&vcpu->mutex);
                 return NULL;
         }
-        return kvm_arch_ops->vcpu_load(vcpu);
+        kvm_arch_ops->vcpu_load(vcpu);
+        return vcpu;
 }
 
 static void vcpu_put(struct kvm_vcpu *vcpu)
···
 
 static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
 {
-        if (!vcpu_load(vcpu->kvm, vcpu_slot(vcpu)))
+        if (!vcpu->vmcs)
                 return;
 
+        vcpu_load(vcpu);
         kvm_mmu_destroy(vcpu);
         vcpu_put(vcpu);
         kvm_arch_ops->vcpu_free(vcpu);
···
         for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                 struct kvm_vcpu *vcpu;
 
-                vcpu = vcpu_load(kvm, i);
+                vcpu = vcpu_load_slot(kvm, i);
                 if (!vcpu)
                         continue;
                 kvm_mmu_reset_context(vcpu);
···
         if (any) {
                 cleared = 0;
                 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-                        struct kvm_vcpu *vcpu = vcpu_load(kvm, i);
+                        struct kvm_vcpu *vcpu;
 
+                        vcpu = vcpu_load_slot(kvm, i);
                         if (!vcpu)
                                 continue;
                         if (!cleared) {
···
 {
         vcpu_put(vcpu);
         cond_resched();
-        /* Cannot fail - no vcpu unplug yet. */
-        vcpu_load(vcpu->kvm, vcpu_slot(vcpu));
+        vcpu_load(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
···
 }
 EXPORT_SYMBOL_GPL(save_msrs);
 
-static int kvm_vm_ioctl_run(struct kvm *kvm, struct kvm_run *kvm_run)
+static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-        struct kvm_vcpu *vcpu;
         int r;
 
-        if (!valid_vcpu(kvm_run->vcpu))
-                return -EINVAL;
-
-        vcpu = vcpu_load(kvm, kvm_run->vcpu);
-        if (!vcpu)
-                return -ENOENT;
+        vcpu_load(vcpu);
 
         /* re-sync apic's tpr */
         vcpu->cr8 = kvm_run->cr8;
···
         return r;
 }
 
-static int kvm_vm_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs)
+static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
+                                   struct kvm_regs *regs)
 {
-        struct kvm_vcpu *vcpu;
-
-        if (!valid_vcpu(regs->vcpu))
-                return -EINVAL;
-
-        vcpu = vcpu_load(kvm, regs->vcpu);
-        if (!vcpu)
-                return -ENOENT;
+        vcpu_load(vcpu);
 
         kvm_arch_ops->cache_regs(vcpu);
 
···
         return 0;
 }
 
-static int kvm_vm_ioctl_set_regs(struct kvm *kvm, struct kvm_regs *regs)
+static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
+                                   struct kvm_regs *regs)
 {
-        struct kvm_vcpu *vcpu;
-
-        if (!valid_vcpu(regs->vcpu))
-                return -EINVAL;
-
-        vcpu = vcpu_load(kvm, regs->vcpu);
-        if (!vcpu)
-                return -ENOENT;
+        vcpu_load(vcpu);
 
         vcpu->regs[VCPU_REGS_RAX] = regs->rax;
         vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
···
         return kvm_arch_ops->get_segment(vcpu, var, seg);
 }
 
-static int kvm_vm_ioctl_get_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
+static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+                                    struct kvm_sregs *sregs)
 {
-        struct kvm_vcpu *vcpu;
         struct descriptor_table dt;
 
-        if (!valid_vcpu(sregs->vcpu))
-                return -EINVAL;
-        vcpu = vcpu_load(kvm, sregs->vcpu);
-        if (!vcpu)
-                return -ENOENT;
+        vcpu_load(vcpu);
 
         get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
         get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
···
         return kvm_arch_ops->set_segment(vcpu, var, seg);
 }
 
-static int kvm_vm_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
+static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+                                    struct kvm_sregs *sregs)
 {
-        struct kvm_vcpu *vcpu;
         int mmu_reset_needed = 0;
         int i;
         struct descriptor_table dt;
 
-        if (!valid_vcpu(sregs->vcpu))
-                return -EINVAL;
-        vcpu = vcpu_load(kvm, sregs->vcpu);
-        if (!vcpu)
-                return -ENOENT;
+        vcpu_load(vcpu);
 
         set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
         set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
···
  *
  * @return number of msrs set successfully.
  */
-static int __msr_io(struct kvm *kvm, struct kvm_msrs *msrs,
+static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
                     struct kvm_msr_entry *entries,
                     int (*do_msr)(struct kvm_vcpu *vcpu,
                                   unsigned index, u64 *data))
 {
-        struct kvm_vcpu *vcpu;
         int i;
 
-        if (!valid_vcpu(msrs->vcpu))
-                return -EINVAL;
-
-        vcpu = vcpu_load(kvm, msrs->vcpu);
-        if (!vcpu)
-                return -ENOENT;
+        vcpu_load(vcpu);
 
         for (i = 0; i < msrs->nmsrs; ++i)
                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
···
  *
  * @return number of msrs set successfully.
  */
-static int msr_io(struct kvm *kvm, struct kvm_msrs __user *user_msrs,
+static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
                   int (*do_msr)(struct kvm_vcpu *vcpu,
                                 unsigned index, u64 *data),
                   int writeback)
···
         if (copy_from_user(entries, user_msrs->entries, size))
                 goto out_free;
 
-        r = n = __msr_io(kvm, &msrs, entries, do_msr);
+        r = n = __msr_io(vcpu, &msrs, entries, do_msr);
         if (r < 0)
                 goto out_free;
···
 /*
  * Translate a guest virtual address to a guest physical address.
  */
-static int kvm_vm_ioctl_translate(struct kvm *kvm, struct kvm_translation *tr)
+static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+                                    struct kvm_translation *tr)
 {
         unsigned long vaddr = tr->linear_address;
-        struct kvm_vcpu *vcpu;
         gpa_t gpa;
 
-        vcpu = vcpu_load(kvm, tr->vcpu);
-        if (!vcpu)
-                return -ENOENT;
-        spin_lock(&kvm->lock);
+        vcpu_load(vcpu);
+        spin_lock(&vcpu->kvm->lock);
         gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
         tr->physical_address = gpa;
         tr->valid = gpa != UNMAPPED_GVA;
         tr->writeable = 1;
         tr->usermode = 0;
-        spin_unlock(&kvm->lock);
+        spin_unlock(&vcpu->kvm->lock);
         vcpu_put(vcpu);
 
         return 0;
 }
 
-static int kvm_vm_ioctl_interrupt(struct kvm *kvm, struct kvm_interrupt *irq)
+static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+                                    struct kvm_interrupt *irq)
 {
-        struct kvm_vcpu *vcpu;
-
-        if (!valid_vcpu(irq->vcpu))
-                return -EINVAL;
         if (irq->irq < 0 || irq->irq >= 256)
                 return -EINVAL;
-        vcpu = vcpu_load(kvm, irq->vcpu);
-        if (!vcpu)
-                return -ENOENT;
+        vcpu_load(vcpu);
 
         set_bit(irq->irq, vcpu->irq_pending);
         set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);
···
         return 0;
 }
 
-static int kvm_vm_ioctl_debug_guest(struct kvm *kvm,
-                                    struct kvm_debug_guest *dbg)
+static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+                                      struct kvm_debug_guest *dbg)
 {
-        struct kvm_vcpu *vcpu;
         int r;
 
-        if (!valid_vcpu(dbg->vcpu))
-                return -EINVAL;
-        vcpu = vcpu_load(kvm, dbg->vcpu);
-        if (!vcpu)
-                return -ENOENT;
+        vcpu_load(vcpu);
 
         r = kvm_arch_ops->set_guest_debug(vcpu, dbg);
 
         vcpu_put(vcpu);
 
+        return r;
+}
+
+static int kvm_vcpu_release(struct inode *inode, struct file *filp)
+{
+        struct kvm_vcpu *vcpu = filp->private_data;
+
+        fput(vcpu->kvm->filp);
+        return 0;
+}
+
+static struct file_operations kvm_vcpu_fops = {
+        .release = kvm_vcpu_release,
+        .unlocked_ioctl = kvm_vcpu_ioctl,
+        .compat_ioctl = kvm_vcpu_ioctl,
+};
+
+/*
+ * Allocates an inode for the vcpu.
+ */
+static int create_vcpu_fd(struct kvm_vcpu *vcpu)
+{
+        int fd, r;
+        struct inode *inode;
+        struct file *file;
+
+        atomic_inc(&vcpu->kvm->filp->f_count);
+        inode = kvmfs_inode(&kvm_vcpu_fops);
+        if (IS_ERR(inode)) {
+                r = PTR_ERR(inode);
+                goto out1;
+        }
+
+        file = kvmfs_file(inode, vcpu);
+        if (IS_ERR(file)) {
+                r = PTR_ERR(file);
+                goto out2;
+        }
+
+        r = get_unused_fd();
+        if (r < 0)
+                goto out3;
+        fd = r;
+        fd_install(fd, file);
+
+        return fd;
+
+out3:
+        fput(file);
+out2:
+        iput(inode);
+out1:
+        fput(vcpu->kvm->filp);
         return r;
 }
 
···
         if (r < 0)
                 goto out_free_vcpus;
 
-        return 0;
+        r = create_vcpu_fd(vcpu);
+        if (r < 0)
+                goto out_free_vcpus;
+
+        return r;
 
 out_free_vcpus:
         kvm_free_vcpu(vcpu);
···
         return r;
 }
 
-static long kvm_vm_ioctl(struct file *filp,
-                         unsigned int ioctl, unsigned long arg)
+static long kvm_vcpu_ioctl(struct file *filp,
+                           unsigned int ioctl, unsigned long arg)
 {
-        struct kvm *kvm = filp->private_data;
+        struct kvm_vcpu *vcpu = filp->private_data;
         void __user *argp = (void __user *)arg;
         int r = -EINVAL;
 
         switch (ioctl) {
-        case KVM_CREATE_VCPU:
-                r = kvm_vm_ioctl_create_vcpu(kvm, arg);
-                if (r)
-                        goto out;
-                break;
         case KVM_RUN: {
                 struct kvm_run kvm_run;
 
                 r = -EFAULT;
                 if (copy_from_user(&kvm_run, argp, sizeof kvm_run))
                         goto out;
-                r = kvm_vm_ioctl_run(kvm, &kvm_run);
+                r = kvm_vcpu_ioctl_run(vcpu, &kvm_run);
                 if (r < 0 && r != -EINTR)
                         goto out;
                 if (copy_to_user(argp, &kvm_run, sizeof kvm_run)) {
···
         case KVM_GET_REGS: {
                 struct kvm_regs kvm_regs;
 
-                r = -EFAULT;
-                if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
-                        goto out;
-                r = kvm_vm_ioctl_get_regs(kvm, &kvm_regs);
+                memset(&kvm_regs, 0, sizeof kvm_regs);
+                r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
                 if (r)
                         goto out;
                 r = -EFAULT;
···
                 r = -EFAULT;
                 if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
                         goto out;
-                r = kvm_vm_ioctl_set_regs(kvm, &kvm_regs);
+                r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
                 if (r)
                         goto out;
                 r = 0;
···
         case KVM_GET_SREGS: {
                 struct kvm_sregs kvm_sregs;
 
-                r = -EFAULT;
-                if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
-                        goto out;
-                r = kvm_vm_ioctl_get_sregs(kvm, &kvm_sregs);
+                memset(&kvm_sregs, 0, sizeof kvm_sregs);
+                r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
                 if (r)
                         goto out;
                 r = -EFAULT;
···
                 r = -EFAULT;
                 if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
                         goto out;
-                r = kvm_vm_ioctl_set_sregs(kvm, &kvm_sregs);
+                r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
                 if (r)
                         goto out;
                 r = 0;
···
                 r = -EFAULT;
                 if (copy_from_user(&tr, argp, sizeof tr))
                         goto out;
-                r = kvm_vm_ioctl_translate(kvm, &tr);
+                r = kvm_vcpu_ioctl_translate(vcpu, &tr);
                 if (r)
                         goto out;
                 r = -EFAULT;
···
                 r = -EFAULT;
                 if (copy_from_user(&irq, argp, sizeof irq))
                         goto out;
-                r = kvm_vm_ioctl_interrupt(kvm, &irq);
+                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                 if (r)
                         goto out;
                 r = 0;
···
                 r = -EFAULT;
                 if (copy_from_user(&dbg, argp, sizeof dbg))
                         goto out;
-                r = kvm_vm_ioctl_debug_guest(kvm, &dbg);
+                r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
                 if (r)
                         goto out;
                 r = 0;
                 break;
         }
+        case KVM_GET_MSRS:
+                r = msr_io(vcpu, argp, get_msr, 1);
+                break;
+        case KVM_SET_MSRS:
+                r = msr_io(vcpu, argp, do_set_msr, 0);
+                break;
+        default:
+                ;
+        }
+out:
+        return r;
+}
+
+static long kvm_vm_ioctl(struct file *filp,
+                         unsigned int ioctl, unsigned long arg)
+{
+        struct kvm *kvm = filp->private_data;
+        void __user *argp = (void __user *)arg;
+        int r = -EINVAL;
+
+        switch (ioctl) {
+        case KVM_CREATE_VCPU:
+                r = kvm_vm_ioctl_create_vcpu(kvm, arg);
+                if (r < 0)
+                        goto out;
+                break;
         case KVM_SET_MEMORY_REGION: {
                 struct kvm_memory_region kvm_mem;
···
                         goto out;
                 break;
         }
-        case KVM_GET_MSRS:
-                r = msr_io(kvm, argp, get_msr, 1);
-                break;
-        case KVM_SET_MSRS:
-                r = msr_io(kvm, argp, do_set_msr, 0);
-                break;
         default:
                 ;
         }
···
                 r = PTR_ERR(file);
                 goto out3;
         }
+        kvm->filp = file;
 
         r = get_unused_fd();
         if (r < 0)
+1 -2
drivers/kvm/svm.c
···
         kfree(vcpu->svm);
 }
 
-static struct kvm_vcpu *svm_vcpu_load(struct kvm_vcpu *vcpu)
+static void svm_vcpu_load(struct kvm_vcpu *vcpu)
 {
         get_cpu();
-        return vcpu;
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
+1 -2
drivers/kvm/vmx.c
···
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
  */
-static struct kvm_vcpu *vmx_vcpu_load(struct kvm_vcpu *vcpu)
+static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
 {
         u64 phys_addr = __pa(vcpu->vmcs);
         int cpu;
···
                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
         }
-        return vcpu;
 }
 
 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
+17 -21
include/linux/kvm.h
···
 /* for KVM_RUN */
 struct kvm_run {
         /* in */
-        __u32 vcpu;
         __u32 emulated;  /* skip current instruction */
         __u32 mmio_completed; /* mmio request completed */
         __u8 request_interrupt_window;
-        __u8 padding1[3];
+        __u8 padding1[7];
 
         /* out */
         __u32 exit_type;
···
 
 /* for KVM_GET_REGS and KVM_SET_REGS */
 struct kvm_regs {
-        /* in */
-        __u32 vcpu;
-        __u32 padding;
-
         /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
         __u64 rax, rbx, rcx, rdx;
         __u64 rsi, rdi, rsp, rbp;
···
 
 /* for KVM_GET_SREGS and KVM_SET_SREGS */
 struct kvm_sregs {
-        /* in */
-        __u32 vcpu;
-        __u32 padding;
-
         /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
         struct kvm_segment cs, ds, es, fs, gs, ss;
         struct kvm_segment tr, ldt;
···
 
 /* for KVM_GET_MSRS and KVM_SET_MSRS */
 struct kvm_msrs {
-        __u32 vcpu;
         __u32 nmsrs; /* number of msrs in entries */
+        __u32 pad;
 
         struct kvm_msr_entry entries[0];
 };
···
 struct kvm_translation {
         /* in */
         __u64 linear_address;
-        __u32 vcpu;
-        __u32 padding;
 
         /* out */
         __u64 physical_address;
···
 /* for KVM_INTERRUPT */
 struct kvm_interrupt {
         /* in */
-        __u32 vcpu;
         __u32 irq;
 };
···
 /* for KVM_DEBUG_GUEST */
 struct kvm_debug_guest {
         /* int */
-        __u32 vcpu;
         __u32 enabled;
+        __u32 pad;
         struct kvm_breakpoint breakpoints[4];
         __u32 singlestep;
 };
···
 /*
  * ioctls for VM fds
  */
+#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 10, struct kvm_memory_region)
+/*
+ * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
+ * a vcpu fd.
+ */
+#define KVM_CREATE_VCPU _IOW(KVMIO, 11, int)
+#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 12, struct kvm_dirty_log)
+
+/*
+ * ioctls for vcpu fds
+ */
 #define KVM_RUN _IOWR(KVMIO, 2, struct kvm_run)
-#define KVM_GET_REGS _IOWR(KVMIO, 3, struct kvm_regs)
+#define KVM_GET_REGS _IOR(KVMIO, 3, struct kvm_regs)
 #define KVM_SET_REGS _IOW(KVMIO, 4, struct kvm_regs)
-#define KVM_GET_SREGS _IOWR(KVMIO, 5, struct kvm_sregs)
+#define KVM_GET_SREGS _IOR(KVMIO, 5, struct kvm_sregs)
 #define KVM_SET_SREGS _IOW(KVMIO, 6, struct kvm_sregs)
 #define KVM_TRANSLATE _IOWR(KVMIO, 7, struct kvm_translation)
 #define KVM_INTERRUPT _IOW(KVMIO, 8, struct kvm_interrupt)
 #define KVM_DEBUG_GUEST _IOW(KVMIO, 9, struct kvm_debug_guest)
-#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 10, struct kvm_memory_region)
-#define KVM_CREATE_VCPU _IOW(KVMIO, 11, int /* vcpu_slot */)
-#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 12, struct kvm_dirty_log)
 #define KVM_GET_MSRS _IOWR(KVMIO, 13, struct kvm_msrs)
-#define KVM_SET_MSRS _IOWR(KVMIO, 14, struct kvm_msrs)
+#define KVM_SET_MSRS _IOW(KVMIO, 14, struct kvm_msrs)
 
 #endif