Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Book3S HV: Provide a method for userspace to read and write the HPT

A new ioctl, KVM_PPC_GET_HTAB_FD, returns a file descriptor. Reads on
this fd return the contents of the HPT (hashed page table), writes
create and/or remove entries in the HPT. There is a new capability,
KVM_CAP_PPC_HTAB_FD, to indicate the presence of the ioctl. The ioctl
takes an argument structure with the index of the first HPT entry to
read out and a set of flags. The flags indicate whether the user is
intending to read or write the HPT, and whether to return all entries
or only the "bolted" entries (those with the bolted bit, 0x10, set in
the first doubleword).

This is intended for use in implementing qemu's savevm/loadvm and for
live migration. Therefore, on reads, the first pass returns information
about all HPTEs (or all bolted HPTEs). When the first pass reaches the
end of the HPT, it returns from the read. Subsequent reads only return
information about HPTEs that have changed since they were last read.
A read that finds no changed HPTEs in the HPT following where the last
read finished will return 0 bytes.

The format of the data provides a simple run-length compression of the
invalid entries. Each block of data starts with a header that indicates
the index (position in the HPT, which is just an array), the number of
valid entries starting at that index (may be zero), and the number of
invalid entries following those valid entries. The valid entries, 16
bytes each, follow the header. The invalid entries are not explicitly
represented.

Signed-off-by: Paul Mackerras <paulus@samba.org>
[agraf: fix documentation]
Signed-off-by: Alexander Graf <agraf@suse.de>

Authored by Paul Mackerras; committed by Alexander Graf.
Commit: a2932923 (parent: 6b445ad4)

+467 -12
+54
Documentation/virtual/kvm/api.txt
··· 2071 2071 2072 2072 Note that the vcpu ioctl is asynchronous to vcpu execution. 2073 2073 2074 + 4.78 KVM_PPC_GET_HTAB_FD 2075 + 2076 + Capability: KVM_CAP_PPC_HTAB_FD 2077 + Architectures: powerpc 2078 + Type: vm ioctl 2079 + Parameters: Pointer to struct kvm_get_htab_fd (in) 2080 + Returns: file descriptor number (>= 0) on success, -1 on error 2081 + 2082 + This returns a file descriptor that can be used either to read out the 2083 + entries in the guest's hashed page table (HPT), or to write entries to 2084 + initialize the HPT. The returned fd can only be written to if the 2085 + KVM_GET_HTAB_WRITE bit is set in the flags field of the argument, and 2086 + can only be read if that bit is clear. The argument struct looks like 2087 + this: 2088 + 2089 + /* For KVM_PPC_GET_HTAB_FD */ 2090 + struct kvm_get_htab_fd { 2091 + __u64 flags; 2092 + __u64 start_index; 2093 + __u64 reserved[2]; 2094 + }; 2095 + 2096 + /* Values for kvm_get_htab_fd.flags */ 2097 + #define KVM_GET_HTAB_BOLTED_ONLY ((__u64)0x1) 2098 + #define KVM_GET_HTAB_WRITE ((__u64)0x2) 2099 + 2100 + The `start_index' field gives the index in the HPT of the entry at 2101 + which to start reading. It is ignored when writing. 2102 + 2103 + Reads on the fd will initially supply information about all 2104 + "interesting" HPT entries. Interesting entries are those with the 2105 + bolted bit set, if the KVM_GET_HTAB_BOLTED_ONLY bit is set, otherwise 2106 + all entries. When the end of the HPT is reached, the read() will 2107 + return. If read() is called again on the fd, it will start again from 2108 + the beginning of the HPT, but will only return HPT entries that have 2109 + changed since they were last read. 2110 + 2111 + Data read or written is structured as a header (8 bytes) followed by a 2112 + series of valid HPT entries (16 bytes) each. The header indicates how 2113 + many valid HPT entries there are and how many invalid entries follow 2114 + the valid entries. 
The invalid entries are not represented explicitly 2115 + in the stream. The header format is: 2116 + 2117 + struct kvm_get_htab_header { 2118 + __u32 index; 2119 + __u16 n_valid; 2120 + __u16 n_invalid; 2121 + }; 2122 + 2123 + Writes to the fd create HPT entries starting at the index given in the 2124 + header; first `n_valid' valid entries with contents from the data 2125 + written, then `n_invalid' invalid entries, invalidating any previously 2126 + valid entries found. 2127 + 2074 2128 2075 2129 5. The kvm_run structure 2076 2130 ------------------------
+22
arch/powerpc/include/asm/kvm_book3s_64.h
··· 246 246 return !(memslot->base_gfn & mask) && !(memslot->npages & mask); 247 247 } 248 248 249 + /* 250 + * This works for 4k, 64k and 16M pages on POWER7, 251 + * and 4k and 16M pages on PPC970. 252 + */ 253 + static inline unsigned long slb_pgsize_encoding(unsigned long psize) 254 + { 255 + unsigned long senc = 0; 256 + 257 + if (psize > 0x1000) { 258 + senc = SLB_VSID_L; 259 + if (psize == 0x10000) 260 + senc |= SLB_VSID_LP_01; 261 + } 262 + return senc; 263 + } 264 + 265 + static inline int is_vrma_hpte(unsigned long hpte_v) 266 + { 267 + return (hpte_v & ~0xffffffUL) == 268 + (HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16))); 269 + } 270 + 249 271 #endif /* __ASM_KVM_BOOK3S_64_H__ */
+2
arch/powerpc/include/asm/kvm_ppc.h
··· 164 164 165 165 extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu); 166 166 167 + extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *); 168 + 167 169 /* 168 170 * Cuts out inst bits with ordering according to spec. 169 171 * That means the leftmost bit is zero. All given bits are included.
+25
arch/powerpc/include/uapi/asm/kvm.h
··· 331 331 __u32 reserved[8]; 332 332 }; 333 333 334 + /* For KVM_PPC_GET_HTAB_FD */ 335 + struct kvm_get_htab_fd { 336 + __u64 flags; 337 + __u64 start_index; 338 + __u64 reserved[2]; 339 + }; 340 + 341 + /* Values for kvm_get_htab_fd.flags */ 342 + #define KVM_GET_HTAB_BOLTED_ONLY ((__u64)0x1) 343 + #define KVM_GET_HTAB_WRITE ((__u64)0x2) 344 + 345 + /* 346 + * Data read on the file descriptor is formatted as a series of 347 + * records, each consisting of a header followed by a series of 348 + * `n_valid' HPTEs (16 bytes each), which are all valid. Following 349 + * those valid HPTEs there are `n_invalid' invalid HPTEs, which 350 + * are not represented explicitly in the stream. The same format 351 + * is used for writing. 352 + */ 353 + struct kvm_get_htab_header { 354 + __u32 index; 355 + __u16 n_valid; 356 + __u16 n_invalid; 357 + }; 358 + 334 359 #define KVM_REG_PPC_HIOR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x1) 335 360 #define KVM_REG_PPC_IAC1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x2) 336 361 #define KVM_REG_PPC_IAC2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3)
+344
arch/powerpc/kvm/book3s_64_mmu_hv.c
··· 25 25 #include <linux/hugetlb.h> 26 26 #include <linux/vmalloc.h> 27 27 #include <linux/srcu.h> 28 + #include <linux/anon_inodes.h> 29 + #include <linux/file.h> 28 30 29 31 #include <asm/tlbflush.h> 30 32 #include <asm/kvm_ppc.h> ··· 1145 1143 struct page *page = virt_to_page(va); 1146 1144 1147 1145 put_page(page); 1146 + } 1147 + 1148 + /* 1149 + * Functions for reading and writing the hash table via reads and 1150 + * writes on a file descriptor. 1151 + * 1152 + * Reads return the guest view of the hash table, which has to be 1153 + * pieced together from the real hash table and the guest_rpte 1154 + * values in the revmap array. 1155 + * 1156 + * On writes, each HPTE written is considered in turn, and if it 1157 + * is valid, it is written to the HPT as if an H_ENTER with the 1158 + * exact flag set was done. When the invalid count is non-zero 1159 + * in the header written to the stream, the kernel will make 1160 + * sure that that many HPTEs are invalid, and invalidate them 1161 + * if not. 
1162 + */ 1163 + 1164 + struct kvm_htab_ctx { 1165 + unsigned long index; 1166 + unsigned long flags; 1167 + struct kvm *kvm; 1168 + int first_pass; 1169 + }; 1170 + 1171 + #define HPTE_SIZE (2 * sizeof(unsigned long)) 1172 + 1173 + static long record_hpte(unsigned long flags, unsigned long *hptp, 1174 + unsigned long *hpte, struct revmap_entry *revp, 1175 + int want_valid, int first_pass) 1176 + { 1177 + unsigned long v, r; 1178 + int ok = 1; 1179 + int valid, dirty; 1180 + 1181 + /* Unmodified entries are uninteresting except on the first pass */ 1182 + dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED); 1183 + if (!first_pass && !dirty) 1184 + return 0; 1185 + 1186 + valid = 0; 1187 + if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) { 1188 + valid = 1; 1189 + if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && 1190 + !(hptp[0] & HPTE_V_BOLTED)) 1191 + valid = 0; 1192 + } 1193 + if (valid != want_valid) 1194 + return 0; 1195 + 1196 + v = r = 0; 1197 + if (valid || dirty) { 1198 + /* lock the HPTE so it's stable and read it */ 1199 + preempt_disable(); 1200 + while (!try_lock_hpte(hptp, HPTE_V_HVLOCK)) 1201 + cpu_relax(); 1202 + v = hptp[0]; 1203 + if (v & HPTE_V_ABSENT) { 1204 + v &= ~HPTE_V_ABSENT; 1205 + v |= HPTE_V_VALID; 1206 + } 1207 + /* re-evaluate valid and dirty from synchronized HPTE value */ 1208 + valid = !!(v & HPTE_V_VALID); 1209 + if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED)) 1210 + valid = 0; 1211 + r = revp->guest_rpte | (hptp[1] & (HPTE_R_R | HPTE_R_C)); 1212 + dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED); 1213 + /* only clear modified if this is the right sort of entry */ 1214 + if (valid == want_valid && dirty) { 1215 + r &= ~HPTE_GR_MODIFIED; 1216 + revp->guest_rpte = r; 1217 + } 1218 + asm volatile(PPC_RELEASE_BARRIER "" : : : "memory"); 1219 + hptp[0] &= ~HPTE_V_HVLOCK; 1220 + preempt_enable(); 1221 + if (!(valid == want_valid && (first_pass || dirty))) 1222 + ok = 0; 1223 + } 1224 + hpte[0] = v; 1225 + hpte[1] = r; 1226 + return ok; 
1227 + } 1228 + 1229 + static ssize_t kvm_htab_read(struct file *file, char __user *buf, 1230 + size_t count, loff_t *ppos) 1231 + { 1232 + struct kvm_htab_ctx *ctx = file->private_data; 1233 + struct kvm *kvm = ctx->kvm; 1234 + struct kvm_get_htab_header hdr; 1235 + unsigned long *hptp; 1236 + struct revmap_entry *revp; 1237 + unsigned long i, nb, nw; 1238 + unsigned long __user *lbuf; 1239 + struct kvm_get_htab_header __user *hptr; 1240 + unsigned long flags; 1241 + int first_pass; 1242 + unsigned long hpte[2]; 1243 + 1244 + if (!access_ok(VERIFY_WRITE, buf, count)) 1245 + return -EFAULT; 1246 + 1247 + first_pass = ctx->first_pass; 1248 + flags = ctx->flags; 1249 + 1250 + i = ctx->index; 1251 + hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); 1252 + revp = kvm->arch.revmap + i; 1253 + lbuf = (unsigned long __user *)buf; 1254 + 1255 + nb = 0; 1256 + while (nb + sizeof(hdr) + HPTE_SIZE < count) { 1257 + /* Initialize header */ 1258 + hptr = (struct kvm_get_htab_header __user *)buf; 1259 + hdr.index = i; 1260 + hdr.n_valid = 0; 1261 + hdr.n_invalid = 0; 1262 + nw = nb; 1263 + nb += sizeof(hdr); 1264 + lbuf = (unsigned long __user *)(buf + sizeof(hdr)); 1265 + 1266 + /* Skip uninteresting entries, i.e. 
clean on not-first pass */ 1267 + if (!first_pass) { 1268 + while (i < kvm->arch.hpt_npte && 1269 + !(revp->guest_rpte & HPTE_GR_MODIFIED)) { 1270 + ++i; 1271 + hptp += 2; 1272 + ++revp; 1273 + } 1274 + } 1275 + 1276 + /* Grab a series of valid entries */ 1277 + while (i < kvm->arch.hpt_npte && 1278 + hdr.n_valid < 0xffff && 1279 + nb + HPTE_SIZE < count && 1280 + record_hpte(flags, hptp, hpte, revp, 1, first_pass)) { 1281 + /* valid entry, write it out */ 1282 + ++hdr.n_valid; 1283 + if (__put_user(hpte[0], lbuf) || 1284 + __put_user(hpte[1], lbuf + 1)) 1285 + return -EFAULT; 1286 + nb += HPTE_SIZE; 1287 + lbuf += 2; 1288 + ++i; 1289 + hptp += 2; 1290 + ++revp; 1291 + } 1292 + /* Now skip invalid entries while we can */ 1293 + while (i < kvm->arch.hpt_npte && 1294 + hdr.n_invalid < 0xffff && 1295 + record_hpte(flags, hptp, hpte, revp, 0, first_pass)) { 1296 + /* found an invalid entry */ 1297 + ++hdr.n_invalid; 1298 + ++i; 1299 + hptp += 2; 1300 + ++revp; 1301 + } 1302 + 1303 + if (hdr.n_valid || hdr.n_invalid) { 1304 + /* write back the header */ 1305 + if (__copy_to_user(hptr, &hdr, sizeof(hdr))) 1306 + return -EFAULT; 1307 + nw = nb; 1308 + buf = (char __user *)lbuf; 1309 + } else { 1310 + nb = nw; 1311 + } 1312 + 1313 + /* Check if we've wrapped around the hash table */ 1314 + if (i >= kvm->arch.hpt_npte) { 1315 + i = 0; 1316 + ctx->first_pass = 0; 1317 + break; 1318 + } 1319 + } 1320 + 1321 + ctx->index = i; 1322 + 1323 + return nb; 1324 + } 1325 + 1326 + static ssize_t kvm_htab_write(struct file *file, const char __user *buf, 1327 + size_t count, loff_t *ppos) 1328 + { 1329 + struct kvm_htab_ctx *ctx = file->private_data; 1330 + struct kvm *kvm = ctx->kvm; 1331 + struct kvm_get_htab_header hdr; 1332 + unsigned long i, j; 1333 + unsigned long v, r; 1334 + unsigned long __user *lbuf; 1335 + unsigned long *hptp; 1336 + unsigned long tmp[2]; 1337 + ssize_t nb; 1338 + long int err, ret; 1339 + int rma_setup; 1340 + 1341 + if (!access_ok(VERIFY_READ, buf, count)) 
1342 + return -EFAULT; 1343 + 1344 + /* lock out vcpus from running while we're doing this */ 1345 + mutex_lock(&kvm->lock); 1346 + rma_setup = kvm->arch.rma_setup_done; 1347 + if (rma_setup) { 1348 + kvm->arch.rma_setup_done = 0; /* temporarily */ 1349 + /* order rma_setup_done vs. vcpus_running */ 1350 + smp_mb(); 1351 + if (atomic_read(&kvm->arch.vcpus_running)) { 1352 + kvm->arch.rma_setup_done = 1; 1353 + mutex_unlock(&kvm->lock); 1354 + return -EBUSY; 1355 + } 1356 + } 1357 + 1358 + err = 0; 1359 + for (nb = 0; nb + sizeof(hdr) <= count; ) { 1360 + err = -EFAULT; 1361 + if (__copy_from_user(&hdr, buf, sizeof(hdr))) 1362 + break; 1363 + 1364 + err = 0; 1365 + if (nb + hdr.n_valid * HPTE_SIZE > count) 1366 + break; 1367 + 1368 + nb += sizeof(hdr); 1369 + buf += sizeof(hdr); 1370 + 1371 + err = -EINVAL; 1372 + i = hdr.index; 1373 + if (i >= kvm->arch.hpt_npte || 1374 + i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte) 1375 + break; 1376 + 1377 + hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); 1378 + lbuf = (unsigned long __user *)buf; 1379 + for (j = 0; j < hdr.n_valid; ++j) { 1380 + err = -EFAULT; 1381 + if (__get_user(v, lbuf) || __get_user(r, lbuf + 1)) 1382 + goto out; 1383 + err = -EINVAL; 1384 + if (!(v & HPTE_V_VALID)) 1385 + goto out; 1386 + lbuf += 2; 1387 + nb += HPTE_SIZE; 1388 + 1389 + if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) 1390 + kvmppc_do_h_remove(kvm, 0, i, 0, tmp); 1391 + err = -EIO; 1392 + ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r, 1393 + tmp); 1394 + if (ret != H_SUCCESS) { 1395 + pr_err("kvm_htab_write ret %ld i=%ld v=%lx " 1396 + "r=%lx\n", ret, i, v, r); 1397 + goto out; 1398 + } 1399 + if (!rma_setup && is_vrma_hpte(v)) { 1400 + unsigned long psize = hpte_page_size(v, r); 1401 + unsigned long senc = slb_pgsize_encoding(psize); 1402 + unsigned long lpcr; 1403 + 1404 + kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | 1405 + (VRMA_VSID << SLB_VSID_SHIFT_1T); 1406 + lpcr = kvm->arch.lpcr & ~LPCR_VRMASD; 1407 
+ lpcr |= senc << (LPCR_VRMASD_SH - 4); 1408 + kvm->arch.lpcr = lpcr; 1409 + rma_setup = 1; 1410 + } 1411 + ++i; 1412 + hptp += 2; 1413 + } 1414 + 1415 + for (j = 0; j < hdr.n_invalid; ++j) { 1416 + if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) 1417 + kvmppc_do_h_remove(kvm, 0, i, 0, tmp); 1418 + ++i; 1419 + hptp += 2; 1420 + } 1421 + err = 0; 1422 + } 1423 + 1424 + out: 1425 + /* Order HPTE updates vs. rma_setup_done */ 1426 + smp_wmb(); 1427 + kvm->arch.rma_setup_done = rma_setup; 1428 + mutex_unlock(&kvm->lock); 1429 + 1430 + if (err) 1431 + return err; 1432 + return nb; 1433 + } 1434 + 1435 + static int kvm_htab_release(struct inode *inode, struct file *filp) 1436 + { 1437 + struct kvm_htab_ctx *ctx = filp->private_data; 1438 + 1439 + filp->private_data = NULL; 1440 + if (!(ctx->flags & KVM_GET_HTAB_WRITE)) 1441 + atomic_dec(&ctx->kvm->arch.hpte_mod_interest); 1442 + kvm_put_kvm(ctx->kvm); 1443 + kfree(ctx); 1444 + return 0; 1445 + } 1446 + 1447 + static struct file_operations kvm_htab_fops = { 1448 + .read = kvm_htab_read, 1449 + .write = kvm_htab_write, 1450 + .llseek = default_llseek, 1451 + .release = kvm_htab_release, 1452 + }; 1453 + 1454 + int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf) 1455 + { 1456 + int ret; 1457 + struct kvm_htab_ctx *ctx; 1458 + int rwflag; 1459 + 1460 + /* reject flags we don't recognize */ 1461 + if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE)) 1462 + return -EINVAL; 1463 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 1464 + if (!ctx) 1465 + return -ENOMEM; 1466 + kvm_get_kvm(kvm); 1467 + ctx->kvm = kvm; 1468 + ctx->index = ghf->start_index; 1469 + ctx->flags = ghf->flags; 1470 + ctx->first_pass = 1; 1471 + 1472 + rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? 
O_WRONLY : O_RDONLY; 1473 + ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag); 1474 + if (ret < 0) { 1475 + kvm_put_kvm(kvm); 1476 + return ret; 1477 + } 1478 + 1479 + if (rwflag == O_RDONLY) { 1480 + mutex_lock(&kvm->slots_lock); 1481 + atomic_inc(&kvm->arch.hpte_mod_interest); 1482 + /* make sure kvmppc_do_h_enter etc. see the increment */ 1483 + synchronize_srcu_expedited(&kvm->srcu); 1484 + mutex_unlock(&kvm->slots_lock); 1485 + } 1486 + 1487 + return ret; 1148 1488 } 1149 1489 1150 1490 void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
-12
arch/powerpc/kvm/book3s_hv.c
··· 1563 1563 return r; 1564 1564 } 1565 1565 1566 - static unsigned long slb_pgsize_encoding(unsigned long psize) 1567 - { 1568 - unsigned long senc = 0; 1569 - 1570 - if (psize > 0x1000) { 1571 - senc = SLB_VSID_L; 1572 - if (psize == 0x10000) 1573 - senc |= SLB_VSID_LP_01; 1574 - } 1575 - return senc; 1576 - } 1577 - 1578 1566 static void unpin_slot(struct kvm_memory_slot *memslot) 1579 1567 { 1580 1568 unsigned long *physp;
+17
arch/powerpc/kvm/powerpc.c
··· 354 354 r = 1; 355 355 #else 356 356 r = 0; 357 + break; 358 + #endif 359 + #ifdef CONFIG_KVM_BOOK3S_64_HV 360 + case KVM_CAP_PPC_HTAB_FD: 361 + r = 1; 362 + break; 357 363 #endif 358 364 break; 359 365 case KVM_CAP_NR_VCPUS: ··· 958 952 if (put_user(htab_order, (u32 __user *)argp)) 959 953 break; 960 954 r = 0; 955 + break; 956 + } 957 + 958 + case KVM_PPC_GET_HTAB_FD: { 959 + struct kvm *kvm = filp->private_data; 960 + struct kvm_get_htab_fd ghf; 961 + 962 + r = -EFAULT; 963 + if (copy_from_user(&ghf, argp, sizeof(ghf))) 964 + break; 965 + r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf); 961 966 break; 962 967 } 963 968 #endif /* CONFIG_KVM_BOOK3S_64_HV */
+3
include/uapi/linux/kvm.h
··· 634 634 #endif 635 635 #define KVM_CAP_IRQFD_RESAMPLE 82 636 636 #define KVM_CAP_PPC_BOOKE_WATCHDOG 83 637 + #define KVM_CAP_PPC_HTAB_FD 84 637 638 638 639 #ifdef KVM_CAP_IRQ_ROUTING 639 640 ··· 860 859 #define KVM_CREATE_SPAPR_TCE _IOW(KVMIO, 0xa8, struct kvm_create_spapr_tce) 861 860 /* Available with KVM_CAP_RMA */ 862 861 #define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma) 862 + /* Available with KVM_CAP_PPC_HTAB_FD */ 863 + #define KVM_PPC_GET_HTAB_FD _IOW(KVMIO, 0xaa, struct kvm_get_htab_fd) 863 864 864 865 /* 865 866 * ioctls for vcpu fds