/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk(X...) printk(KERN_INFO X)
#else
#define dprintk(X...) do { } while (0)
#endif

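/* Reset the guest MSR to 64-bit mode (MSR_SF) with translation disabled. */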
static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, MSR_SF);
}

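/*
 * Look up the guest SLB entry that maps @eaddr: compare the ESID of
 * each valid entry against the effective address, using the 1T ESID
 * for 1T-segment entries.
 */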
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
				struct kvm_vcpu *vcpu,
				gva_t eaddr)
{
	int i;
	u64 esid = GET_ESID(eaddr);
	u64 esid_1t = GET_ESID_1T(eaddr);

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		u64 cmp_esid = esid;

		if (!vcpu->arch.slb[i].valid)
			continue;

		if (vcpu->arch.slb[i].tb)
			cmp_esid = esid_1t;

		if (vcpu->arch.slb[i].esid == cmp_esid)
			return &vcpu->arch.slb[i];
	}

	dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
		eaddr, esid, esid_1t);
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (vcpu->arch.slb[i].vsid)
			dprintk(" %d: %c%c%c %llx %llx\n", i,
				vcpu->arch.slb[i].valid ? 'v' : ' ',
				vcpu->arch.slb[i].large ? 'l' : ' ',
				vcpu->arch.slb[i].tb ? 't' : ' ',
				vcpu->arch.slb[i].esid,
				vcpu->arch.slb[i].vsid);
	}

	return NULL;
}

static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
{
	return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
}

static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
{
	return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
}

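/* Compute the virtual page number (VPN) for @eaddr under SLB entry @slb. */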
static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
{
	eaddr &= kvmppc_slb_offset_mask(slb);

	return (eaddr >> VPN_SHIFT) |
		((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
}

static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
					 bool data)
{
	struct kvmppc_slb *slb;

	slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slb)
		return 0;

	return kvmppc_slb_calc_vpn(slb, eaddr);
}

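/* Convert an MMU_PAGE_* constant to log2 of the page size (4k default). */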
static int mmu_pagesize(int mmu_pg)
{
	switch (mmu_pg) {
	case MMU_PAGE_64K:
		return 16;
	case MMU_PAGE_16M:
		return 24;
	}
	return 12;
}

static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
	return mmu_pagesize(slbe->base_page_size);
}

static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);

	return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
}

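/*
 * Compute the host address of the PTEG that may map @eaddr: hash the
 * VPN, fold the hash into the HTAB base/size taken from the guest's
 * SDR1, and use the secondary hash if @second is set.
 */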
static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
				struct kvmppc_slb *slbe, gva_t eaddr,
				bool second)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u64 hash, pteg, htabsize;
	u32 ssize;
	hva_t r;
	u64 vpn;

	/* Use 1ULL so the shift cannot overflow a 32-bit int */
	htabsize = ((1ULL << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);

	vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
	ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
	hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
	if (second)
		hash = ~hash;
	hash &= ((1ULL << 39ULL) - 1ULL);
	hash &= htabsize;
	hash <<= 7ULL;

	pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
	pteg |= hash;

	dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n",
		kvmppc_mmu_book3s_64_get_page(slbe, eaddr),
		vcpu_book3s->sdr1, pteg, slbe->vsid);

	/* When running a PAPR guest, SDR1 contains a host virtual address
	 * instead of a guest physical address */
	if (vcpu->arch.papr_enabled)
		r = pteg;
	else
		r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);

	if (kvm_is_error_hva(r))
		return r;
	return r | (pteg & ~PAGE_MASK);
}

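/* Build the AVPN field used to match the first doubleword of an HPTE. */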
static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
	u64 avpn;

	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);

	if (p < 16)
		avpn >>= ((80 - p) - 56) - 8;	/* 16 - p */
	else
		avpn <<= p - 16;

	return avpn;
}

/*
 * Return page size encoded in the second word of a HPTE, or
 * -1 for an invalid encoding for the base page size indicated by
 * the SLB entry. This doesn't handle mixed pagesize segments yet.
 */
static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
{
	switch (slbe->base_page_size) {
	case MMU_PAGE_64K:
		if ((r & 0xf000) == 0x1000)
			return MMU_PAGE_64K;
		break;
	case MMU_PAGE_16M:
		if ((r & 0xff000) == 0)
			return MMU_PAGE_16M;
		break;
	}
	return -1;
}

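/*
 * Translate a guest effective address to a guest real address by
 * searching the guest hash page table, trying the primary and then
 * the secondary PTEG. Fills in @gpte on success.
 */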
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, bool data,
				      bool iswrite)
{
	struct kvmppc_slb *slbe;
	hva_t ptegp;
	u64 pteg[16];
	u64 avpn = 0;
	u64 v, r;
	u64 v_val, v_mask;
	u64 eaddr_mask;
	int i;
	u8 pp, key = 0;
	bool found = false;
	bool second = false;
	int pgsize;
	ulong mp_ea = vcpu->arch.magic_page_ea;

	/* Magic page override */
	if (unlikely(mp_ea) &&
	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
	    !(vcpu->arch.shared->msr & MSR_PR)) {
		gpte->eaddr = eaddr;
		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
		/* The page offset comes from the faulting EA */
		gpte->raddr = vcpu->arch.magic_page_pa | (eaddr & 0xfff);
		gpte->raddr &= KVM_PAM;
		gpte->may_execute = true;
		gpte->may_read = true;
		gpte->may_write = true;
		gpte->page_size = MMU_PAGE_4K;

		return 0;
	}

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slbe)
		goto no_seg_found;

	avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
	v_val = avpn & HPTE_V_AVPN;

	if (slbe->tb)
		v_val |= SLB_VSID_B_1T;
	if (slbe->large)
		v_val |= HPTE_V_LARGE;
	v_val |= HPTE_V_VALID;

	v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
		HPTE_V_SECONDARY;

	pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);

do_second:
	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
	if (kvm_is_error_hva(ptegp))
		goto no_page_found;

	if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
		printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp);
		goto no_page_found;
	}

	if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
		key = 4;
	else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
		key = 4;

	for (i = 0; i < 16; i += 2) {
		/* Check all relevant fields of 1st dword */
		if ((pteg[i] & v_mask) == v_val) {
			/* If large page bit is set, check pgsize encoding */
			if (slbe->large &&
			    (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
				pgsize = decode_pagesize(slbe, pteg[i+1]);
				if (pgsize < 0)
					continue;
			}
			found = true;
			break;
		}
	}

	if (!found) {
		if (second)
			goto no_page_found;
		v_val |= HPTE_V_SECONDARY;
		second = true;
		goto do_second;
	}

	v = pteg[i];
	r = pteg[i+1];
	pp = (r & HPTE_R_PP) | key;
	if (r & HPTE_R_PP0)
		pp |= 8;

	gpte->eaddr = eaddr;
	gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);

	eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
	gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
	gpte->page_size = pgsize;
	gpte->may_execute = ((r & HPTE_R_N) ? false : true);
	gpte->may_read = false;
	gpte->may_write = false;

	switch (pp) {
	case 0:
	case 1:
	case 2:
	case 6:
		gpte->may_write = true;
		/* fall through */
	case 3:
	case 5:
	case 7:
	case 10:
		gpte->may_read = true;
		break;
	}

	dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
		"-> 0x%lx\n",
		eaddr, avpn, gpte->vpage, gpte->raddr);

	/* Update PTE R and C bits, so the guest's swapper knows we used the
	 * page */
	if (gpte->may_read && !(r & HPTE_R_R)) {
		/*
		 * Set the accessed flag.
		 * We have to write this back with a single byte write
		 * because another vcpu may be accessing this on
		 * non-PAPR platforms such as mac99, and this is
		 * what real hardware does.
		 */
		char __user *addr = (char __user *) &pteg[i+1];
		r |= HPTE_R_R;
		put_user(r >> 8, addr + 6);
	}
	if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
		/* Set the dirty flag */
		/* Use a single byte write */
		char __user *addr = (char __user *) &pteg[i+1];
		r |= HPTE_R_C;
		put_user(r, addr + 7);
	}

	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);

	if (!gpte->may_read || (iswrite && !gpte->may_write))
		return -EPERM;
	return 0;

no_page_found:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	return -ENOENT;

no_seg_found:
	dprintk("KVM MMU: Trigger segment fault\n");
	return -EINVAL;
}

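/* Emulate the slbmte instruction: decode RB/RS into a guest SLB entry. */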
static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	u64 esid, esid_1t;
	int slb_nr;
	struct kvmppc_slb *slbe;

	dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);

	vcpu_book3s = to_book3s(vcpu);

	esid = GET_ESID(rb);
	esid_1t = GET_ESID_1T(rb);
	slb_nr = rb & 0xfff;

	/* valid SLB indices are 0 .. slb_nr - 1 */
	if (slb_nr >= vcpu->arch.slb_nr)
		return;

	slbe = &vcpu->arch.slb[slb_nr];

	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
	slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
	slbe->esid  = slbe->tb ? esid_1t : esid;
	slbe->vsid  = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
	slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
	slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
	slbe->Kp    = (rs & SLB_VSID_KP) ? 1 : 0;
	slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
	slbe->class = (rs & SLB_VSID_C) ? 1 : 0;

	slbe->base_page_size = MMU_PAGE_4K;
	if (slbe->large) {
		if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
			switch (rs & SLB_VSID_LP) {
			case SLB_VSID_LP_00:
				slbe->base_page_size = MMU_PAGE_16M;
				break;
			case SLB_VSID_LP_01:
				slbe->base_page_size = MMU_PAGE_64K;
				break;
			}
		} else
			slbe->base_page_size = MMU_PAGE_16M;
	}

	slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
	slbe->origv = rs;

	/* Map the new segment */
	kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
}

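/* slbmfee/slbmfev: read back the E and V doublewords of a guest SLB entry. */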
static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr >= vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->orige;
}

static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr >= vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->origv;
}

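/* Emulate slbie: invalidate the SLB entry for @ea and flush its shadow. */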
static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
	struct kvmppc_slb *slbe;
	u64 seg_size;

	dprintk("KVM MMU: slbie(0x%llx)\n", ea);

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);

	if (!slbe)
		return;

	dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);

	slbe->valid = false;
	slbe->orige = 0;
	slbe->origv = 0;

	seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
	kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
}

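/* Emulate slbia: invalidate all guest SLB entries except entry 0. */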
static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
	int i;

	dprintk("KVM MMU: slbia()\n");

	for (i = 1; i < vcpu->arch.slb_nr; i++) {
		vcpu->arch.slb[i].valid = false;
		vcpu->arch.slb[i].orige = 0;
		vcpu->arch.slb[i].origv = 0;
	}

	if (vcpu->arch.shared->msr & MSR_IR) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}
}

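/*
 * Emulate mtsrin for guests using 32-bit segment registers by
 * synthesizing the equivalent slbmte operands (see comment below).
 */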
static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
					ulong value)
{
	u64 rb = 0, rs = 0;

	/*
	 * According to Book3 2.01 mtsrin is implemented as:
	 *
	 * The SLB entry specified by (RB)32:35 is loaded from register
	 * RS, as follows.
	 *
	 * SLBE Bit	Source			SLB Field
	 *
	 * 0:31		0x0000_0000		ESID-0:31
	 * 32:35	(RB)32:35		ESID-32:35
	 * 36		0b1			V
	 * 37:61	0x00_0000 || 0b0	VSID-0:24
	 * 62:88	(RS)37:63		VSID-25:51
	 * 89:91	(RS)33:35		Ks Kp N
	 * 92		(RS)36			L ((RS)36 must be 0b0)
	 * 93		0b0			C
	 */

	dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

	/* ESID = srnum */
	rb |= (srnum & 0xf) << 28;
	/* Set the valid bit */
	rb |= 1 << 27;
	/* Index = ESID */
	rb |= srnum;

	/* VSID = VSID */
	rs |= (value & 0xfffffff) << 12;
	/* flags = flags */
	rs |= ((value >> 28) & 0x7) << 9;

	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}

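/* Emulate tlbie: flush the shadow PTEs that may map the given address. */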
static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
				       bool large)
{
	u64 mask = 0xFFFFFFFFFULL;
	long i;
	struct kvm_vcpu *v;

	dprintk("KVM MMU: tlbie(0x%lx)\n", va);

	/*
	 * The tlbie instruction changed behaviour starting with
	 * POWER6. POWER6 and later don't have the large page flag
	 * in the instruction but in the RB value, along with bits
	 * indicating page and segment sizes.
	 */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
		/* POWER6 or later */
		if (va & 1) {		/* L bit */
			if ((va & 0xf000) == 0x1000)
				mask = 0xFFFFFFFF0ULL;	/* 64k page */
			else
				mask = 0xFFFFFF000ULL;	/* 16M page */
		}
	} else {
		/* older processors, e.g. PPC970 */
		if (large)
			mask = 0xFFFFFF000ULL;
	}
	/* flush this VA on all vcpus */
	kvm_for_each_vcpu(i, v, vcpu->kvm)
		kvmppc_mmu_pte_vflush(v, va >> 12, mask);
}


#ifdef CONFIG_PPC_64K_PAGES
static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
{
	ulong mp_ea = vcpu->arch.magic_page_ea;

	return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) &&
		(mp_ea >> SID_SHIFT) == esid;
}
#endif

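/*
 * Map a guest ESID to the VSID used in the shadow MMU, tagging it
 * with the current real/virtual mode and privilege so that different
 * translation regimes get distinct shadow segments.
 */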
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
					     u64 *vsid)
{
	ulong ea = esid << SID_SHIFT;
	struct kvmppc_slb *slb = NULL;	/* silence may-be-uninitialized */
	u64 gvsid = esid;
	ulong mp_ea = vcpu->arch.magic_page_ea;
	int pagesize = MMU_PAGE_64K;

	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
		if (slb) {
			gvsid = slb->vsid;
			pagesize = slb->base_page_size;
			if (slb->tb) {
				gvsid <<= SID_SHIFT_1T - SID_SHIFT;
				gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
				gvsid |= VSID_1T;
			}
		}
	}

	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		gvsid = VSID_REAL | esid;
		break;
	case MSR_IR:
		gvsid |= VSID_REAL_IR;
		break;
	case MSR_DR:
		gvsid |= VSID_REAL_DR;
		break;
	case MSR_DR|MSR_IR:
		if (!slb)
			goto no_slb;

		break;
	default:
		BUG();
		break;
	}

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Mark this as a 64k segment if the host is using
	 * 64k pages, the host MMU supports 64k pages and
	 * the guest segment page size is >= 64k,
	 * but not if this segment contains the magic page.
	 */
	if (pagesize >= MMU_PAGE_64K &&
	    mmu_psize_defs[MMU_PAGE_64K].shift &&
	    !segment_contains_magic_page(vcpu, esid))
		gvsid |= VSID_64K;
#endif

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	*vsid = gvsid;
	return 0;

no_slb:
	/* Catch magic page case */
	if (unlikely(mp_ea) &&
	    unlikely(esid == (mp_ea >> SID_SHIFT)) &&
	    !(vcpu->arch.shared->msr & MSR_PR)) {
		*vsid = VSID_REAL | esid;
		return 0;
	}

	return -EINVAL;
}

static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
{
	return (to_book3s(vcpu)->hid[5] & 0x80);
}

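/* Wire up the Book3S 64-bit softmmu callbacks for this vcpu. */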
void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	mmu->mfsrin = NULL;
	mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
	mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
	mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
	mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
	mmu->slbie = kvmppc_mmu_book3s_64_slbie;
	mmu->slbia = kvmppc_mmu_book3s_64_slbia;
	mmu->xlate = kvmppc_mmu_book3s_64_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
	mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
	mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
	mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
	mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}