arch/x86/kvm/lapic.c: +1 -1
arch/x86/kvm/x86.c: +6 -7
···
@@ -1823,7 +1823,8 @@
                 return 0;
         }
 
-        if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+        if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
+                                      sizeof(u32)))
                 return 1;
 
         vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
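
Note: with the added length argument, each caller of kvm_gfn_to_hva_cache_init() declares up front how many bytes it will ever access through the cache. The async-PF handler only reads and writes a 32-bit word, hence sizeof(u32).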
···
@@ -1953,12 +1952,9 @@
 
                 gpa_offset = data & ~(PAGE_MASK | 1);
 
-                /* Check that the address is 32-byte aligned. */
-                if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
-                        break;
-
                 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
-                     &vcpu->arch.pv_time, data & ~1ULL))
+                     &vcpu->arch.pv_time, data & ~1ULL,
+                     sizeof(struct pvclock_vcpu_time_info)))
                         vcpu->arch.pv_time_enabled = false;
                 else
                         vcpu->arch.pv_time_enabled = true;
···
@@ -1975,7 +1977,8 @@
                         return 1;
 
                 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
-                                              data & KVM_STEAL_VALID_BITS))
+                                              data & KVM_STEAL_VALID_BITS,
+                                              sizeof(struct kvm_steal_time)))
                         return 1;
 
                 vcpu->arch.st.msr_val = data;
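
Each MSR handler above now passes the size of the guest structure it maps: sizeof(u32) for the async-PF word, sizeof(struct pvclock_vcpu_time_info) for kvmclock, and sizeof(struct kvm_steal_time) for steal time. This is also why the explicit 32-byte alignment check on the kvmclock area can be dropped: a pvclock area that straddles a page boundary is now handled by the cache itself instead of being rejected.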
include/linux/kvm_host.h: +1 -1
···
@@ -518,7 +518,7 @@
 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                            void *data, unsigned long len);
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-                              gpa_t gpa);
+                              gpa_t gpa, unsigned long len);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
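
For illustration, a minimal caller of the extended API might look like the sketch below. The struct my_area type and the my_area_* helpers are hypothetical, invented for this example; only kvm_gfn_to_hva_cache_init() and kvm_write_guest_cached() come from the patched API.

        #include <linux/kvm_host.h>

        /* Hypothetical guest-shared structure; may cross a page boundary. */
        struct my_area {
                u64 seq;
                u64 payload[7];
        };

        /* Register the region once; the length is validated here, and a
         * region crossing a page (or memslot) boundary is now accepted. */
        static int my_area_setup(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                 gpa_t gpa)
        {
                return kvm_gfn_to_hva_cache_init(kvm, ghc, gpa,
                                                 sizeof(struct my_area));
        }

        /* Hot path: reuse the cached translation instead of re-resolving
         * the gpa on every write. */
        static int my_area_publish(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                   struct my_area *a)
        {
                return kvm_write_guest_cached(kvm, ghc, a, sizeof(*a));
        }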
include/linux/kvm_types.h: +1
virt/kvm/kvm_main.c: +37 -10
···
@@ -1541,21 +1541,38 @@
 }
 
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-                              gpa_t gpa)
+                              gpa_t gpa, unsigned long len)
 {
         struct kvm_memslots *slots = kvm_memslots(kvm);
         int offset = offset_in_page(gpa);
-        gfn_t gfn = gpa >> PAGE_SHIFT;
+        gfn_t start_gfn = gpa >> PAGE_SHIFT;
+        gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
+        gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
+        gfn_t nr_pages_avail;
 
         ghc->gpa = gpa;
         ghc->generation = slots->generation;
-        ghc->memslot = gfn_to_memslot(kvm, gfn);
-        ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
-        if (!kvm_is_error_hva(ghc->hva))
+        ghc->len = len;
+        ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+        ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+        if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
                 ghc->hva += offset;
-        else
-                return -EFAULT;
-
+        } else {
+                /*
+                 * If the requested region crosses two memslots, we still
+                 * verify that the entire region is valid here.
+                 */
+                while (start_gfn <= end_gfn) {
+                        ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+                        ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
+                                                   &nr_pages_avail);
+                        if (kvm_is_error_hva(ghc->hva))
+                                return -EFAULT;
+                        start_gfn += nr_pages_avail;
+                }
+                /* Use the slow path for cross page reads and writes. */
+                ghc->memslot = NULL;
+        }
         return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
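
Worked example of the bounds arithmetic above, assuming PAGE_SIZE = 4096: caching 8 bytes at gpa 0x1ffc gives start_gfn = 1 and end_gfn = 2, so nr_pages_needed = 2. The fast path survives only if gfn_to_hva_many() reports at least two contiguous pages available in the memslot; otherwise the while loop merely verifies that every page of the region is backed, and ghc->memslot = NULL marks the cache so subsequent cached reads and writes take the slow per-page path.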
···
@@ -1583,8 +1566,13 @@
         struct kvm_memslots *slots = kvm_memslots(kvm);
         int r;
 
+        BUG_ON(len > ghc->len);
+
         if (slots->generation != ghc->generation)
-                kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+                kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+        if (unlikely(!ghc->memslot))
+                return kvm_write_guest(kvm, ghc->gpa, data, len);
 
         if (kvm_is_error_hva(ghc->hva))
                 return -EFAULT;
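
Usage note: a caller may write any prefix of the registered region, but never more than it declared at init time; that is what the BUG_ON enforces. A minimal sketch, reusing the hypothetical ghc from the header example and assuming at least 4 bytes were registered:

        u32 val = 0;

        /* Fine: len == sizeof(val) <= ghc->len registered at init time. */
        kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));

        /* Passing a len larger than ghc->len would trip BUG_ON(). */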
···
@@ -1609,8 +1587,13 @@
         struct kvm_memslots *slots = kvm_memslots(kvm);
         int r;
 
+        BUG_ON(len > ghc->len);
+
         if (slots->generation != ghc->generation)
-                kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+                kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+        if (unlikely(!ghc->memslot))
+                return kvm_read_guest(kvm, ghc->gpa, data, len);
 
         if (kvm_is_error_hva(ghc->hva))
                 return -EFAULT;
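
kvm_read_guest_cached() gets the symmetric treatment: the BUG_ON bounds the read against the registered length, and a cache left in the slow-path state (ghc->memslot == NULL) falls back to kvm_read_guest(), which walks the region page by page.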