KVM: MMU: Fix oopses with SLUB

The kvm mmu uses page->private on shadow page tables; so does slub, and
an oops results. Fix by allocating regular pages for shadows instead of
using slub.

Tested-by: S.Çağlar Onur <caglar@pardus.org.tr>
Signed-off-by: Avi Kivity <avi@qumranet.com>
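
For context, this is roughly how the mmu consumes page->private: each
shadow page table page carries a back-pointer to its kvm_mmu_page header
in page->private, which is only safe if kvm owns the whole page. A
minimal sketch of that usage (hypothetical helper names, not the exact
mmu.c code):

	/* kvm's use of page->private on a shadow page: a back-pointer
	 * from the page frame to its kvm_mmu_page header.  slub keeps
	 * its own metadata in the same struct page fields, so handing
	 * out slab memory here corrupts one user or the other. */
	static void link_shadow_page(void *spt, struct kvm_mmu_page *sp)
	{
		set_page_private(virt_to_page(spt), (unsigned long)sp);
	}

	static struct kvm_mmu_page *shadow_page_header(void *spt)
	{
		return (struct kvm_mmu_page *)page_private(virt_to_page(spt));
	}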

 drivers/kvm/mmu.c | 39 ++++++++++++++++++++++++++-------------
 1 file changed, 26 insertions(+), 13 deletions(-)
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -154,7 +154,6 @@
 
 static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
-static struct kmem_cache *mmu_page_cache;
 static struct kmem_cache *mmu_page_header_cache;
 
 static int is_write_protection(struct kvm_vcpu *vcpu)
@@ -224,6 +223,29 @@
 	kfree(mc->objects[--mc->nobjs]);
 }
 
+static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
+				       int min, gfp_t gfp_flags)
+{
+	struct page *page;
+
+	if (cache->nobjs >= min)
+		return 0;
+	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
+		page = alloc_page(gfp_flags);
+		if (!page)
+			return -ENOMEM;
+		set_page_private(page, 0);
+		cache->objects[cache->nobjs++] = page_address(page);
+	}
+	return 0;
+}
+
+static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
+{
+	while (mc->nobjs)
+		__free_page(mc->objects[--mc->nobjs]);
+}
+
 static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
 {
 	int r;
@@ -259,8 +281,7 @@
 				   rmap_desc_cache, 1, gfp_flags);
 	if (r)
 		goto out;
-	r = mmu_topup_memory_cache(&vcpu->mmu_page_cache,
-				   mmu_page_cache, 4, gfp_flags);
+	r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 4, gfp_flags);
 	if (r)
 		goto out;
 	r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
@@ -287,7 +308,7 @@
 {
 	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
 	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
-	mmu_free_memory_cache(&vcpu->mmu_page_cache);
+	mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
 	mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
 }
 
@@ -479,7 +500,7 @@
 {
 	ASSERT(is_empty_shadow_page(page_head->spt));
 	list_del(&page_head->link);
-	kfree(page_head->spt);
+	__free_page(virt_to_page(page_head->spt));
 	kfree(page_head);
 	++kvm->n_free_mmu_pages;
 }
@@ -1322,8 +1343,6 @@
 		kmem_cache_destroy(pte_chain_cache);
 	if (rmap_desc_cache)
 		kmem_cache_destroy(rmap_desc_cache);
-	if (mmu_page_cache)
-		kmem_cache_destroy(mmu_page_cache);
 	if (mmu_page_header_cache)
 		kmem_cache_destroy(mmu_page_header_cache);
 }
@@ -1337,12 +1356,6 @@
 					    sizeof(struct kvm_rmap_desc),
 					    0, 0, NULL);
 	if (!rmap_desc_cache)
-		goto nomem;
-
-	mmu_page_cache = kmem_cache_create("kvm_mmu_page",
-					   PAGE_SIZE,
-					   PAGE_SIZE, 0, NULL);
-	if (!mmu_page_cache)
 		goto nomem;
 
 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
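
The allocate/free pairing the patch moves to, pulled out of the diff as
a standalone sketch (the demo_* names are made up for illustration):

	#include <linux/mm.h>
	#include <linux/gfp.h>

	/* Shadow pages now come straight from the page allocator, so
	 * page->private is kvm's alone; clear it before first use. */
	static void *demo_alloc_shadow_page(gfp_t gfp_flags)
	{
		struct page *page = alloc_page(gfp_flags);

		if (!page)
			return NULL;
		set_page_private(page, 0);
		return page_address(page);
	}

	/* Must pair with alloc_page(), not kfree(): the memory never
	 * belonged to a slab cache. */
	static void demo_free_shadow_page(void *spt)
	{
		__free_page(virt_to_page(spt));
	}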