KVM: MMU: Fix oopses with SLUB

The kvm mmu uses page->private on shadow page tables; so does slub, and
an oops results. Fix by allocating regular pages for shadows instead of
using slub.

Tested-by: S.Çağlar Onur <caglar@pardus.org.tr>
Signed-off-by: Avi Kivity <avi@qumranet.com>

+26 -13
+26 -13
drivers/kvm/mmu.c
··· 154 154 155 155 static struct kmem_cache *pte_chain_cache; 156 156 static struct kmem_cache *rmap_desc_cache; 157 - static struct kmem_cache *mmu_page_cache; 158 157 static struct kmem_cache *mmu_page_header_cache; 159 158 160 159 static int is_write_protection(struct kvm_vcpu *vcpu) ··· 224 225 kfree(mc->objects[--mc->nobjs]); 225 226 } 226 227 228 + static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache, 229 + int min, gfp_t gfp_flags) 230 + { 231 + struct page *page; 232 + 233 + if (cache->nobjs >= min) 234 + return 0; 235 + while (cache->nobjs < ARRAY_SIZE(cache->objects)) { 236 + page = alloc_page(gfp_flags); 237 + if (!page) 238 + return -ENOMEM; 239 + set_page_private(page, 0); 240 + cache->objects[cache->nobjs++] = page_address(page); 241 + } 242 + return 0; 243 + } 244 + 245 + static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc) 246 + { 247 + while (mc->nobjs) 248 + __free_page(mc->objects[--mc->nobjs]); 249 + } 250 + 227 251 static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags) 228 252 { 229 253 int r; ··· 259 237 rmap_desc_cache, 1, gfp_flags); 260 238 if (r) 261 239 goto out; 262 - r = mmu_topup_memory_cache(&vcpu->mmu_page_cache, 263 - mmu_page_cache, 4, gfp_flags); 240 + r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 4, gfp_flags); 264 241 if (r) 265 242 goto out; 266 243 r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache, ··· 287 266 { 288 267 mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache); 289 268 mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache); 290 - mmu_free_memory_cache(&vcpu->mmu_page_cache); 269 + mmu_free_memory_cache_page(&vcpu->mmu_page_cache); 291 270 mmu_free_memory_cache(&vcpu->mmu_page_header_cache); 292 271 } 293 272 ··· 479 458 { 480 459 ASSERT(is_empty_shadow_page(page_head->spt)); 481 460 list_del(&page_head->link); 482 - kfree(page_head->spt); 461 + __free_page(virt_to_page(page_head->spt)); 483 462 kfree(page_head); 484 463 ++kvm->n_free_mmu_pages; 
485 464 } ··· 1322 1301 kmem_cache_destroy(pte_chain_cache); 1323 1302 if (rmap_desc_cache) 1324 1303 kmem_cache_destroy(rmap_desc_cache); 1325 - if (mmu_page_cache) 1326 - kmem_cache_destroy(mmu_page_cache); 1327 1304 if (mmu_page_header_cache) 1328 1305 kmem_cache_destroy(mmu_page_header_cache); 1329 1306 } ··· 1337 1318 sizeof(struct kvm_rmap_desc), 1338 1319 0, 0, NULL); 1339 1320 if (!rmap_desc_cache) 1340 - goto nomem; 1341 - 1342 - mmu_page_cache = kmem_cache_create("kvm_mmu_page", 1343 - PAGE_SIZE, 1344 - PAGE_SIZE, 0, NULL); 1345 - if (!mmu_page_cache) 1346 1321 goto nomem; 1347 1322 1348 1323 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",