Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM bugfixes from Marcelo Tosatti.

* git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: x86: use dynamic percpu allocations for shared msrs area
KVM: PPC: Book3S HV: Fix compilation without CONFIG_PPC_POWERNV
powerpc: Corrected include header path in kvm_para.h
Add rcu user eqs exception hooks for async page fault

+33 -9
+1 -1
arch/powerpc/include/uapi/asm/kvm_para.h
··· 78 78 79 79 #define KVM_HCALL_TOKEN(num) _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num) 80 80 81 - #include <uapi/asm/epapr_hcalls.h> 81 + #include <asm/epapr_hcalls.h> 82 82 83 83 #define KVM_FEATURE_MAGIC_PAGE 1 84 84
+4
arch/powerpc/kvm/book3s_hv_ras.c
··· 79 79 static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) 80 80 { 81 81 unsigned long srr1 = vcpu->arch.shregs.msr; 82 + #ifdef CONFIG_PPC_POWERNV 82 83 struct opal_machine_check_event *opal_evt; 84 + #endif 83 85 long handled = 1; 84 86 85 87 if (srr1 & SRR1_MC_LDSTERR) { ··· 119 117 handled = 0; 120 118 } 121 119 120 + #ifdef CONFIG_PPC_POWERNV 122 121 /* 123 122 * See if OPAL has already handled the condition. 124 123 * We assume that if the condition is recovered then OPAL ··· 134 131 135 132 if (handled) 136 133 opal_evt->in_use = 0; 134 + #endif 137 135 138 136 return handled; 139 137 }
+10 -2
arch/x86/kernel/kvm.c
··· 43 43 #include <asm/apicdef.h> 44 44 #include <asm/hypervisor.h> 45 45 #include <asm/kvm_guest.h> 46 + #include <asm/context_tracking.h> 46 47 47 48 static int kvmapf = 1; 48 49 ··· 122 121 struct kvm_task_sleep_node n, *e; 123 122 DEFINE_WAIT(wait); 124 123 124 + rcu_irq_enter(); 125 + 125 126 spin_lock(&b->lock); 126 127 e = _find_apf_task(b, token); 127 128 if (e) { ··· 131 128 hlist_del(&e->link); 132 129 kfree(e); 133 130 spin_unlock(&b->lock); 131 + 132 + rcu_irq_exit(); 134 133 return; 135 134 } 136 135 ··· 157 152 /* 158 153 * We cannot reschedule. So halt. 159 154 */ 155 + rcu_irq_exit(); 160 156 native_safe_halt(); 157 + rcu_irq_enter(); 161 158 local_irq_disable(); 162 159 } 163 160 } 164 161 if (!n.halted) 165 162 finish_wait(&n.wq, &wait); 166 163 164 + rcu_irq_exit(); 167 165 return; 168 166 } 169 167 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait); ··· 260 252 break; 261 253 case KVM_PV_REASON_PAGE_NOT_PRESENT: 262 254 /* page is swapped out by the host. */ 263 - rcu_irq_enter(); 255 + exception_enter(regs); 264 256 exit_idle(); 265 257 kvm_async_pf_task_wait((u32)read_cr2()); 266 - rcu_irq_exit(); 258 + exception_exit(regs); 267 259 break; 268 260 case KVM_PV_REASON_PAGE_READY: 269 261 rcu_irq_enter();
+18 -6
arch/x86/kvm/x86.c
··· 120 120 }; 121 121 122 122 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; 123 - static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs); 123 + static struct kvm_shared_msrs __percpu *shared_msrs; 124 124 125 125 struct kvm_stats_debugfs_item debugfs_entries[] = { 126 126 { "pf_fixed", VCPU_STAT(pf_fixed) }, ··· 191 191 192 192 static void shared_msr_update(unsigned slot, u32 msr) 193 193 { 194 - struct kvm_shared_msrs *smsr; 195 194 u64 value; 195 + unsigned int cpu = smp_processor_id(); 196 + struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); 196 197 197 - smsr = &__get_cpu_var(shared_msrs); 198 198 /* only read, and nobody should modify it at this time, 199 199 * so don't need lock */ 200 200 if (slot >= shared_msrs_global.nr) { ··· 226 226 227 227 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) 228 228 { 229 - struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); 229 + unsigned int cpu = smp_processor_id(); 230 + struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); 230 231 231 232 if (((value ^ smsr->values[slot].curr) & mask) == 0) 232 233 return; ··· 243 242 244 243 static void drop_user_return_notifiers(void *ignore) 245 244 { 246 - struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); 245 + unsigned int cpu = smp_processor_id(); 246 + struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); 247 247 248 248 if (smsr->registered) 249 249 kvm_on_user_return(&smsr->urn); ··· 5235 5233 goto out; 5236 5234 } 5237 5235 5236 + r = -ENOMEM; 5237 + shared_msrs = alloc_percpu(struct kvm_shared_msrs); 5238 + if (!shared_msrs) { 5239 + printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n"); 5240 + goto out; 5241 + } 5242 + 5238 5243 r = kvm_mmu_module_init(); 5239 5244 if (r) 5240 - goto out; 5245 + goto out_free_percpu; 5241 5246 5242 5247 kvm_set_mmio_spte_mask(); 5243 5248 kvm_init_msr_list(); ··· 5267 5258 5268 5259 return 0; 5269 5260 5261 + out_free_percpu: 5262 + free_percpu(shared_msrs); 5270 5263 out: 5271 5264 return r; 5272 5265 } ··· 5286 5275 #endif 5287 5276 kvm_x86_ops = NULL; 5288 5277 kvm_mmu_module_exit(); 5278 + free_percpu(shared_msrs); 5289 5279 } 5290 5280 5291 5281 int kvm_emulate_halt(struct kvm_vcpu *vcpu)