/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static u32 host_vtimer_irq_flags;

void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
        vcpu->arch.timer_cpu.active_cleared_last = false;
}

static cycle_t kvm_phys_timer_read(void)
{
        return timecounter->cc->read(timecounter->cc);
}

static bool timer_is_armed(struct arch_timer_cpu *timer)
{
        return timer->armed;
}

/* timer_arm: as in "arm the timer", not as in ARM the company */
static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
{
        timer->armed = true;
        hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
                      HRTIMER_MODE_ABS);
}

static void timer_disarm(struct arch_timer_cpu *timer)
{
        if (timer_is_armed(timer)) {
                hrtimer_cancel(&timer->timer);
                cancel_work_sync(&timer->expired);
                timer->armed = false;
        }
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;

        /*
         * We disable the timer in the world switch and let it be
         * handled by kvm_timer_sync_hwstate(). Getting a timer
         * interrupt at this point is a sure sign of some major
         * breakage.
         */
        pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
        return IRQ_HANDLED;
}

/*
 * Work function for handling the backup timer that we schedule when a vcpu is
 * no longer running, but had a timer programmed to fire in the future.
 */
static void kvm_timer_inject_irq_work(struct work_struct *work)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
        vcpu->arch.timer_cpu.armed = false;

        WARN_ON(!kvm_timer_should_fire(vcpu));

        /*
         * If the vcpu is blocked we want to wake it up so that it will see
         * the timer has expired when entering the guest.
         */
        kvm_vcpu_kick(vcpu);
}

static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
{
        cycle_t cval, now;

        cval = vcpu->arch.timer_cpu.cntv_cval;
        now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

        if (now < cval) {
                u64 ns;

                ns = cyclecounter_cyc2ns(timecounter->cc,
                                         cval - now,
                                         timecounter->mask,
                                         &timecounter->frac);
                return ns;
        }

        return 0;
}

static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
{
        struct arch_timer_cpu *timer;
        struct kvm_vcpu *vcpu;
        u64 ns;

        timer = container_of(hrt, struct arch_timer_cpu, timer);
        vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

        /*
         * Check that the timer has really expired from the guest's
         * PoV (NTP on the host may have forced it to expire
         * early). If we should have slept longer, restart it.
         */
        ns = kvm_timer_compute_delta(vcpu);
        if (unlikely(ns)) {
                hrtimer_forward_now(hrt, ns_to_ktime(ns));
                return HRTIMER_RESTART;
        }

        schedule_work(&timer->expired);
        return HRTIMER_NORESTART;
}

static bool kvm_timer_irq_can_fire(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        return !(timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
                (timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE);
}

bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        cycle_t cval, now;

        if (!kvm_timer_irq_can_fire(vcpu))
                return false;

        cval = timer->cntv_cval;
        now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

        return cval <= now;
}

static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
{
        int ret;
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        BUG_ON(!vgic_initialized(vcpu->kvm));

        timer->active_cleared_last = false;
        timer->irq.level = new_level;
        trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->irq.irq,
                                   timer->irq.level);
        ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
                                         timer->irq.irq,
                                         timer->irq.level);
        WARN_ON(ret);
}

/*
 * Check if there was a change in the timer state (should we raise or lower
 * the line level to the GIC).
 */
static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        /*
         * If userspace modified the timer registers via SET_ONE_REG before
         * the vgic was initialized, we mustn't set the timer->irq.level value
         * because the guest would never see the interrupt. Instead wait
         * until we call this function from kvm_timer_flush_hwstate.
         */
        if (!vgic_initialized(vcpu->kvm) || !timer->enabled)
                return -ENODEV;

        if (kvm_timer_should_fire(vcpu) != timer->irq.level)
                kvm_timer_update_irq(vcpu, !timer->irq.level);

        return 0;
}

/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
void kvm_timer_schedule(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        BUG_ON(timer_is_armed(timer));

        /*
         * No need to schedule a background timer if the guest timer has
         * already expired, because kvm_vcpu_block will return before putting
         * the thread to sleep.
         */
        if (kvm_timer_should_fire(vcpu))
                return;

        /*
         * If the timer is not capable of raising interrupts (disabled or
         * masked), then there's no more work for us to do.
         */
        if (!kvm_timer_irq_can_fire(vcpu))
                return;

        /* The timer has not yet expired, schedule a background timer */
        timer_arm(timer, kvm_timer_compute_delta(vcpu));
}

void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        timer_disarm(timer);
}

/**
 * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer has expired while we were running in the host,
 * and inject an interrupt if that was the case.
 */
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        bool phys_active;
        int ret;

        if (kvm_timer_update_state(vcpu))
                return;

        /*
         * If we enter the guest with the virtual input level to the VGIC
         * asserted, then we have already told the VGIC what we need to, and
         * we don't need to exit from the guest until the guest deactivates
         * the already injected interrupt, so therefore we should set the
         * hardware active state to prevent unnecessary exits from the guest.
         *
         * Also, if we enter the guest with the virtual timer interrupt active,
         * then it must be active on the physical distributor, because we set
         * the HW bit and the guest must be able to deactivate the virtual and
         * physical interrupt at the same time.
         *
         * Conversely, if the virtual input level is deasserted and the virtual
         * interrupt is not active, then always clear the hardware active state
         * to ensure that hardware interrupts from the timer triggers a guest
         * exit.
         */
        phys_active = timer->irq.level ||
                        kvm_vgic_map_is_active(vcpu, timer->irq.irq);

        /*
         * We want to avoid hitting the (re)distributor as much as
         * possible, as this is a potentially expensive MMIO access
         * (not to mention locks in the irq layer), and a solution for
         * this is to cache the "active" state in memory.
         *
         * Things to consider: we cannot cache an "active set" state,
         * because the HW can change this behind our back (it becomes
         * "clear" in the HW). We must then restrict the caching to
         * the "clear" state.
         *
         * The cache is invalidated on:
         * - vcpu put, indicating that the HW cannot be trusted to be
         *   in a sane state on the next vcpu load,
         * - any change in the interrupt state
         *
         * Usage conditions:
         * - cached value is "active clear"
         * - value to be programmed is "active clear"
         */
        if (timer->active_cleared_last && !phys_active)
                return;

        ret = irq_set_irqchip_state(host_vtimer_irq,
                                    IRQCHIP_STATE_ACTIVE,
                                    phys_active);
        WARN_ON(ret);

        timer->active_cleared_last = !phys_active;
}

/**
 * kvm_timer_sync_hwstate - sync timer state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer has expired while we were running in the guest,
 * and inject an interrupt if that was the case.
 */
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        BUG_ON(timer_is_armed(timer));

        /*
         * The guest could have modified the timer registers or the timer
         * could have expired, update the timer state.
         */
        kvm_timer_update_state(vcpu);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
                         const struct kvm_irq_level *irq)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        /*
         * The vcpu timer irq number cannot be determined in
         * kvm_timer_vcpu_init() because it is called much before
         * kvm_vcpu_set_target(). To handle this, we determine
         * vcpu timer irq number when the vcpu is reset.
         */
        timer->irq.irq = irq->irq;

        /*
         * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
         * and to 0 for ARMv7. We provide an implementation that always
         * resets the timer to be disabled and unmasked and is compliant with
         * the ARMv7 architecture.
         */
        timer->cntv_ctl = 0;
        kvm_timer_update_state(vcpu);

        return 0;
}

void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
        hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        timer->timer.function = kvm_timer_expire;
}

static void kvm_timer_init_interrupt(void *info)
{
        enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                timer->cntv_ctl = value;
                break;
        case KVM_REG_ARM_TIMER_CNT:
                vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
                break;
        case KVM_REG_ARM_TIMER_CVAL:
                timer->cntv_cval = value;
                break;
        default:
                return -1;
        }

        kvm_timer_update_state(vcpu);
        return 0;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                return timer->cntv_ctl;
        case KVM_REG_ARM_TIMER_CNT:
                return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
        case KVM_REG_ARM_TIMER_CVAL:
                return timer->cntv_cval;
        }
        return (u64)-1;
}

static int kvm_timer_starting_cpu(unsigned int cpu)
{
        kvm_timer_init_interrupt(NULL);
        return 0;
}

static int kvm_timer_dying_cpu(unsigned int cpu)
{
        disable_percpu_irq(host_vtimer_irq);
        return 0;
}

int kvm_timer_hyp_init(void)
{
        struct arch_timer_kvm_info *info;
        int err;

        info = arch_timer_get_kvm_info();
        timecounter = &info->timecounter;

        if (info->virtual_irq <= 0) {
                kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
                        info->virtual_irq);
                return -ENODEV;
        }
        host_vtimer_irq = info->virtual_irq;

        host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
        if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
            host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
                kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
                        host_vtimer_irq);
                host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
        }

        err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
                                 "kvm guest timer", kvm_get_running_vcpus());
        if (err) {
                kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
                        host_vtimer_irq, err);
                return err;
        }

        kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);

        cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
                          "AP_KVM_ARM_TIMER_STARTING", kvm_timer_starting_cpu,
                          kvm_timer_dying_cpu);
        return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        timer_disarm(timer);
        kvm_vgic_unmap_phys_irq(vcpu, timer->irq.irq);
}

int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct irq_desc *desc;
        struct irq_data *data;
        int phys_irq;
        int ret;

        if (timer->enabled)
                return 0;

        /*
         * Find the physical IRQ number corresponding to the host_vtimer_irq
         */
        desc = irq_to_desc(host_vtimer_irq);
        if (!desc) {
                kvm_err("%s: no interrupt descriptor\n", __func__);
                return -EINVAL;
        }

        data = irq_desc_get_irq_data(desc);
        while (data->parent_data)
                data = data->parent_data;

        phys_irq = data->hwirq;

        /*
         * Tell the VGIC that the virtual interrupt is tied to a
         * physical interrupt. We do that once per VCPU.
         */
        ret = kvm_vgic_map_phys_irq(vcpu, timer->irq.irq, phys_irq);
        if (ret)
                return ret;

        /*
         * There is a potential race here between VCPUs starting for the first
         * time, which may be enabling the timer multiple times. That doesn't
         * hurt though, because we're just setting a variable to the same
         * variable that it already was. The important thing is that all
         * VCPUs have the enabled variable set, before entering the guest, if
         * the arch timers are enabled.
         */
        if (timecounter)
                timer->enabled = 1;

        return 0;
}

void kvm_timer_init(struct kvm *kvm)
{
        kvm->arch.timer.cntvoff = kvm_phys_timer_read();
}