// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <clocksource/timer-riscv.h>
#include <asm/csr.h>
#include <asm/delay.h>
#include <asm/kvm_vcpu_timer.h>

static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
{
	return get_cycles64() + gt->time_delta;
}

static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
				     struct kvm_guest_timer *gt,
				     struct kvm_vcpu_timer *t)
{
	unsigned long flags;
	u64 cycles_now, cycles_delta, delta_ns;

	local_irq_save(flags);
	cycles_now = kvm_riscv_current_cycles(gt);
	if (cycles_now < cycles)
		cycles_delta = cycles - cycles_now;
	else
		cycles_delta = 0;
	delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift;
	local_irq_restore(flags);

	return delta_ns;
}

static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);

	return HRTIMER_NORESTART;
}

static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
{
	if (!t->init_done || !t->next_set)
		return -EINVAL;

	hrtimer_cancel(&t->hrt);
	t->next_set = false;

	return 0;
}

static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
{
#if defined(CONFIG_32BIT)
	csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
	csr_write(CSR_VSTIMECMPH, ncycles >> 32);
#else
	csr_write(CSR_VSTIMECMP, ncycles);
#endif
	return 0;
}

static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return -EINVAL;

	kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER);

	delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t);
	t->next_cycles = ncycles;
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;

	return 0;
}

int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	return t->timer_next_event(vcpu, ncycles);
}

static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_vcpu_kick(vcpu);

	return HRTIMER_NORESTART;
}

bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
	    kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
		return true;
	else
		return false;
}

static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return;

	delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
	if (delta_ns) {
		hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
		t->next_set = true;
	}
}

static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}

int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		reg_val = riscv_timebase;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		reg_val = kvm_riscv_current_cycles(gt);
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		reg_val = t->next_cycles;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON :
					  KVM_RISCV_TIMER_STATE_OFF;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;
	int ret = 0;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		ret = -EOPNOTSUPP;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		gt->time_delta = reg_val - get_cycles64();
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		t->next_cycles = reg_val;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		if (reg_val == KVM_RISCV_TIMER_STATE_ON)
			ret = kvm_riscv_vcpu_timer_next_event(vcpu, reg_val);
		else
			ret = kvm_riscv_vcpu_timer_cancel(t);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (t->init_done)
		return -EINVAL;

	hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->init_done = true;
	t->next_set = false;

	/* Enable sstc for every vcpu if available in hardware */
	if (riscv_isa_extension_available(NULL, SSTC)) {
		t->sstc_enabled = true;
		t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
		t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
	} else {
		t->sstc_enabled = false;
		t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
		t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
	}

	return 0;
}

int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
	vcpu->arch.timer.init_done = false;

	return ret;
}

int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	t->next_cycles = -1ULL;
	return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}

static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

#if defined(CONFIG_32BIT)
	csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
	csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
#else
	csr_write(CSR_HTIMEDELTA, gt->time_delta);
#endif
}

void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	kvm_riscv_vcpu_update_timedelta(vcpu);

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
	csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
#else
	csr_write(CSR_VSTIMECMP, t->next_cycles);
#endif

	/* timer should be enabled for the remaining operations */
	if (unlikely(!t->init_done))
		return;

	kvm_riscv_vcpu_timer_unblocking(vcpu);
}

void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

	t = &vcpu->arch.timer;
#if defined(CONFIG_32BIT)
	t->next_cycles = csr_read(CSR_VSTIMECMP);
	t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
#else
	t->next_cycles = csr_read(CSR_VSTIMECMP);
#endif
	/* timer should be enabled for the remaining operations */
	if (unlikely(!t->init_done))
		return;

	if (kvm_vcpu_is_blocking(vcpu))
		kvm_riscv_vcpu_timer_blocking(vcpu);
}

void kvm_riscv_guest_timer_init(struct kvm *kvm)
{
	struct kvm_guest_timer *gt = &kvm->arch.timer;

	riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
	gt->time_delta = -get_cycles64();
}