/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ASM_ARM_KVM_ARCH_TIMER_H
#define __ASM_ARM_KVM_ARCH_TIMER_H

#include <linux/clocksource.h>
#include <linux/hrtimer.h>

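/*
 * Timer contexts tracked for each vCPU: the EL0-accessible physical and
 * virtual timers come first, followed by the EL2 timers that only matter
 * when the guest itself acts as a hypervisor (nested virt).
 * NR_KVM_EL0_TIMERS marks the boundary between the two groups.
 */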
enum kvm_arch_timers {
	TIMER_PTIMER,
	TIMER_VTIMER,
	NR_KVM_EL0_TIMERS,
	TIMER_HVTIMER = NR_KVM_EL0_TIMERS,
	TIMER_HPTIMER,
	NR_KVM_TIMERS
};

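/*
 * Per-timer registers that can be accessed via kvm_arm_timer_read_sysreg()
 * and kvm_arm_timer_write_sysreg(): counter, compare value, timer value,
 * control register and virtual offset.
 */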
enum kvm_arch_timer_regs {
	TIMER_REG_CNT,
	TIMER_REG_CVAL,
	TIMER_REG_TVAL,
	TIMER_REG_CTL,
	TIMER_REG_VOFF,
};

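/*
 * Counter offsets are subtracted from the physical count to produce the
 * guest's view of a counter. The effective offset of a context is the sum
 * of the VM-wide and per-vCPU contributions (see timer_get_offset() below).
 */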
struct arch_timer_offset {
	/*
	 * If set, pointer to one of the offsets in the kvm's offset
	 * structure. If NULL, assume a zero offset.
	 */
	u64	*vm_offset;
	/*
	 * If set, pointer to one of the offsets in the vcpu's sysreg
	 * array. If NULL, assume a zero offset.
	 */
	u64	*vcpu_offset;
};

struct arch_timer_vm_data {
	/* Offset applied to the virtual timer/counter */
	u64	voffset;
	/* Offset applied to the physical timer/counter */
	u64	poffset;

	/* The PPI for each timer, global to the VM */
	u8	ppi[NR_KVM_TIMERS];
};

struct arch_timer_context {
	/* Emulated Timer (may be unused) */
	struct hrtimer			hrtimer;
	u64				ns_frac;

	/* Offset for this counter/timer */
	struct arch_timer_offset	offset;
	/*
	 * We have multiple paths which can save/restore the timer state onto
	 * the hardware, so we need some way of keeping track of where the
	 * latest state is.
	 */
	bool				loaded;

	/* Output level of the timer IRQ */
	struct {
		bool			level;
	} irq;

	/* Who am I? */
	enum kvm_arch_timers		timer_id;

	/* Duplicated state from arch_timer.c for convenience */
	u32				host_timer_irq;
};

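/*
 * Describes how a vCPU's timer contexts map onto the hardware at a given
 * point in time: "direct" contexts are backed by the CPU's timers while the
 * guest runs, whereas "emulated" contexts are driven in software through
 * their hrtimer. The split depends on VHE and on whether the vCPU is
 * currently in a nested hyp context, and is computed by get_timer_map().
 */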
struct timer_map {
	struct arch_timer_context *direct_vtimer;
	struct arch_timer_context *direct_ptimer;
	struct arch_timer_context *emul_vtimer;
	struct arch_timer_context *emul_ptimer;
};

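/*
 * Typical use (a sketch; the actual mapping logic lives in arch_timer.c):
 *
 *	struct timer_map map;
 *
 *	get_timer_map(vcpu, &map);
 *	if (map.emul_ptimer)
 *		... drive the physical timer in software ...
 */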
void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map);

struct arch_timer_cpu {
	struct arch_timer_context timers[NR_KVM_TIMERS];

	/* Background timer used when the guest is not running */
	struct hrtimer			bg_timer;

	/* Is the timer enabled */
	bool			enabled;
};

int __init kvm_timer_hyp_init(bool has_gic);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_timer_sync_nested(struct kvm_vcpu *vcpu);
void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_timer_update_run(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);

void kvm_timer_init_vm(struct kvm *kvm);

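/*
 * Backends for the KVM_ARM_VCPU_TIMER_CTRL vCPU device attributes, which
 * let userspace configure the timer PPIs.
 */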
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);

u64 kvm_phys_timer_read(void);

void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);

void kvm_timer_init_vhe(void);

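/* Accessors for the per-vCPU timer state and its individual timer contexts */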
#define vcpu_timer(v)	(&(v)->arch.timer_cpu)
#define vcpu_get_timer(v,t)	(&vcpu_timer(v)->timers[(t)])
#define vcpu_vtimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_VTIMER])
#define vcpu_ptimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_PTIMER])
#define vcpu_hvtimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_HVTIMER])
#define vcpu_hptimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_HPTIMER])

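/*
 * Helpers to go from a timer context back to its vCPU (via container_of()
 * on the embedded timers[] array), to the VM-wide timer data, and to the
 * PPI assigned to that context.
 */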
#define arch_timer_ctx_index(ctx)	((ctx)->timer_id)
#define timer_context_to_vcpu(ctx)	container_of((ctx), struct kvm_vcpu, arch.timer_cpu.timers[(ctx)->timer_id])
#define timer_vm_data(ctx)		(&(timer_context_to_vcpu(ctx)->kvm->arch.timer_data))
#define timer_irq(ctx)			(timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])

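/*
 * Read/write one of the TIMER_REG_* registers of a given timer context,
 * e.g. kvm_arm_timer_read_sysreg(vcpu, TIMER_VTIMER, TIMER_REG_CVAL) reads
 * the virtual timer's compare value.
 */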
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg);
void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val);

/* Needed for tracing */
u32 timer_get_ctl(struct arch_timer_context *ctxt);
u64 timer_get_cval(struct arch_timer_context *ctxt);

/* CPU hotplug (HP) callbacks */
void kvm_timer_cpu_up(void);
void kvm_timer_cpu_down(void);

/* CNTKCTL_EL1 valid bits as of DDI0487J.a */
#define CNTKCTL_VALID_BITS	(BIT(17) | GENMASK_ULL(9, 0))
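/* Bit 17 is EVNTIS; bits 9:0 are the EL0 counter access and event stream controls */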

DECLARE_STATIC_KEY_FALSE(broken_cntvoff_key);

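/*
 * True when the host has flagged CNTVOFF_EL2 as unreliable on this system,
 * meaning KVM cannot trust the hardware to apply the virtual counter offset.
 */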
static inline bool has_broken_cntvoff(void)
{
	return static_branch_unlikely(&broken_cntvoff_key);
}

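/*
 * True when running VHE on CPUs implementing FEAT_ECV's CNTPOFF_EL2, i.e.
 * the physical counter offset can be applied directly in hardware.
 */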
static inline bool has_cntpoff(void)
{
	return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
}

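/*
 * Effective offset of a timer context: the sum of the VM-wide and per-vCPU
 * contributions, or 0 if neither is set.
 */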
static inline u64 timer_get_offset(struct arch_timer_context *ctxt)
{
	u64 offset = 0;

	if (!ctxt)
		return 0;

	if (ctxt->offset.vm_offset)
		offset += *ctxt->offset.vm_offset;
	if (ctxt->offset.vcpu_offset)
		offset += *ctxt->offset.vcpu_offset;

	return offset;
}

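/*
 * Only contexts backed by a VM-wide offset can be updated here; warn if a
 * non-zero offset is requested for anything else.
 */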
static inline void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
	if (!ctxt->offset.vm_offset) {
		WARN(offset, "timer %d\n", arch_timer_ctx_index(ctxt));
		return;
	}

	WRITE_ONCE(*ctxt->offset.vm_offset, offset);
}

#endif