arch/riscv/include/asm/cacheflush.h (-8)
···
 #endif /* CONFIG_SMP */
 
-/*
- * The T-Head CMO errata internally probe the CBOM block size, but otherwise
- * don't depend on Zicbom.
- */
 extern unsigned int riscv_cbom_block_size;
-#ifdef CONFIG_RISCV_ISA_ZICBOM
 void riscv_init_cbom_blocksize(void);
-#else
-static inline void riscv_init_cbom_blocksize(void) { }
-#endif
 
 #ifdef CONFIG_RISCV_DMA_NONCOHERENT
 void riscv_noncoherent_supported(void);
arch/riscv/include/asm/kvm_vcpu_timer.h (+1)
···
 int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
 void kvm_riscv_guest_timer_init(struct kvm *kvm);
+void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu);
 bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu);
arch/riscv/kvm/vcpu.c (+3)
···
arch/riscv/kvm/vcpu_timer.c (+20 -7)
···
         kvm_riscv_vcpu_timer_unblocking(vcpu);
 }
 
+void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
+{
+        struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+        if (!t->sstc_enabled)
+                return;
+
+#if defined(CONFIG_32BIT)
+        t->next_cycles = csr_read(CSR_VSTIMECMP);
+        t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
+#else
+        t->next_cycles = csr_read(CSR_VSTIMECMP);
+#endif
+}
+
 void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
 {
         struct kvm_vcpu_timer *t = &vcpu->arch.timer;
 
         if (!t->sstc_enabled)
                 return;
 
-        t = &vcpu->arch.timer;
-#if defined(CONFIG_32BIT)
-        t->next_cycles = csr_read(CSR_VSTIMECMP);
-        t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
-#else
-        t->next_cycles = csr_read(CSR_VSTIMECMP);
-#endif
+        /*
+         * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
+         * upon every VM exit so no need to save here.
+         */
+
         /* timer should be enabled for the remaining operations */
         if (unlikely(!t->init_done))
                 return;
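
The CONFIG_32BIT branch in the new kvm_riscv_vcpu_timer_sync() above exists because rv32 has no 64-bit CSR accesses: the guest's timer compare value is split across the VSTIMECMP and VSTIMECMPH CSRs and has to be stitched back together. A minimal standalone C sketch of that composition (not kernel code; the CSR reads are stubbed with hypothetical plain variables):

/*
 * Illustrative only: on rv32 the 64-bit vstimecmp value spans two CSRs,
 * so the saved value is composed from two 32-bit halves, mirroring the
 * CONFIG_32BIT branch of kvm_riscv_vcpu_timer_sync() above.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t compose_next_cycles(uint32_t vstimecmp, uint32_t vstimecmph)
{
        /* low half from VSTIMECMP, high half from VSTIMECMPH */
        return (uint64_t)vstimecmp | ((uint64_t)vstimecmph << 32);
}

int main(void)
{
        /* hypothetical raw CSR halves in place of csr_read() */
        uint32_t lo = 0x89abcdefu, hi = 0x01234567u;

        printf("next_cycles = 0x%016llx\n",
               (unsigned long long)compose_next_cycles(lo, hi));
        return 0;
}

On rv64 a single csr_read(CSR_VSTIMECMP) already returns the full 64-bit value, which is why the #else branch needs only one read.
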
arch/riscv/mm/cacheflush.c (+38)
···
  * Copyright (C) 2017 SiFive
  */
 
+#include <linux/of.h>
 #include <asm/cacheflush.h>
 
 #ifdef CONFIG_SMP
···
         flush_icache_all();
 }
 #endif /* CONFIG_MMU */
+
+unsigned int riscv_cbom_block_size;
+EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
+
+void riscv_init_cbom_blocksize(void)
+{
+        struct device_node *node;
+        unsigned long cbom_hartid;
+        u32 val, probed_block_size;
+        int ret;
+
+        probed_block_size = 0;
+        for_each_of_cpu_node(node) {
+                unsigned long hartid;
+
+                ret = riscv_of_processor_hartid(node, &hartid);
+                if (ret)
+                        continue;
+
+                /* set block-size for cbom extension if available */
+                ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
+                if (ret)
+                        continue;
+
+                if (!probed_block_size) {
+                        probed_block_size = val;
+                        cbom_hartid = hartid;
+                } else {
+                        if (probed_block_size != val)
+                                pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
+                                        cbom_hartid, hartid);
+                }
+        }
+
+        if (probed_block_size)
+                riscv_cbom_block_size = probed_block_size;
+}
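
The probing policy in riscv_init_cbom_blocksize() above is: the first hart whose device-tree node carries a "riscv,cbom-block-size" property sets the global riscv_cbom_block_size, and any other hart that reports a different value only triggers a warning. A minimal standalone C sketch of that policy (not kernel code; the device-tree accessors are replaced by a hypothetical per-hart array):

/*
 * Illustrative only: first hart with a reported CBOM block size wins,
 * mismatches on later harts are warned about but do not override it.
 */
#include <stdio.h>

struct hart_info {
        unsigned long hartid;
        unsigned int cbom_block_size;   /* 0 = property absent */
};

static unsigned int probe_cbom_block_size(const struct hart_info *harts, int n)
{
        unsigned int probed_block_size = 0;
        unsigned long cbom_hartid = 0;

        for (int i = 0; i < n; i++) {
                if (!harts[i].cbom_block_size)
                        continue;       /* no property for this hart */

                if (!probed_block_size) {
                        probed_block_size = harts[i].cbom_block_size;
                        cbom_hartid = harts[i].hartid;
                } else if (probed_block_size != harts[i].cbom_block_size) {
                        fprintf(stderr,
                                "cbom-block-size mismatched between harts %lu and %lu\n",
                                cbom_hartid, harts[i].hartid);
                }
        }
        return probed_block_size;
}

int main(void)
{
        /* hypothetical example: hart 2 disagrees with hart 0 */
        const struct hart_info harts[] = {
                { 0, 64 }, { 1, 64 }, { 2, 32 }, { 3, 0 },
        };

        printf("riscv_cbom_block_size = %u\n", probe_cbom_block_size(harts, 4));
        return 0;
}

Warning on a mismatch rather than failing presumably keeps boot going on systems whose firmware reports inconsistent per-hart data.
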
arch/riscv/mm/dma-noncoherent.c (-41)
···
 #include <linux/dma-direct.h>
 #include <linux/dma-map-ops.h>
 #include <linux/mm.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
 #include <asm/cacheflush.h>
-
-unsigned int riscv_cbom_block_size;
-EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
 
 static bool noncoherent_supported;
···
         dev->dma_coherent = coherent;
 }
-
-#ifdef CONFIG_RISCV_ISA_ZICBOM
-void riscv_init_cbom_blocksize(void)
-{
-        struct device_node *node;
-        unsigned long cbom_hartid;
-        u32 val, probed_block_size;
-        int ret;
-
-        probed_block_size = 0;
-        for_each_of_cpu_node(node) {
-                unsigned long hartid;
-
-                ret = riscv_of_processor_hartid(node, &hartid);
-                if (ret)
-                        continue;
-
-                /* set block-size for cbom extension if available */
-                ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
-                if (ret)
-                        continue;
-
-                if (!probed_block_size) {
-                        probed_block_size = val;
-                        cbom_hartid = hartid;
-                } else {
-                        if (probed_block_size != val)
-                                pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
-                                        cbom_hartid, hartid);
-                }
-        }
-
-        if (probed_block_size)
-                riscv_cbom_block_size = probed_block_size;
-}
-#endif
 
 void riscv_noncoherent_supported(void)
 {