Skip to content

Commit 3239c52

Browse files
Mina-Chouavpatel
authored and committed
RISC-V: KVM: Flush VS-stage TLB after VCPU migration for Andes cores
Most implementations cache the combined result of two-stage translation, but some, like Andes cores, use split TLBs that store VS-stage and G-stage entries separately. On such systems, when a VCPU migrates to another CPU, an additional HFENCE.VVMA is required to avoid using stale VS-stage entries, which could otherwise cause guest faults. Introduce a static key to identify CPUs with split two-stage TLBs. When enabled, KVM issues an extra HFENCE.VVMA on VCPU migration to prevent stale VS-stage mappings. Signed-off-by: Hui Min Mina Chou <minachou@andestech.com> Signed-off-by: Ben Zong-You Xie <ben717@andestech.com> Reviewed-by: Radim Krčmář <rkrcmar@ventanamicro.com> Reviewed-by: Nutty Liu <nutty.liu@hotmail.com> Link: https://lore.kernel.org/r/20251117084555.157642-1-minachou@andestech.com Signed-off-by: Anup Patel <anup@brainfault.org>
1 parent 974555d commit 3239c52

File tree

7 files changed

+49
-25
lines changed

7 files changed

+49
-25
lines changed

arch/riscv/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -330,4 +330,7 @@ bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu);
330330

331331
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu);
332332

333+
/* Flags representing implementation specific details */
334+
DECLARE_STATIC_KEY_FALSE(kvm_riscv_vsstage_tlb_no_gpa);
335+
333336
#endif /* __RISCV_KVM_HOST_H__ */

arch/riscv/include/asm/kvm_tlb.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,7 @@ void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
4949
unsigned long gva, unsigned long gvsz,
5050
unsigned long order);
5151
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
52+
void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);
5253

5354
void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu);
5455

arch/riscv/include/asm/kvm_vmid.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,5 @@ unsigned long kvm_riscv_gstage_vmid_bits(void);
2222
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
2323
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
2424
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
25-
void kvm_riscv_gstage_vmid_sanitize(struct kvm_vcpu *vcpu);
2625

2726
#endif

arch/riscv/kvm/main.c

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,18 @@
1515
#include <asm/kvm_nacl.h>
1616
#include <asm/sbi.h>
1717

18+
DEFINE_STATIC_KEY_FALSE(kvm_riscv_vsstage_tlb_no_gpa);
19+
20+
static void kvm_riscv_setup_vendor_features(void)
21+
{
22+
/* Andes AX66: split two-stage TLBs */
23+
if (riscv_cached_mvendorid(0) == ANDES_VENDOR_ID &&
24+
(riscv_cached_marchid(0) & 0xFFFF) == 0x8A66) {
25+
static_branch_enable(&kvm_riscv_vsstage_tlb_no_gpa);
26+
kvm_info("VS-stage TLB does not cache guest physical address and VMID\n");
27+
}
28+
}
29+
1830
long kvm_arch_dev_ioctl(struct file *filp,
1931
unsigned int ioctl, unsigned long arg)
2032
{
@@ -160,6 +172,8 @@ static int __init riscv_kvm_init(void)
160172
kvm_info("AIA available with %d guest external interrupts\n",
161173
kvm_riscv_aia_nr_hgei);
162174

175+
kvm_riscv_setup_vendor_features();
176+
163177
kvm_register_perf_callbacks(NULL);
164178

165179
rc = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);

arch/riscv/kvm/tlb.c

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -158,6 +158,36 @@ void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
158158
csr_write(CSR_HGATP, hgatp);
159159
}
160160

161+
/*
 * Sanitize stale TLB entries on the local host CPU when a VCPU has
 * migrated here from another CPU (last_exit_cpu != cpu). No-op when
 * hardware VMIDs are unavailable or the VCPU did not move.
 */
void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
{
	unsigned long vmid;

	if (!kvm_riscv_gstage_vmid_bits() ||
	    vcpu->arch.last_exit_cpu == vcpu->cpu)
		return;

	/*
	 * On RISC-V platforms with hardware VMID support, we share same
	 * VMID for all VCPUs of a particular Guest/VM. This means we might
	 * have stale G-stage TLB entries on the current Host CPU due to
	 * some other VCPU of the same Guest which ran previously on the
	 * current Host CPU.
	 *
	 * To cleanup stale TLB entries, we simply flush all G-stage TLB
	 * entries by VMID whenever underlying Host CPU changes for a VCPU.
	 */

	vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
	kvm_riscv_local_hfence_gvma_vmid_all(vmid);

	/*
	 * Flush VS-stage TLB entries for implementation where VS-stage
	 * TLB does not cache guest physical address and VMID.
	 */
	if (static_branch_unlikely(&kvm_riscv_vsstage_tlb_no_gpa))
		kvm_riscv_local_hfence_vvma_all(vmid);
}
190+
161191
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
162192
{
163193
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD);

arch/riscv/kvm/vcpu.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -968,7 +968,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
968968
* Note: This should be done after G-stage VMID has been
969969
* updated using kvm_riscv_gstage_vmid_ver_changed()
970970
*/
971-
kvm_riscv_gstage_vmid_sanitize(vcpu);
971+
kvm_riscv_local_tlb_sanitize(vcpu);
972972

973973
trace_kvm_entry(vcpu);
974974

arch/riscv/kvm/vmid.c

Lines changed: 0 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -122,26 +122,3 @@ void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu)
122122
kvm_for_each_vcpu(i, v, vcpu->kvm)
123123
kvm_make_request(KVM_REQ_UPDATE_HGATP, v);
124124
}
125-
126-
void kvm_riscv_gstage_vmid_sanitize(struct kvm_vcpu *vcpu)
127-
{
128-
unsigned long vmid;
129-
130-
if (!kvm_riscv_gstage_vmid_bits() ||
131-
vcpu->arch.last_exit_cpu == vcpu->cpu)
132-
return;
133-
134-
/*
135-
* On RISC-V platforms with hardware VMID support, we share same
136-
* VMID for all VCPUs of a particular Guest/VM. This means we might
137-
* have stale G-stage TLB entries on the current Host CPU due to
138-
* some other VCPU of the same Guest which ran previously on the
139-
* current Host CPU.
140-
*
141-
* To cleanup stale TLB entries, we simply flush all G-stage TLB
142-
* entries by VMID whenever underlying Host CPU changes for a VCPU.
143-
*/
144-
145-
vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
146-
kvm_riscv_local_hfence_gvma_vmid_all(vmid);
147-
}

0 commit comments

Comments
 (0)