Skip to content

Commit 6316366

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/misc-6.20 into kvmarm-master/next
* kvm-arm64/misc-6.20: : . : Misc KVM/arm64 changes for 6.20 : : - Trivial FPSIMD cleanups : : - Calculate hyp VA size only once, avoiding potential mapping issues when : VA bits is smaller than expected : : - Silence sparse warning for the HYP stack base : : - Fix error checking when handling FFA_VERSION : : - Add missing trap configuration for DBGWCR15_EL1 : : - Don't try to deal with nested S2 when NV isn't enabled for a guest : : - Various spelling fixes : . KVM: arm64: nv: Avoid NV stage-2 code when NV is not supported KVM: arm64: Fix various comments KVM: arm64: nv: Add trap config for DBGWCR<15>_EL1 KVM: arm64: Fix error checking for FFA_VERSION KVM: arm64: Fix missing <asm/stacktrace/nvhe.h> include KVM: arm64: Calculate hyp VA size only once KVM: arm64: Remove ISB after writing FPEXC32_EL2 KVM: arm64: Shuffle KVM_HOST_DATA_FLAG_* indices KVM: arm64: Fix comment in fpsimd_lazy_switch_to_host() Signed-off-by: Marc Zyngier <maz@kernel.org>
2 parents 1df3f01 + 0c4762e commit 6316366

File tree

11 files changed

+62
-46
lines changed

11 files changed

+62
-46
lines changed

arch/arm64/include/asm/kvm_host.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -201,7 +201,7 @@ struct kvm_s2_mmu {
201201
* host to parse the guest S2.
202202
* This either contains:
203203
* - the virtual VTTBR programmed by the guest hypervisor with
204-
* CnP cleared
204+
* CnP cleared
205205
* - The value 1 (VMID=0, BADDR=0, CnP=1) if invalid
206206
*
207207
* We also cache the full VTCR which gets used for TLB invalidation,
@@ -734,11 +734,11 @@ struct cpu_sve_state {
734734
struct kvm_host_data {
735735
#define KVM_HOST_DATA_FLAG_HAS_SPE 0
736736
#define KVM_HOST_DATA_FLAG_HAS_TRBE 1
737-
#define KVM_HOST_DATA_FLAG_TRBE_ENABLED 4
738-
#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 5
739-
#define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT 6
740-
#define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED 7
741-
#define KVM_HOST_DATA_FLAG_HAS_BRBE 8
737+
#define KVM_HOST_DATA_FLAG_TRBE_ENABLED 2
738+
#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 3
739+
#define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT 4
740+
#define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED 5
741+
#define KVM_HOST_DATA_FLAG_HAS_BRBE 6
742742
unsigned long flags;
743743

744744
struct kvm_cpu_context host_ctxt;

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,7 @@ alternative_cb_end
103103
void kvm_update_va_mask(struct alt_instr *alt,
104104
__le32 *origptr, __le32 *updptr, int nr_inst);
105105
void kvm_compute_layout(void);
106+
u32 kvm_hyp_va_bits(void);
106107
void kvm_apply_hyp_relocations(void);
107108

108109
#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)
@@ -185,7 +186,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
185186

186187
phys_addr_t kvm_mmu_get_httbr(void);
187188
phys_addr_t kvm_get_idmap_vector(void);
188-
int __init kvm_mmu_init(u32 *hyp_va_bits);
189+
int __init kvm_mmu_init(u32 hyp_va_bits);
189190

190191
static inline void *__kvm_vector_slot2addr(void *base,
191192
enum arm64_hyp_spectre_vector slot)

arch/arm64/kvm/arm.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@
4040
#include <asm/kvm_pkvm.h>
4141
#include <asm/kvm_ptrauth.h>
4242
#include <asm/sections.h>
43+
#include <asm/stacktrace/nvhe.h>
4344

4445
#include <kvm/arm_hypercalls.h>
4546
#include <kvm/arm_pmu.h>
@@ -2623,7 +2624,7 @@ static void pkvm_hyp_init_ptrauth(void)
26232624
/* Inits Hyp-mode on all online CPUs */
26242625
static int __init init_hyp_mode(void)
26252626
{
2626-
u32 hyp_va_bits;
2627+
u32 hyp_va_bits = kvm_hyp_va_bits();
26272628
int cpu;
26282629
int err = -ENOMEM;
26292630

@@ -2637,7 +2638,7 @@ static int __init init_hyp_mode(void)
26372638
/*
26382639
* Allocate Hyp PGD and setup Hyp identity mapping
26392640
*/
2640-
err = kvm_mmu_init(&hyp_va_bits);
2641+
err = kvm_mmu_init(hyp_va_bits);
26412642
if (err)
26422643
goto out_err;
26432644

arch/arm64/kvm/emulate-nested.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1174,6 +1174,7 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
11741174
SR_TRAP(SYS_DBGWCRn_EL1(12), CGT_MDCR_TDE_TDA),
11751175
SR_TRAP(SYS_DBGWCRn_EL1(13), CGT_MDCR_TDE_TDA),
11761176
SR_TRAP(SYS_DBGWCRn_EL1(14), CGT_MDCR_TDE_TDA),
1177+
SR_TRAP(SYS_DBGWCRn_EL1(15), CGT_MDCR_TDE_TDA),
11771178
SR_TRAP(SYS_DBGCLAIMSET_EL1, CGT_MDCR_TDE_TDA),
11781179
SR_TRAP(SYS_DBGCLAIMCLR_EL1, CGT_MDCR_TDE_TDA),
11791180
SR_TRAP(SYS_DBGAUTHSTATUS_EL1, CGT_MDCR_TDE_TDA),

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -59,10 +59,8 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
5959
* If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
6060
* it will cause an exception.
6161
*/
62-
if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
62+
if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd())
6363
write_sysreg(1 << 30, fpexc32_el2);
64-
isb();
65-
}
6664
}
6765

6866
static inline void __activate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
@@ -495,7 +493,7 @@ static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
495493
/*
496494
* When the guest owns the FP regs, we know that guest+hyp traps for
497495
* any FPSIMD/SVE/SME features exposed to the guest have been disabled
498-
* by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd()
496+
* by either __activate_cptr_traps() or kvm_hyp_handle_fpsimd()
499497
* prior to __guest_entry(). As __guest_entry() guarantees a context
500498
* synchronization event, we don't need an ISB here to avoid taking
501499
* traps for anything that was exposed to the guest.

arch/arm64/kvm/hyp/nvhe/ffa.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -792,7 +792,7 @@ static void do_ffa_version(struct arm_smccc_1_2_regs *res,
792792
.a0 = FFA_VERSION,
793793
.a1 = ffa_req_version,
794794
}, res);
795-
if (res->a0 == FFA_RET_NOT_SUPPORTED)
795+
if ((s32)res->a0 == FFA_RET_NOT_SUPPORTED)
796796
goto unlock;
797797

798798
hyp_ffa_version = ffa_req_version;
@@ -943,7 +943,7 @@ int hyp_ffa_init(void *pages)
943943
.a0 = FFA_VERSION,
944944
.a1 = FFA_VERSION_1_2,
945945
}, &res);
946-
if (res.a0 == FFA_RET_NOT_SUPPORTED)
946+
if ((s32)res.a0 == FFA_RET_NOT_SUPPORTED)
947947
return 0;
948948

949949
/*

arch/arm64/kvm/hyp/vhe/sysreg-sr.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -205,7 +205,7 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
205205

206206
/*
207207
* When running a normal EL1 guest, we only load a new vcpu
208-
* after a context switch, which imvolves a DSB, so all
208+
* after a context switch, which involves a DSB, so all
209209
* speculative EL1&0 walks will have already completed.
210210
* If running NV, the vcpu may transition between vEL1 and
211211
* vEL2 without a context switch, so make sure we complete

arch/arm64/kvm/mmu.c

Lines changed: 4 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -2294,11 +2294,9 @@ static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
22942294
.virt_to_phys = kvm_host_pa,
22952295
};
22962296

2297-
int __init kvm_mmu_init(u32 *hyp_va_bits)
2297+
int __init kvm_mmu_init(u32 hyp_va_bits)
22982298
{
22992299
int err;
2300-
u32 idmap_bits;
2301-
u32 kernel_bits;
23022300

23032301
hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
23042302
hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
@@ -2312,25 +2310,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
23122310
*/
23132311
BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
23142312

2315-
/*
2316-
* The ID map is always configured for 48 bits of translation, which
2317-
* may be fewer than the number of VA bits used by the regular kernel
2318-
* stage 1, when VA_BITS=52.
2319-
*
2320-
* At EL2, there is only one TTBR register, and we can't switch between
2321-
* translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
2322-
* line: we need to use the extended range with *both* our translation
2323-
* tables.
2324-
*
2325-
* So use the maximum of the idmap VA bits and the regular kernel stage
2326-
* 1 VA bits to assure that the hypervisor can both ID map its code page
2327-
* and map any kernel memory.
2328-
*/
2329-
idmap_bits = IDMAP_VA_BITS;
2330-
kernel_bits = vabits_actual;
2331-
*hyp_va_bits = max(idmap_bits, kernel_bits);
2332-
2333-
kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
2313+
kvm_debug("Using %u-bit virtual addresses at EL2\n", hyp_va_bits);
23342314
kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
23352315
kvm_debug("HYP VA range: %lx:%lx\n",
23362316
kern_hyp_va(PAGE_OFFSET),
@@ -2355,7 +2335,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
23552335
goto out;
23562336
}
23572337

2358-
err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
2338+
err = kvm_pgtable_hyp_init(hyp_pgtable, hyp_va_bits, &kvm_hyp_mm_ops);
23592339
if (err)
23602340
goto out_free_pgtable;
23612341

@@ -2364,7 +2344,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
23642344
goto out_destroy_pgtable;
23652345

23662346
io_map_base = hyp_idmap_start;
2367-
__hyp_va_bits = *hyp_va_bits;
2347+
__hyp_va_bits = hyp_va_bits;
23682348
return 0;
23692349

23702350
out_destroy_pgtable:

arch/arm64/kvm/nested.c

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1101,6 +1101,9 @@ void kvm_nested_s2_wp(struct kvm *kvm)
11011101

11021102
lockdep_assert_held_write(&kvm->mmu_lock);
11031103

1104+
if (!kvm->arch.nested_mmus_size)
1105+
return;
1106+
11041107
for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
11051108
struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
11061109

@@ -1117,6 +1120,9 @@ void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block)
11171120

11181121
lockdep_assert_held_write(&kvm->mmu_lock);
11191122

1123+
if (!kvm->arch.nested_mmus_size)
1124+
return;
1125+
11201126
for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
11211127
struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
11221128

@@ -1133,6 +1139,9 @@ void kvm_nested_s2_flush(struct kvm *kvm)
11331139

11341140
lockdep_assert_held_write(&kvm->mmu_lock);
11351141

1142+
if (!kvm->arch.nested_mmus_size)
1143+
return;
1144+
11361145
for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
11371146
struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
11381147

@@ -1145,6 +1154,9 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
11451154
{
11461155
int i;
11471156

1157+
if (!kvm->arch.nested_mmus_size)
1158+
return;
1159+
11481160
for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
11491161
struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
11501162

arch/arm64/kvm/va_layout.c

Lines changed: 28 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -46,9 +46,31 @@ static void init_hyp_physvirt_offset(void)
4646
hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
4747
}
4848

49+
/*
50+
* Calculate the actual VA size used by the hypervisor
51+
*/
52+
__init u32 kvm_hyp_va_bits(void)
53+
{
54+
/*
55+
* The ID map is always configured for 48 bits of translation, which may
56+
* be different from the number of VA bits used by the regular kernel
57+
* stage 1.
58+
*
59+
* At EL2, there is only one TTBR register, and we can't switch between
60+
* translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
61+
* line: we need to use the extended range with *both* our translation
62+
* tables.
63+
*
64+
* So use the maximum of the idmap VA bits and the regular kernel stage
65+
* 1 VA bits as the hypervisor VA size to assure that the hypervisor can
66+
* both ID map its code page and map any kernel memory.
67+
*/
68+
return max(IDMAP_VA_BITS, vabits_actual);
69+
}
70+
4971
/*
5072
* We want to generate a hyp VA with the following format (with V ==
51-
* vabits_actual):
73+
* hypervisor VA bits):
5274
*
5375
* 63 ... V | V-1 | V-2 .. tag_lsb | tag_lsb - 1 .. 0
5476
* ---------------------------------------------------------
@@ -61,20 +83,21 @@ __init void kvm_compute_layout(void)
6183
{
6284
phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
6385
u64 hyp_va_msb;
86+
u32 hyp_va_bits = kvm_hyp_va_bits();
6487

6588
/* Where is my RAM region? */
66-
hyp_va_msb = idmap_addr & BIT(vabits_actual - 1);
67-
hyp_va_msb ^= BIT(vabits_actual - 1);
89+
hyp_va_msb = idmap_addr & BIT(hyp_va_bits - 1);
90+
hyp_va_msb ^= BIT(hyp_va_bits - 1);
6891

6992
tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
7093
(u64)(high_memory - 1));
7194

7295
va_mask = GENMASK_ULL(tag_lsb - 1, 0);
7396
tag_val = hyp_va_msb;
7497

75-
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
98+
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (hyp_va_bits - 1)) {
7699
/* We have some free bits to insert a random tag. */
77-
tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
100+
tag_val |= get_random_long() & GENMASK_ULL(hyp_va_bits - 2, tag_lsb);
78101
}
79102
tag_val >>= tag_lsb;
80103

0 commit comments

Comments
 (0)