Skip to content

Commit 55365ab

Browse files
committed
Merge tag 'kvmarm-fixes-7.0-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 fixes for 7.0, take #1 - Make sure we don't leak any S1POE state from guest to guest when the feature is supported on the HW, but not enabled on the host - Propagate the ID registers from the host into non-protected VMs managed by pKVM, ensuring that the guest sees the intended feature set - Drop double kern_hyp_va() from unpin_host_sve_state(), which could bite us if we were to change kern_hyp_va() to not being idempotent - Don't leak stage-2 mappings in protected mode - Correctly align the faulting address when dealing with single page stage-2 mappings for PAGE_SIZE > 4kB - Fix detection of virtualisation-capable GICv5 IRS, due to the maintainer being obviously fat-fingered... - Remove duplication of code retrieving the ASID for the purpose of S1 PT handling - Fix slightly abusive const-ification in vgic_set_kvm_info()
2 parents 70295a4 + 54e367c commit 55365ab

File tree

8 files changed

+80
-69
lines changed

8 files changed

+80
-69
lines changed

arch/arm64/include/asm/kvm_host.h

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1616,7 +1616,8 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
16161616
(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1PIE, IMP))
16171617

16181618
#define kvm_has_s1poe(k) \
1619-
(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
1619+
(system_supports_poe() && \
1620+
kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
16201621

16211622
#define kvm_has_ras(k) \
16221623
(kvm_has_feat((k), ID_AA64PFR0_EL1, RAS, IMP))

arch/arm64/include/asm/kvm_nested.h

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -397,6 +397,8 @@ int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu);
397397
int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu);
398398
void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val);
399399

400+
u16 get_asid_by_regime(struct kvm_vcpu *vcpu, enum trans_regime regime);
401+
400402
#define vncr_fixmap(c) \
401403
({ \
402404
u32 __c = (c); \

arch/arm64/kvm/at.c

Lines changed: 2 additions & 25 deletions
Original file line number | Diff line number | Diff line change
@@ -540,31 +540,8 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
540540
wr->pa |= va & GENMASK_ULL(va_bottom - 1, 0);
541541

542542
wr->nG = (wi->regime != TR_EL2) && (desc & PTE_NG);
543-
if (wr->nG) {
544-
u64 asid_ttbr, tcr;
545-
546-
switch (wi->regime) {
547-
case TR_EL10:
548-
tcr = vcpu_read_sys_reg(vcpu, TCR_EL1);
549-
asid_ttbr = ((tcr & TCR_A1) ?
550-
vcpu_read_sys_reg(vcpu, TTBR1_EL1) :
551-
vcpu_read_sys_reg(vcpu, TTBR0_EL1));
552-
break;
553-
case TR_EL20:
554-
tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
555-
asid_ttbr = ((tcr & TCR_A1) ?
556-
vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
557-
vcpu_read_sys_reg(vcpu, TTBR0_EL2));
558-
break;
559-
default:
560-
BUG();
561-
}
562-
563-
wr->asid = FIELD_GET(TTBR_ASID_MASK, asid_ttbr);
564-
if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
565-
!(tcr & TCR_ASID16))
566-
wr->asid &= GENMASK(7, 0);
567-
}
543+
if (wr->nG)
544+
wr->asid = get_asid_by_regime(vcpu, wi->regime);
568545

569546
return 0;
570547

arch/arm64/kvm/hyp/nvhe/pkvm.c

Lines changed: 34 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -342,6 +342,7 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc
342342
/* No restrictions for non-protected VMs. */
343343
if (!kvm_vm_is_protected(kvm)) {
344344
hyp_vm->kvm.arch.flags = host_arch_flags;
345+
hyp_vm->kvm.arch.flags &= ~BIT_ULL(KVM_ARCH_FLAG_ID_REGS_INITIALIZED);
345346

346347
bitmap_copy(kvm->arch.vcpu_features,
347348
host_kvm->arch.vcpu_features,
@@ -391,7 +392,7 @@ static void unpin_host_sve_state(struct pkvm_hyp_vcpu *hyp_vcpu)
391392
if (!vcpu_has_feature(&hyp_vcpu->vcpu, KVM_ARM_VCPU_SVE))
392393
return;
393394

394-
sve_state = kern_hyp_va(hyp_vcpu->vcpu.arch.sve_state);
395+
sve_state = hyp_vcpu->vcpu.arch.sve_state;
395396
hyp_unpin_shared_mem(sve_state,
396397
sve_state + vcpu_sve_state_size(&hyp_vcpu->vcpu));
397398
}
@@ -471,6 +472,35 @@ static int pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *h
471472
return ret;
472473
}
473474

475+
static int vm_copy_id_regs(struct pkvm_hyp_vcpu *hyp_vcpu)
476+
{
477+
struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
478+
const struct kvm *host_kvm = hyp_vm->host_kvm;
479+
struct kvm *kvm = &hyp_vm->kvm;
480+
481+
if (!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &host_kvm->arch.flags))
482+
return -EINVAL;
483+
484+
if (test_and_set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
485+
return 0;
486+
487+
memcpy(kvm->arch.id_regs, host_kvm->arch.id_regs, sizeof(kvm->arch.id_regs));
488+
489+
return 0;
490+
}
491+
492+
static int pkvm_vcpu_init_sysregs(struct pkvm_hyp_vcpu *hyp_vcpu)
493+
{
494+
int ret = 0;
495+
496+
if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
497+
kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
498+
else
499+
ret = vm_copy_id_regs(hyp_vcpu);
500+
501+
return ret;
502+
}
503+
474504
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
475505
struct pkvm_hyp_vm *hyp_vm,
476506
struct kvm_vcpu *host_vcpu)
@@ -490,8 +520,9 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
490520
hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
491521
hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
492522

493-
if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
494-
kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
523+
ret = pkvm_vcpu_init_sysregs(hyp_vcpu);
524+
if (ret)
525+
goto done;
495526

496527
ret = pkvm_vcpu_init_traps(hyp_vcpu);
497528
if (ret)

arch/arm64/kvm/mmu.c

Lines changed: 5 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -1754,14 +1754,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
17541754
}
17551755

17561756
/*
1757-
* Both the canonical IPA and fault IPA must be hugepage-aligned to
1758-
* ensure we find the right PFN and lay down the mapping in the right
1759-
* place.
1757+
* Both the canonical IPA and fault IPA must be aligned to the
1758+
* mapping size to ensure we find the right PFN and lay down the
1759+
* mapping in the right place.
17601760
*/
1761-
if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) {
1762-
fault_ipa &= ~(vma_pagesize - 1);
1763-
ipa &= ~(vma_pagesize - 1);
1764-
}
1761+
fault_ipa = ALIGN_DOWN(fault_ipa, vma_pagesize);
1762+
ipa = ALIGN_DOWN(ipa, vma_pagesize);
17651763

17661764
gfn = ipa >> PAGE_SHIFT;
17671765
mte_allowed = kvm_vma_mte_allowed(vma);

arch/arm64/kvm/nested.c

Lines changed: 31 additions & 32 deletions
Original file line number | Diff line number | Diff line change
@@ -854,6 +854,33 @@ int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2)
854854
return kvm_inject_nested_sync(vcpu, esr_el2);
855855
}
856856

857+
u16 get_asid_by_regime(struct kvm_vcpu *vcpu, enum trans_regime regime)
858+
{
859+
enum vcpu_sysreg ttbr_elx;
860+
u64 tcr;
861+
u16 asid;
862+
863+
switch (regime) {
864+
case TR_EL10:
865+
tcr = vcpu_read_sys_reg(vcpu, TCR_EL1);
866+
ttbr_elx = (tcr & TCR_A1) ? TTBR1_EL1 : TTBR0_EL1;
867+
break;
868+
case TR_EL20:
869+
tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
870+
ttbr_elx = (tcr & TCR_A1) ? TTBR1_EL2 : TTBR0_EL2;
871+
break;
872+
default:
873+
BUG();
874+
}
875+
876+
asid = FIELD_GET(TTBRx_EL1_ASID, vcpu_read_sys_reg(vcpu, ttbr_elx));
877+
if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
878+
!(tcr & TCR_ASID16))
879+
asid &= GENMASK(7, 0);
880+
881+
return asid;
882+
}
883+
857884
static void invalidate_vncr(struct vncr_tlb *vt)
858885
{
859886
vt->valid = false;
@@ -1154,9 +1181,6 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
11541181
{
11551182
int i;
11561183

1157-
if (!kvm->arch.nested_mmus_size)
1158-
return;
1159-
11601184
for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
11611185
struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
11621186

@@ -1336,20 +1360,8 @@ static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu)
13361360
if (read_vncr_el2(vcpu) != vt->gva)
13371361
return false;
13381362

1339-
if (vt->wr.nG) {
1340-
u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
1341-
u64 ttbr = ((tcr & TCR_A1) ?
1342-
vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
1343-
vcpu_read_sys_reg(vcpu, TTBR0_EL2));
1344-
u16 asid;
1345-
1346-
asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
1347-
if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
1348-
!(tcr & TCR_ASID16))
1349-
asid &= GENMASK(7, 0);
1350-
1351-
return asid == vt->wr.asid;
1352-
}
1363+
if (vt->wr.nG)
1364+
return get_asid_by_regime(vcpu, TR_EL20) == vt->wr.asid;
13531365

13541366
return true;
13551367
}
@@ -1452,21 +1464,8 @@ static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
14521464
if (read_vncr_el2(vcpu) != vt->gva)
14531465
return;
14541466

1455-
if (vt->wr.nG) {
1456-
u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
1457-
u64 ttbr = ((tcr & TCR_A1) ?
1458-
vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
1459-
vcpu_read_sys_reg(vcpu, TTBR0_EL2));
1460-
u16 asid;
1461-
1462-
asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
1463-
if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
1464-
!(tcr & TCR_ASID16))
1465-
asid &= GENMASK(7, 0);
1466-
1467-
if (asid != vt->wr.asid)
1468-
return;
1469-
}
1467+
if (vt->wr.nG && get_asid_by_regime(vcpu, TR_EL20) != vt->wr.asid)
1468+
return;
14701469

14711470
vt->cpu = smp_processor_id();
14721471

arch/arm64/kvm/sys_regs.c

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1816,6 +1816,9 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
18161816
ID_AA64MMFR3_EL1_SCTLRX |
18171817
ID_AA64MMFR3_EL1_S1POE |
18181818
ID_AA64MMFR3_EL1_S1PIE;
1819+
1820+
if (!system_supports_poe())
1821+
val &= ~ID_AA64MMFR3_EL1_S1POE;
18191822
break;
18201823
case SYS_ID_MMFR4_EL1:
18211824
val &= ~ID_MMFR4_EL1_CCIDX;

drivers/irqchip/irq-gic-v5-irs.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -699,7 +699,7 @@ static int __init gicv5_irs_init(struct gicv5_irs_chip_data *irs_data)
699699
*/
700700
if (list_empty(&irs_nodes)) {
701701
idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR0);
702-
gicv5_global_data.virt_capable = !FIELD_GET(GICV5_IRS_IDR0_VIRT, idr);
702+
gicv5_global_data.virt_capable = !!FIELD_GET(GICV5_IRS_IDR0_VIRT, idr);
703703

704704
idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR1);
705705
irs_setup_pri_bits(idr);

0 commit comments

Comments (0)