Skip to content

Commit 679fcce

Browse files
committed
Merge tag 'kvm-x86-svm-6.19' of https://github.com/kvm-x86/linux into HEAD
KVM SVM changes for 6.19: - Fix a few missing "VMCB dirty" bugs. - Fix the worst of KVM's lack of EFER.LMSLE emulation. - Add AVIC support for addressing 4k vCPUs in x2AVIC mode. - Fix incorrect handling of selective CR0 writes when checking intercepts during emulation of L2 instructions. - Fix a currently-benign bug where KVM would clobber SPEC_CTRL[63:32] on VMRUN and #VMEXIT. - Fix a bug where KVM would corrupt the guest code stream when re-injecting a soft interrupt if the guest patched the underlying code after the VM-Exit, e.g. when Linux patches code with a temporary INT3. - Add KVM_X86_SNP_POLICY_BITS to advertise supported SNP policy bits to userspace, and extend KVM "support" to all policy bits that don't require any actual support from KVM.
2 parents d1e7b46 + 275d6d1 commit 679fcce

File tree

16 files changed

+310
-85
lines changed

16 files changed

+310
-85
lines changed

Documentation/virt/kvm/x86/errata.rst

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,14 @@ versus "has_error_code", i.e. KVM's ABI follows AMD behavior.
4848
Nested virtualization features
4949
------------------------------
5050

51-
TBD
51+
On AMD CPUs, when GIF is cleared, #DB exceptions or traps due to a breakpoint
52+
register match are ignored and discarded by the CPU. The CPU relies on the VMM
53+
to fully virtualize this behavior, even when vGIF is enabled for the guest
54+
(i.e. vGIF=0 does not cause the CPU to drop #DBs when the guest is running).
55+
KVM does not virtualize this behavior as the complexity is unjustified given
56+
the rarity of the use case. One way to handle this would be for KVM to
57+
intercept the #DB, temporarily disable the breakpoint, single-step over the
58+
instruction, then re-enable the breakpoint.
5259

5360
x2APIC
5461
------

arch/x86/include/asm/cpufeatures.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -338,6 +338,7 @@
338338
#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
339339
#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* Single Thread Indirect Branch Predictors always-on preferred */
340340
#define X86_FEATURE_AMD_IBRS_SAME_MODE (13*32+19) /* Indirect Branch Restricted Speculation same mode protection*/
341+
#define X86_FEATURE_EFER_LMSLE_MBZ (13*32+20) /* EFER.LMSLE must be zero */
341342
#define X86_FEATURE_AMD_PPIN (13*32+23) /* "amd_ppin" Protected Processor Inventory Number */
342343
#define X86_FEATURE_AMD_SSBD (13*32+24) /* Speculative Store Bypass Disable */
343344
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* "virt_ssbd" Virtualized Speculative Store Bypass Disable */
@@ -504,6 +505,7 @@
504505
* can access host MMIO (ignored for all intents
505506
* and purposes if CLEAR_CPU_BUF_VM is set).
506507
*/
508+
#define X86_FEATURE_X2AVIC_EXT (21*32+18) /* AMD SVM x2AVIC support for 4k vCPUs */
507509

508510
/*
509511
* BUG word(s)

arch/x86/include/asm/kvm_host.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2139,6 +2139,11 @@ u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
21392139
* the gfn, i.e. retrying the instruction will hit a
21402140
* !PRESENT fault, which results in a new shadow page
21412141
* and sends KVM back to square one.
2142+
*
2143+
* EMULTYPE_SKIP_SOFT_INT - Set in combination with EMULTYPE_SKIP to only skip
2144+
* an instruction if it could generate a given software
2145+
* interrupt, which must be encoded via
2146+
* EMULTYPE_SET_SOFT_INT_VECTOR().
21422147
*/
21432148
#define EMULTYPE_NO_DECODE (1 << 0)
21442149
#define EMULTYPE_TRAP_UD (1 << 1)
@@ -2149,6 +2154,10 @@ u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
21492154
#define EMULTYPE_PF (1 << 6)
21502155
#define EMULTYPE_COMPLETE_USER_EXIT (1 << 7)
21512156
#define EMULTYPE_WRITE_PF_TO_SP (1 << 8)
2157+
#define EMULTYPE_SKIP_SOFT_INT (1 << 9)
2158+
2159+
#define EMULTYPE_SET_SOFT_INT_VECTOR(v) ((u32)((v) & 0xff) << 16)
2160+
#define EMULTYPE_GET_SOFT_INT_VECTOR(e) (((e) >> 16) & 0xff)
21522161

21532162
static inline bool kvm_can_emulate_event_vectoring(int emul_type)
21542163
{

arch/x86/include/asm/svm.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -279,7 +279,7 @@ enum avic_ipi_failure_cause {
279279
AVIC_IPI_FAILURE_INVALID_IPI_VECTOR,
280280
};
281281

282-
#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(8, 0)
282+
#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(11, 0)
283283

284284
/*
285285
* For AVIC, the max index allowed for physical APIC ID table is 0xfe (254), as
@@ -289,11 +289,14 @@ enum avic_ipi_failure_cause {
289289

290290
/*
291291
* For x2AVIC, the max index allowed for physical APIC ID table is 0x1ff (511).
292+
* With X86_FEATURE_X2AVIC_EXT, the max index is increased to 0xfff (4095).
292293
*/
293294
#define X2AVIC_MAX_PHYSICAL_ID 0x1FFUL
295+
#define X2AVIC_4K_MAX_PHYSICAL_ID 0xFFFUL
294296

295297
static_assert((AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == AVIC_MAX_PHYSICAL_ID);
296298
static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_MAX_PHYSICAL_ID);
299+
static_assert((X2AVIC_4K_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_4K_MAX_PHYSICAL_ID);
297300

298301
#define SVM_SEV_FEAT_SNP_ACTIVE BIT(0)
299302
#define SVM_SEV_FEAT_RESTRICTED_INJECTION BIT(3)

arch/x86/include/uapi/asm/kvm.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -502,6 +502,7 @@ struct kvm_sync_regs {
502502
/* vendor-specific groups and attributes for system fd */
503503
#define KVM_X86_GRP_SEV 1
504504
# define KVM_X86_SEV_VMSA_FEATURES 0
505+
# define KVM_X86_SNP_POLICY_BITS 1
505506

506507
struct kvm_vmx_nested_state_data {
507508
__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];

arch/x86/kernel/cpu/scattered.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,7 @@ static const struct cpuid_bit cpuid_bits[] = {
4949
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
5050
{ X86_FEATURE_AMD_FAST_CPPC, CPUID_EDX, 15, 0x80000007, 0 },
5151
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
52+
{ X86_FEATURE_X2AVIC_EXT, CPUID_ECX, 6, 0x8000000a, 0 },
5253
{ X86_FEATURE_COHERENCY_SFW_NO, CPUID_EBX, 31, 0x8000001f, 0 },
5354
{ X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
5455
{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },

arch/x86/kvm/cpuid.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1135,6 +1135,7 @@ void kvm_set_cpu_caps(void)
11351135
F(AMD_STIBP),
11361136
F(AMD_STIBP_ALWAYS_ON),
11371137
F(AMD_IBRS_SAME_MODE),
1138+
PASSTHROUGH_F(EFER_LMSLE_MBZ),
11381139
F(AMD_PSFD),
11391140
F(AMD_IBPB_RET),
11401141
);

arch/x86/kvm/svm/avic.c

Lines changed: 70 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,7 @@ static u32 next_vm_id = 0;
106106
static bool next_vm_id_wrapped = 0;
107107
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
108108
static bool x2avic_enabled;
109-
109+
static u32 x2avic_max_physical_id;
110110

111111
static void avic_set_x2apic_msr_interception(struct vcpu_svm *svm,
112112
bool intercept)
@@ -158,12 +158,40 @@ static void avic_set_x2apic_msr_interception(struct vcpu_svm *svm,
158158
svm->x2avic_msrs_intercepted = intercept;
159159
}
160160

161+
static u32 __avic_get_max_physical_id(struct kvm *kvm, struct kvm_vcpu *vcpu)
162+
{
163+
u32 arch_max;
164+
165+
/*
166+
* Return the largest size (x2APIC) when querying without a vCPU, e.g.
167+
* to allocate the per-VM table..
168+
*/
169+
if (x2avic_enabled && (!vcpu || apic_x2apic_mode(vcpu->arch.apic)))
170+
arch_max = x2avic_max_physical_id;
171+
else
172+
arch_max = AVIC_MAX_PHYSICAL_ID;
173+
174+
/*
175+
* Despite its name, KVM_CAP_MAX_VCPU_ID represents the maximum APIC ID
176+
* plus one, so the max possible APIC ID is one less than that.
177+
*/
178+
return min(kvm->arch.max_vcpu_ids - 1, arch_max);
179+
}
180+
181+
static u32 avic_get_max_physical_id(struct kvm_vcpu *vcpu)
182+
{
183+
return __avic_get_max_physical_id(vcpu->kvm, vcpu);
184+
}
185+
161186
static void avic_activate_vmcb(struct vcpu_svm *svm)
162187
{
163188
struct vmcb *vmcb = svm->vmcb01.ptr;
189+
struct kvm_vcpu *vcpu = &svm->vcpu;
164190

165191
vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
192+
166193
vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;
194+
vmcb->control.avic_physical_id |= avic_get_max_physical_id(vcpu);
167195

168196
vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
169197

@@ -176,7 +204,7 @@ static void avic_activate_vmcb(struct vcpu_svm *svm)
176204
*/
177205
if (x2avic_enabled && apic_x2apic_mode(svm->vcpu.arch.apic)) {
178206
vmcb->control.int_ctl |= X2APIC_MODE_MASK;
179-
vmcb->control.avic_physical_id |= X2AVIC_MAX_PHYSICAL_ID;
207+
180208
/* Disabling MSR intercept for x2APIC registers */
181209
avic_set_x2apic_msr_interception(svm, false);
182210
} else {
@@ -186,8 +214,6 @@ static void avic_activate_vmcb(struct vcpu_svm *svm)
186214
*/
187215
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, &svm->vcpu);
188216

189-
/* For xAVIC and hybrid-xAVIC modes */
190-
vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID;
191217
/* Enabling MSR intercept for x2APIC registers */
192218
avic_set_x2apic_msr_interception(svm, true);
193219
}
@@ -247,6 +273,30 @@ static int avic_ga_log_notifier(u32 ga_tag)
247273
return 0;
248274
}
249275

276+
static int avic_get_physical_id_table_order(struct kvm *kvm)
277+
{
278+
/* Provision for the maximum physical ID supported in x2avic mode */
279+
return get_order((__avic_get_max_physical_id(kvm, NULL) + 1) * sizeof(u64));
280+
}
281+
282+
int avic_alloc_physical_id_table(struct kvm *kvm)
283+
{
284+
struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
285+
286+
if (!irqchip_in_kernel(kvm) || !enable_apicv)
287+
return 0;
288+
289+
if (kvm_svm->avic_physical_id_table)
290+
return 0;
291+
292+
kvm_svm->avic_physical_id_table = (void *)__get_free_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
293+
avic_get_physical_id_table_order(kvm));
294+
if (!kvm_svm->avic_physical_id_table)
295+
return -ENOMEM;
296+
297+
return 0;
298+
}
299+
250300
void avic_vm_destroy(struct kvm *kvm)
251301
{
252302
unsigned long flags;
@@ -256,7 +306,8 @@ void avic_vm_destroy(struct kvm *kvm)
256306
return;
257307

258308
free_page((unsigned long)kvm_svm->avic_logical_id_table);
259-
free_page((unsigned long)kvm_svm->avic_physical_id_table);
309+
free_pages((unsigned long)kvm_svm->avic_physical_id_table,
310+
avic_get_physical_id_table_order(kvm));
260311

261312
spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
262313
hash_del(&kvm_svm->hnode);
@@ -274,10 +325,6 @@ int avic_vm_init(struct kvm *kvm)
274325
if (!enable_apicv)
275326
return 0;
276327

277-
kvm_svm->avic_physical_id_table = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
278-
if (!kvm_svm->avic_physical_id_table)
279-
goto free_avic;
280-
281328
kvm_svm->avic_logical_id_table = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
282329
if (!kvm_svm->avic_logical_id_table)
283330
goto free_avic;
@@ -342,7 +389,7 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
342389
* fully initialized AVIC.
343390
*/
344391
if ((!x2avic_enabled && id > AVIC_MAX_PHYSICAL_ID) ||
345-
(id > X2AVIC_MAX_PHYSICAL_ID)) {
392+
(id > x2avic_max_physical_id)) {
346393
kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG);
347394
vcpu->arch.apic->apicv_active = false;
348395
return 0;
@@ -562,7 +609,7 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
562609
u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
563610
u32 icrl = svm->vmcb->control.exit_info_1;
564611
u32 id = svm->vmcb->control.exit_info_2 >> 32;
565-
u32 index = svm->vmcb->control.exit_info_2 & 0x1FF;
612+
u32 index = svm->vmcb->control.exit_info_2 & AVIC_PHYSICAL_MAX_INDEX_MASK;
566613
struct kvm_lapic *apic = vcpu->arch.apic;
567614

568615
trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index);
@@ -962,7 +1009,8 @@ static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu,
9621009
if (WARN_ON(h_physical_id & ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
9631010
return;
9641011

965-
if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(entry) >= PAGE_SIZE))
1012+
if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(entry) >=
1013+
PAGE_SIZE << avic_get_physical_id_table_order(vcpu->kvm)))
9661014
return;
9671015

9681016
/*
@@ -1024,7 +1072,8 @@ static void __avic_vcpu_put(struct kvm_vcpu *vcpu, enum avic_vcpu_action action)
10241072

10251073
lockdep_assert_preemption_disabled();
10261074

1027-
if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(entry) >= PAGE_SIZE))
1075+
if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(entry) >=
1076+
PAGE_SIZE << avic_get_physical_id_table_order(vcpu->kvm)))
10281077
return;
10291078

10301079
/*
@@ -1226,10 +1275,15 @@ bool __init avic_hardware_setup(void)
12261275

12271276
/* AVIC is a prerequisite for x2AVIC. */
12281277
x2avic_enabled = boot_cpu_has(X86_FEATURE_X2AVIC);
1229-
if (x2avic_enabled)
1230-
pr_info("x2AVIC enabled\n");
1231-
else
1278+
if (x2avic_enabled) {
1279+
if (cpu_feature_enabled(X86_FEATURE_X2AVIC_EXT))
1280+
x2avic_max_physical_id = X2AVIC_4K_MAX_PHYSICAL_ID;
1281+
else
1282+
x2avic_max_physical_id = X2AVIC_MAX_PHYSICAL_ID;
1283+
pr_info("x2AVIC enabled (max %u vCPUs)\n", x2avic_max_physical_id + 1);
1284+
} else {
12321285
svm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization = true;
1286+
}
12331287

12341288
/*
12351289
* Disable IPI virtualization for AMD Family 17h CPUs (Zen1 and Zen2)

arch/x86/kvm/svm/nested.c

Lines changed: 2 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -613,6 +613,7 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
613613
struct kvm_vcpu *vcpu = &svm->vcpu;
614614

615615
nested_vmcb02_compute_g_pat(svm);
616+
vmcb_mark_dirty(vmcb02, VMCB_NPT);
616617

617618
/* Load the nested guest state */
618619
if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
@@ -751,6 +752,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
751752
vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
752753
vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
753754
vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;
755+
vmcb_mark_dirty(vmcb02, VMCB_PERM_MAP);
754756

755757
/*
756758
* Stash vmcb02's counter if the guest hasn't moved past the guilty
@@ -1430,16 +1432,6 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
14301432
case SVM_EXIT_IOIO:
14311433
vmexit = nested_svm_intercept_ioio(svm);
14321434
break;
1433-
case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
1434-
if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1435-
vmexit = NESTED_EXIT_DONE;
1436-
break;
1437-
}
1438-
case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
1439-
if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1440-
vmexit = NESTED_EXIT_DONE;
1441-
break;
1442-
}
14431435
case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
14441436
/*
14451437
* Host-intercepted exceptions have been checked already in

arch/x86/kvm/svm/sev.c

Lines changed: 29 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -65,20 +65,24 @@ module_param_named(ciphertext_hiding_asids, nr_ciphertext_hiding_asids, uint, 04
6565
#define AP_RESET_HOLD_NAE_EVENT 1
6666
#define AP_RESET_HOLD_MSR_PROTO 2
6767

68-
/* As defined by SEV-SNP Firmware ABI, under "Guest Policy". */
69-
#define SNP_POLICY_MASK_API_MINOR GENMASK_ULL(7, 0)
70-
#define SNP_POLICY_MASK_API_MAJOR GENMASK_ULL(15, 8)
71-
#define SNP_POLICY_MASK_SMT BIT_ULL(16)
72-
#define SNP_POLICY_MASK_RSVD_MBO BIT_ULL(17)
73-
#define SNP_POLICY_MASK_DEBUG BIT_ULL(19)
74-
#define SNP_POLICY_MASK_SINGLE_SOCKET BIT_ULL(20)
75-
76-
#define SNP_POLICY_MASK_VALID (SNP_POLICY_MASK_API_MINOR | \
77-
SNP_POLICY_MASK_API_MAJOR | \
78-
SNP_POLICY_MASK_SMT | \
79-
SNP_POLICY_MASK_RSVD_MBO | \
80-
SNP_POLICY_MASK_DEBUG | \
81-
SNP_POLICY_MASK_SINGLE_SOCKET)
68+
/*
69+
* SEV-SNP policy bits that can be supported by KVM. These include policy bits
70+
* that have implementation support within KVM or policy bits that do not
71+
* require implementation support within KVM to enforce the policy.
72+
*/
73+
#define KVM_SNP_POLICY_MASK_VALID (SNP_POLICY_MASK_API_MINOR | \
74+
SNP_POLICY_MASK_API_MAJOR | \
75+
SNP_POLICY_MASK_SMT | \
76+
SNP_POLICY_MASK_RSVD_MBO | \
77+
SNP_POLICY_MASK_DEBUG | \
78+
SNP_POLICY_MASK_SINGLE_SOCKET | \
79+
SNP_POLICY_MASK_CXL_ALLOW | \
80+
SNP_POLICY_MASK_MEM_AES_256_XTS | \
81+
SNP_POLICY_MASK_RAPL_DIS | \
82+
SNP_POLICY_MASK_CIPHERTEXT_HIDING_DRAM | \
83+
SNP_POLICY_MASK_PAGE_SWAP_DISABLE)
84+
85+
static u64 snp_supported_policy_bits __ro_after_init;
8286

8387
#define INITIAL_VMSA_GPA 0xFFFFFFFFF000
8488

@@ -2143,6 +2147,10 @@ int sev_dev_get_attr(u32 group, u64 attr, u64 *val)
21432147
*val = sev_supported_vmsa_features;
21442148
return 0;
21452149

2150+
case KVM_X86_SNP_POLICY_BITS:
2151+
*val = snp_supported_policy_bits;
2152+
return 0;
2153+
21462154
default:
21472155
return -ENXIO;
21482156
}
@@ -2207,7 +2215,7 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
22072215
if (params.flags)
22082216
return -EINVAL;
22092217

2210-
if (params.policy & ~SNP_POLICY_MASK_VALID)
2218+
if (params.policy & ~snp_supported_policy_bits)
22112219
return -EINVAL;
22122220

22132221
/* Check for policy bits that must be set */
@@ -3100,8 +3108,11 @@ void __init sev_hardware_setup(void)
31003108
else if (sev_snp_supported)
31013109
sev_snp_supported = is_sev_snp_initialized();
31023110

3103-
if (sev_snp_supported)
3111+
if (sev_snp_supported) {
3112+
snp_supported_policy_bits = sev_get_snp_policy_bits() &
3113+
KVM_SNP_POLICY_MASK_VALID;
31043114
nr_ciphertext_hiding_asids = init_args.max_snp_asid;
3115+
}
31053116

31063117
/*
31073118
* If ciphertext hiding is enabled, the joint SEV-ES/SEV-SNP
@@ -5085,10 +5096,10 @@ struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu)
50855096

50865097
/* Check if the SEV policy allows debugging */
50875098
if (sev_snp_guest(vcpu->kvm)) {
5088-
if (!(sev->policy & SNP_POLICY_DEBUG))
5099+
if (!(sev->policy & SNP_POLICY_MASK_DEBUG))
50895100
return NULL;
50905101
} else {
5091-
if (sev->policy & SEV_POLICY_NODBG)
5102+
if (sev->policy & SEV_POLICY_MASK_NODBG)
50925103
return NULL;
50935104
}
50945105

0 commit comments

Comments
 (0)