Skip to content

Commit b0bf3d6

Browse files
committed
Merge tag 'kvm-x86-selftests-6.19' of https://github.com/kvm-x86/linux into HEAD
KVM selftests changes for 6.19: - Fix a math goof in mmu_stress_test when running on a single-CPU system/VM. - Forcefully override ARCH from x86_64 to x86 to play nice with specifying ARCH=x86_64 on the command line. - Extend a bunch of nested VMX tests to validate nested SVM as well. - Add support for LA57 in the core VM_MODE_xxx macros, and add a test to verify KVM can save/restore nested VMX state when L1 is using 5-level paging, but L2 is not. - Clean up the guest paging code in anticipation of sharing the core logic for nested EPT and nested NPT.
2 parents e64dcfa + d2e5038 commit b0bf3d6

24 files changed

+481
-166
lines changed

tools/testing/selftests/kvm/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ ARCH ?= $(SUBARCH)
66
ifeq ($(ARCH),$(filter $(ARCH),arm64 s390 riscv x86 x86_64 loongarch))
77
# Top-level selftests allows ARCH=x86_64 :-(
88
ifeq ($(ARCH),x86_64)
9-
ARCH := x86
9+
override ARCH := x86
1010
endif
1111
include Makefile.kvm
1212
else

tools/testing/selftests/kvm/Makefile.kvm

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -88,8 +88,12 @@ TEST_GEN_PROGS_x86 += x86/kvm_pv_test
8888
TEST_GEN_PROGS_x86 += x86/kvm_buslock_test
8989
TEST_GEN_PROGS_x86 += x86/monitor_mwait_test
9090
TEST_GEN_PROGS_x86 += x86/msrs_test
91+
TEST_GEN_PROGS_x86 += x86/nested_close_kvm_test
9192
TEST_GEN_PROGS_x86 += x86/nested_emulation_test
9293
TEST_GEN_PROGS_x86 += x86/nested_exceptions_test
94+
TEST_GEN_PROGS_x86 += x86/nested_invalid_cr3_test
95+
TEST_GEN_PROGS_x86 += x86/nested_tsc_adjust_test
96+
TEST_GEN_PROGS_x86 += x86/nested_tsc_scaling_test
9397
TEST_GEN_PROGS_x86 += x86/platform_info_test
9498
TEST_GEN_PROGS_x86 += x86/pmu_counters_test
9599
TEST_GEN_PROGS_x86 += x86/pmu_event_filter_test
@@ -111,14 +115,12 @@ TEST_GEN_PROGS_x86 += x86/ucna_injection_test
111115
TEST_GEN_PROGS_x86 += x86/userspace_io_test
112116
TEST_GEN_PROGS_x86 += x86/userspace_msr_exit_test
113117
TEST_GEN_PROGS_x86 += x86/vmx_apic_access_test
114-
TEST_GEN_PROGS_x86 += x86/vmx_close_while_nested_test
115118
TEST_GEN_PROGS_x86 += x86/vmx_dirty_log_test
116119
TEST_GEN_PROGS_x86 += x86/vmx_exception_with_invalid_guest_state
117120
TEST_GEN_PROGS_x86 += x86/vmx_msrs_test
118121
TEST_GEN_PROGS_x86 += x86/vmx_invalid_nested_guest_state
122+
TEST_GEN_PROGS_x86 += x86/vmx_nested_la57_state_test
119123
TEST_GEN_PROGS_x86 += x86/vmx_set_nested_state_test
120-
TEST_GEN_PROGS_x86 += x86/vmx_tsc_adjust_test
121-
TEST_GEN_PROGS_x86 += x86/vmx_nested_tsc_scaling_test
122124
TEST_GEN_PROGS_x86 += x86/apic_bus_clock_test
123125
TEST_GEN_PROGS_x86 += x86/xapic_ipi_test
124126
TEST_GEN_PROGS_x86 += x86/xapic_state_test

tools/testing/selftests/kvm/include/kvm_util.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -178,7 +178,7 @@ enum vm_guest_mode {
178178
VM_MODE_P40V48_4K,
179179
VM_MODE_P40V48_16K,
180180
VM_MODE_P40V48_64K,
181-
VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */
181+
VM_MODE_PXXVYY_4K, /* For 48-bit or 57-bit VA, depending on host support */
182182
VM_MODE_P47V64_4K,
183183
VM_MODE_P44V64_4K,
184184
VM_MODE_P36V48_4K,
@@ -220,7 +220,7 @@ extern enum vm_guest_mode vm_mode_default;
220220

221221
#elif defined(__x86_64__)
222222

223-
#define VM_MODE_DEFAULT VM_MODE_PXXV48_4K
223+
#define VM_MODE_DEFAULT VM_MODE_PXXVYY_4K
224224
#define MIN_PAGE_SHIFT 12U
225225
#define ptes_per_page(page_size) ((page_size) / 8)
226226

@@ -1203,6 +1203,7 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
12031203
static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
12041204
{
12051205
virt_arch_pg_map(vm, vaddr, paddr);
1206+
sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
12061207
}
12071208

12081209

tools/testing/selftests/kvm/include/x86/processor.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1441,7 +1441,7 @@ enum pg_level {
14411441
PG_LEVEL_2M,
14421442
PG_LEVEL_1G,
14431443
PG_LEVEL_512G,
1444-
PG_LEVEL_NUM
1444+
PG_LEVEL_256T
14451445
};
14461446

14471447
#define PG_LEVEL_SHIFT(_level) ((_level - 1) * 9 + 12)

tools/testing/selftests/kvm/include/x86/vmx.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -568,8 +568,7 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
568568
void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
569569
uint64_t addr, uint64_t size);
570570
bool kvm_cpu_has_ept(void);
571-
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
572-
uint32_t eptp_memslot);
571+
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm);
573572
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
574573

575574
#endif /* SELFTEST_KVM_VMX_H */

tools/testing/selftests/kvm/lib/arm64/processor.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -324,7 +324,7 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
324324

325325
/* Configure base granule size */
326326
switch (vm->mode) {
327-
case VM_MODE_PXXV48_4K:
327+
case VM_MODE_PXXVYY_4K:
328328
TEST_FAIL("AArch64 does not support 4K sized pages "
329329
"with ANY-bit physical address ranges");
330330
case VM_MODE_P52V48_64K:

tools/testing/selftests/kvm/lib/kvm_util.c

Lines changed: 16 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -201,7 +201,7 @@ const char *vm_guest_mode_string(uint32_t i)
201201
[VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages",
202202
[VM_MODE_P40V48_16K] = "PA-bits:40, VA-bits:48, 16K pages",
203203
[VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages",
204-
[VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages",
204+
[VM_MODE_PXXVYY_4K] = "PA-bits:ANY, VA-bits:48 or 57, 4K pages",
205205
[VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages",
206206
[VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages",
207207
[VM_MODE_P36V48_4K] = "PA-bits:36, VA-bits:48, 4K pages",
@@ -228,7 +228,7 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
228228
[VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 },
229229
[VM_MODE_P40V48_16K] = { 40, 48, 0x4000, 14 },
230230
[VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 },
231-
[VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 },
231+
[VM_MODE_PXXVYY_4K] = { 0, 0, 0x1000, 12 },
232232
[VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 },
233233
[VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 },
234234
[VM_MODE_P36V48_4K] = { 36, 48, 0x1000, 12 },
@@ -310,24 +310,26 @@ struct kvm_vm *____vm_create(struct vm_shape shape)
310310
case VM_MODE_P36V47_16K:
311311
vm->pgtable_levels = 3;
312312
break;
313-
case VM_MODE_PXXV48_4K:
313+
case VM_MODE_PXXVYY_4K:
314314
#ifdef __x86_64__
315315
kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
316316
kvm_init_vm_address_properties(vm);
317-
/*
318-
* Ignore KVM support for 5-level paging (vm->va_bits == 57),
319-
* it doesn't take effect unless a CR4.LA57 is set, which it
320-
* isn't for this mode (48-bit virtual address space).
321-
*/
322-
TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
323-
"Linear address width (%d bits) not supported",
324-
vm->va_bits);
317+
325318
pr_debug("Guest physical address width detected: %d\n",
326319
vm->pa_bits);
327-
vm->pgtable_levels = 4;
328-
vm->va_bits = 48;
320+
pr_debug("Guest virtual address width detected: %d\n",
321+
vm->va_bits);
322+
323+
if (vm->va_bits == 57) {
324+
vm->pgtable_levels = 5;
325+
} else {
326+
TEST_ASSERT(vm->va_bits == 48,
327+
"Unexpected guest virtual address width: %d",
328+
vm->va_bits);
329+
vm->pgtable_levels = 4;
330+
}
329331
#else
330-
TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
332+
TEST_FAIL("VM_MODE_PXXVYY_4K not supported on non-x86 platforms");
331333
#endif
332334
break;
333335
case VM_MODE_P47V64_4K:
@@ -1437,8 +1439,6 @@ static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz,
14371439
pages--, vaddr += vm->page_size, paddr += vm->page_size) {
14381440

14391441
virt_pg_map(vm, vaddr, paddr);
1440-
1441-
sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
14421442
}
14431443

14441444
return vaddr_start;
@@ -1552,7 +1552,6 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
15521552

15531553
while (npages--) {
15541554
virt_pg_map(vm, vaddr, paddr);
1555-
sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
15561555

15571556
vaddr += page_size;
15581557
paddr += page_size;

tools/testing/selftests/kvm/lib/x86/memstress.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ void memstress_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
6363
{
6464
uint64_t start, end;
6565

66-
prepare_eptp(vmx, vm, 0);
66+
prepare_eptp(vmx, vm);
6767

6868
/*
6969
* Identity map the first 4G and the test region with 1G pages so that

tools/testing/selftests/kvm/lib/x86/processor.c

Lines changed: 41 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -158,10 +158,10 @@ bool kvm_is_tdp_enabled(void)
158158

159159
void virt_arch_pgd_alloc(struct kvm_vm *vm)
160160
{
161-
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
162-
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
161+
TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
162+
"Unknown or unsupported guest mode: 0x%x", vm->mode);
163163

164-
/* If needed, create page map l4 table. */
164+
/* If needed, create the top-level page table. */
165165
if (!vm->pgd_created) {
166166
vm->pgd = vm_alloc_page_table(vm);
167167
vm->pgd_created = true;
@@ -218,11 +218,11 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
218218
void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
219219
{
220220
const uint64_t pg_size = PG_LEVEL_SIZE(level);
221-
uint64_t *pml4e, *pdpe, *pde;
222-
uint64_t *pte;
221+
uint64_t *pte = &vm->pgd;
222+
int current_level;
223223

224-
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
225-
"Unknown or unsupported guest mode, mode: 0x%x", vm->mode);
224+
TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
225+
"Unknown or unsupported guest mode: 0x%x", vm->mode);
226226

227227
TEST_ASSERT((vaddr % pg_size) == 0,
228228
"Virtual address not aligned,\n"
@@ -243,20 +243,17 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
243243
* Allocate upper level page tables, if not already present. Return
244244
* early if a hugepage was created.
245245
*/
246-
pml4e = virt_create_upper_pte(vm, &vm->pgd, vaddr, paddr, PG_LEVEL_512G, level);
247-
if (*pml4e & PTE_LARGE_MASK)
248-
return;
249-
250-
pdpe = virt_create_upper_pte(vm, pml4e, vaddr, paddr, PG_LEVEL_1G, level);
251-
if (*pdpe & PTE_LARGE_MASK)
252-
return;
253-
254-
pde = virt_create_upper_pte(vm, pdpe, vaddr, paddr, PG_LEVEL_2M, level);
255-
if (*pde & PTE_LARGE_MASK)
256-
return;
246+
for (current_level = vm->pgtable_levels;
247+
current_level > PG_LEVEL_4K;
248+
current_level--) {
249+
pte = virt_create_upper_pte(vm, pte, vaddr, paddr,
250+
current_level, level);
251+
if (*pte & PTE_LARGE_MASK)
252+
return;
253+
}
257254

258255
/* Fill in page table entry. */
259-
pte = virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);
256+
pte = virt_get_pte(vm, pte, vaddr, PG_LEVEL_4K);
260257
TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
261258
"PTE already present for 4k page at vaddr: 0x%lx", vaddr);
262259
*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
@@ -289,6 +286,8 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
289286

290287
for (i = 0; i < nr_pages; i++) {
291288
__virt_pg_map(vm, vaddr, paddr, level);
289+
sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift,
290+
nr_bytes / PAGE_SIZE);
292291

293292
vaddr += pg_size;
294293
paddr += pg_size;
@@ -310,40 +309,38 @@ static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level)
310309
uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
311310
int *level)
312311
{
313-
uint64_t *pml4e, *pdpe, *pde;
312+
int va_width = 12 + (vm->pgtable_levels) * 9;
313+
uint64_t *pte = &vm->pgd;
314+
int current_level;
314315

315316
TEST_ASSERT(!vm->arch.is_pt_protected,
316317
"Walking page tables of protected guests is impossible");
317318

318-
TEST_ASSERT(*level >= PG_LEVEL_NONE && *level < PG_LEVEL_NUM,
319+
TEST_ASSERT(*level >= PG_LEVEL_NONE && *level <= vm->pgtable_levels,
319320
"Invalid PG_LEVEL_* '%d'", *level);
320321

321-
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
322-
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
322+
TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
323+
"Unknown or unsupported guest mode: 0x%x", vm->mode);
323324
TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
324325
(vaddr >> vm->page_shift)),
325326
"Invalid virtual address, vaddr: 0x%lx",
326327
vaddr);
327328
/*
328-
* Based on the mode check above there are 48 bits in the vaddr, so
329-
* shift 16 to sign extend the last bit (bit-47),
329+
* Check that the vaddr is a sign-extended va_width value.
330330
*/
331-
TEST_ASSERT(vaddr == (((int64_t)vaddr << 16) >> 16),
332-
"Canonical check failed. The virtual address is invalid.");
333-
334-
pml4e = virt_get_pte(vm, &vm->pgd, vaddr, PG_LEVEL_512G);
335-
if (vm_is_target_pte(pml4e, level, PG_LEVEL_512G))
336-
return pml4e;
337-
338-
pdpe = virt_get_pte(vm, pml4e, vaddr, PG_LEVEL_1G);
339-
if (vm_is_target_pte(pdpe, level, PG_LEVEL_1G))
340-
return pdpe;
341-
342-
pde = virt_get_pte(vm, pdpe, vaddr, PG_LEVEL_2M);
343-
if (vm_is_target_pte(pde, level, PG_LEVEL_2M))
344-
return pde;
331+
TEST_ASSERT(vaddr ==
332+
(((int64_t)vaddr << (64 - va_width) >> (64 - va_width))),
333+
"Canonical check failed. The virtual address is invalid.");
334+
335+
for (current_level = vm->pgtable_levels;
336+
current_level > PG_LEVEL_4K;
337+
current_level--) {
338+
pte = virt_get_pte(vm, pte, vaddr, current_level);
339+
if (vm_is_target_pte(pte, level, current_level))
340+
return pte;
341+
}
345342

346-
return virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);
343+
return virt_get_pte(vm, pte, vaddr, PG_LEVEL_4K);
347344
}
348345

349346
uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
@@ -526,7 +523,8 @@ static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
526523
{
527524
struct kvm_sregs sregs;
528525

529-
TEST_ASSERT_EQ(vm->mode, VM_MODE_PXXV48_4K);
526+
TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
527+
"Unknown or unsupported guest mode: 0x%x", vm->mode);
530528

531529
/* Set mode specific system register values. */
532530
vcpu_sregs_get(vcpu, &sregs);
@@ -540,6 +538,8 @@ static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
540538
sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
541539
if (kvm_cpu_has(X86_FEATURE_XSAVE))
542540
sregs.cr4 |= X86_CR4_OSXSAVE;
541+
if (vm->pgtable_levels == 5)
542+
sregs.cr4 |= X86_CR4_LA57;
543543
sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
544544

545545
kvm_seg_set_unusable(&sregs.ldt);

tools/testing/selftests/kvm/lib/x86/vmx.c

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -401,11 +401,11 @@ void __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
401401
struct eptPageTableEntry *pt = vmx->eptp_hva, *pte;
402402
uint16_t index;
403403

404-
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
405-
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
404+
TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
405+
"Unknown or unsupported guest mode: 0x%x", vm->mode);
406406

407407
TEST_ASSERT((nested_paddr >> 48) == 0,
408-
"Nested physical address 0x%lx requires 5-level paging",
408+
"Nested physical address 0x%lx is > 48-bits and requires 5-level EPT",
409409
nested_paddr);
410410
TEST_ASSERT((nested_paddr % page_size) == 0,
411411
"Nested physical address not on page boundary,\n"
@@ -534,8 +534,7 @@ bool kvm_cpu_has_ept(void)
534534
return ctrl & SECONDARY_EXEC_ENABLE_EPT;
535535
}
536536

537-
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
538-
uint32_t eptp_memslot)
537+
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm)
539538
{
540539
TEST_ASSERT(kvm_cpu_has_ept(), "KVM doesn't support nested EPT");
541540

0 commit comments

Comments
 (0)