@@ -201,6 +201,7 @@ struct kvm_x86_cpu_feature {
 #define X86_FEATURE_TSCRATEMSR		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 4)
 #define X86_FEATURE_PAUSEFILTER	KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 10)
 #define X86_FEATURE_PFTHRESHOLD	KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 12)
+#define X86_FEATURE_V_VMSAVE_VMLOAD	KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 15)
 #define X86_FEATURE_VGIF		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
 #define X86_FEATURE_IDLE_HLT		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 30)
 #define X86_FEATURE_SEV		KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
@@ -362,16 +363,6 @@ static inline unsigned int x86_model(unsigned int eax)
 	return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
 }
 
-/* Page table bitfield declarations */
-#define PTE_PRESENT_MASK	BIT_ULL(0)
-#define PTE_WRITABLE_MASK	BIT_ULL(1)
-#define PTE_USER_MASK		BIT_ULL(2)
-#define PTE_ACCESSED_MASK	BIT_ULL(5)
-#define PTE_DIRTY_MASK		BIT_ULL(6)
-#define PTE_LARGE_MASK		BIT_ULL(7)
-#define PTE_GLOBAL_MASK		BIT_ULL(8)
-#define PTE_NX_MASK		BIT_ULL(63)
-
 #define PHYSICAL_PAGE_MASK	GENMASK_ULL(51, 12)
 
 #define PAGE_SHIFT		12
@@ -436,8 +427,10 @@ struct kvm_x86_state {
 
 static inline uint64_t get_desc64_base(const struct desc64 *desc)
 {
-	return ((uint64_t)desc->base3 << 32) |
-	       (desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
+	return (uint64_t)desc->base3 << 32 |
+	       (uint64_t)desc->base2 << 24 |
+	       (uint64_t)desc->base1 << 16 |
+	       (uint64_t)desc->base0;
 }
 
 static inline uint64_t rdtsc(void)
@@ -1367,9 +1360,7 @@ static inline bool kvm_is_ignore_msrs(void)
 	return get_kvm_param_bool("ignore_msrs");
 }
 
-uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
-				    int *level);
-uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr);
+uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr);
 
 uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
 		       uint64_t a3);
@@ -1451,10 +1442,52 @@ enum pg_level {
 #define PG_SIZE_2M		PG_LEVEL_SIZE(PG_LEVEL_2M)
 #define PG_SIZE_1G		PG_LEVEL_SIZE(PG_LEVEL_1G)
 
-void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level);
+#define PTE_PRESENT_MASK(mmu)		((mmu)->arch.pte_masks.present)
+#define PTE_WRITABLE_MASK(mmu)		((mmu)->arch.pte_masks.writable)
+#define PTE_USER_MASK(mmu)		((mmu)->arch.pte_masks.user)
+#define PTE_READABLE_MASK(mmu)		((mmu)->arch.pte_masks.readable)
+#define PTE_EXECUTABLE_MASK(mmu)	((mmu)->arch.pte_masks.executable)
+#define PTE_ACCESSED_MASK(mmu)		((mmu)->arch.pte_masks.accessed)
+#define PTE_DIRTY_MASK(mmu)		((mmu)->arch.pte_masks.dirty)
+#define PTE_HUGE_MASK(mmu)		((mmu)->arch.pte_masks.huge)
+#define PTE_NX_MASK(mmu)		((mmu)->arch.pte_masks.nx)
+#define PTE_C_BIT_MASK(mmu)		((mmu)->arch.pte_masks.c)
+#define PTE_S_BIT_MASK(mmu)		((mmu)->arch.pte_masks.s)
+#define PTE_ALWAYS_SET_MASK(mmu)	((mmu)->arch.pte_masks.always_set)
+
+/*
+ * For PTEs without a PRESENT bit (i.e. EPT entries), treat the PTE as present
+ * if it's executable or readable, as EPT supports execute-only PTEs, but not
+ * write-only PTEs.
+ */
+#define is_present_pte(mmu, pte)					\
+	(PTE_PRESENT_MASK(mmu) ?					\
+	 !!(*(pte) & PTE_PRESENT_MASK(mmu)) :				\
+	 !!(*(pte) & (PTE_READABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu))))
+#define is_executable_pte(mmu, pte)					\
+	((*(pte) & (PTE_EXECUTABLE_MASK(mmu) | PTE_NX_MASK(mmu))) == PTE_EXECUTABLE_MASK(mmu))
+#define is_writable_pte(mmu, pte)	(!!(*(pte) & PTE_WRITABLE_MASK(mmu)))
+#define is_user_pte(mmu, pte)		(!!(*(pte) & PTE_USER_MASK(mmu)))
+#define is_accessed_pte(mmu, pte)	(!!(*(pte) & PTE_ACCESSED_MASK(mmu)))
+#define is_dirty_pte(mmu, pte)		(!!(*(pte) & PTE_DIRTY_MASK(mmu)))
+#define is_huge_pte(mmu, pte)		(!!(*(pte) & PTE_HUGE_MASK(mmu)))
+#define is_nx_pte(mmu, pte)		(!is_executable_pte(mmu, pte))
+
+void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels,
+		  struct pte_masks *pte_masks);
+
+void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
+		   uint64_t paddr, int level);
 void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 		    uint64_t nr_bytes, int level);
 
+void vm_enable_tdp(struct kvm_vm *vm);
+bool kvm_cpu_has_tdp(void);
+void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size);
+void tdp_identity_map_default_memslots(struct kvm_vm *vm);
+void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size);
+uint64_t *tdp_get_pte(struct kvm_vm *vm, uint64_t l2_gpa);
+
 /*
  * Basic CPU control in CR0
  */