
Commit 662f1d1

suomilewis authored and bonzini committed
KVM: nVMX: Add support for capturing highest observable L2 TSC
The L1 hypervisor may include the IA32_TIME_STAMP_COUNTER MSR in the
vmcs12 MSR VM-exit MSR-store area as a way of determining the highest
TSC value that might have been observed by L2 prior to VM-exit. The
current implementation does not capture a very tight bound on this
value. To tighten the bound, add the IA32_TIME_STAMP_COUNTER MSR to the
vmcs02 VM-exit MSR-store area whenever it appears in the vmcs12 VM-exit
MSR-store area. When L0 processes the vmcs12 VM-exit MSR-store area
during the emulation of an L2->L1 VM-exit, special-case the
IA32_TIME_STAMP_COUNTER MSR, using the value stored in the vmcs02
VM-exit MSR-store area to derive the value to be stored in the vmcs12
VM-exit MSR-store area.

Reviewed-by: Liran Alon <[email protected]>
Reviewed-by: Jim Mattson <[email protected]>
Signed-off-by: Aaron Lewis <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent ef0fbca commit 662f1d1
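
For context, the capture mechanism the message describes is driven entirely by L1: it lists IA32_TIME_STAMP_COUNTER in the vmcs12 VM-exit MSR-store area and reads the stored value back after the VM-exit. Below is a minimal, stand-alone sketch of that L1-side setup; vmwrite(), virt_to_phys() and msr_store are illustrative assumptions, while the MSR index and VMCS field encodings are the architectural values.

#include <stdint.h>

/* Format of a VM-exit MSR-store area entry (Intel SDM). */
struct vmx_msr_entry {
	uint32_t index;
	uint32_t reserved;
	uint64_t value;
};

#define MSR_IA32_TSC		0x00000010
#define VM_EXIT_MSR_STORE_ADDR	0x00002006	/* VMCS field encoding */
#define VM_EXIT_MSR_STORE_COUNT	0x0000400e	/* VMCS field encoding */

/* The MSR-store area must be physically contiguous and 16-byte aligned. */
static struct vmx_msr_entry msr_store[1] __attribute__((aligned(16)));

/* Hypothetical helpers any L1 VMM would already have. */
extern void vmwrite(uint64_t field, uint64_t value);
extern uint64_t virt_to_phys(void *va);

static void capture_l2_tsc_on_vmexit(void)
{
	msr_store[0].index = MSR_IA32_TSC;
	vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_phys(msr_store));
	vmwrite(VM_EXIT_MSR_STORE_COUNT, 1);

	/*
	 * After the next VM-exit from L2, msr_store[0].value is an upper
	 * bound on the highest TSC value L2 may have observed; this commit
	 * makes that bound much tighter when L1 itself runs under KVM.
	 */
}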

File tree

3 files changed: +101 -7 lines changed

arch/x86/kvm/vmx/nested.c

Lines changed: 95 additions & 6 deletions
@@ -940,6 +940,37 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	return i + 1;
 }
 
+static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
+					    u32 msr_index,
+					    u64 *data)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	/*
+	 * If the L0 hypervisor stored a more accurate value for the TSC that
+	 * does not include the time taken for emulation of the L2->L1
+	 * VM-exit in L0, use the more accurate value.
+	 */
+	if (msr_index == MSR_IA32_TSC) {
+		int index = vmx_find_msr_index(&vmx->msr_autostore.guest,
+					       MSR_IA32_TSC);
+
+		if (index >= 0) {
+			u64 val = vmx->msr_autostore.guest.val[index].value;
+
+			*data = kvm_read_l1_tsc(vcpu, val);
+			return true;
+		}
+	}
+
+	if (kvm_get_msr(vcpu, msr_index, data)) {
+		pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
+				     msr_index);
+		return false;
+	}
+	return true;
+}
+
 static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
 				     struct vmx_msr_entry *e)
 {
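
In the helper added above, the value hardware saved for IA32_TSC in the vmcs02 VM-exit MSR-store area is an L0 (host) TSC sample taken at the moment of the hardware VM-exit, before L0 spends cycles emulating the L2->L1 transition; kvm_read_l1_tsc() then converts that sample into L1's TSC domain. A stand-alone sketch of that conversion, with an illustrative function name and explicit fixed-point parameters rather than KVM's internal state:

#include <stdint.h>

/*
 * Illustrative sketch of the conversion performed by kvm_read_l1_tsc():
 * scale a host TSC sample by L1's fixed-point TSC multiplier ("ratio"
 * with "frac_bits" fractional bits), then add L1's TSC offset.  The
 * multiply needs 128-bit intermediate precision.
 */
static inline uint64_t host_tsc_to_l1_tsc(uint64_t host_tsc, uint64_t ratio,
					  unsigned int frac_bits, int64_t offset)
{
	uint64_t scaled = (uint64_t)(((unsigned __int128)host_tsc * ratio) >>
				     frac_bits);

	return scaled + (uint64_t)offset;
}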
@@ -974,12 +1005,9 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
 			return -EINVAL;
 
-		if (kvm_get_msr(vcpu, e.index, &data)) {
-			pr_debug_ratelimited(
-				"%s cannot read MSR (%u, 0x%x)\n",
-				__func__, i, e.index);
+		if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
 			return -EINVAL;
-		}
+
 		if (kvm_vcpu_write_guest(vcpu,
 					 gpa + i * sizeof(e) +
 					 offsetof(struct vmx_msr_entry, value),
@@ -993,6 +1021,60 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	return 0;
 }
 
+static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
+{
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+	u32 count = vmcs12->vm_exit_msr_store_count;
+	u64 gpa = vmcs12->vm_exit_msr_store_addr;
+	struct vmx_msr_entry e;
+	u32 i;
+
+	for (i = 0; i < count; i++) {
+		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
+			return false;
+
+		if (e.index == msr_index)
+			return true;
+	}
+	return false;
+}
+
+static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
+					   u32 msr_index)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
+	bool in_vmcs12_store_list;
+	int msr_autostore_index;
+	bool in_autostore_list;
+	int last;
+
+	msr_autostore_index = vmx_find_msr_index(autostore, msr_index);
+	in_autostore_list = msr_autostore_index >= 0;
+	in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);
+
+	if (in_vmcs12_store_list && !in_autostore_list) {
+		if (autostore->nr == NR_LOADSTORE_MSRS) {
+			/*
+			 * Emulated VMEntry does not fail here.  Instead a less
+			 * accurate value will be returned by
+			 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
+			 * instead of reading the value from the vmcs02 VMExit
+			 * MSR-store area.
+			 */
+			pr_warn_ratelimited(
+				"Not enough msr entries in msr_autostore.  Can't add msr %x\n",
+				msr_index);
+			return;
+		}
+		last = autostore->nr++;
+		autostore->val[last].index = msr_index;
+	} else if (!in_vmcs12_store_list && in_autostore_list) {
+		last = --autostore->nr;
+		autostore->val[msr_autostore_index] = autostore->val[last];
+	}
+}
+
 static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
 {
 	unsigned long invalid_mask;
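
Note the removal path in prepare_vmx_msr_autostore_list() above: the list is kept dense by copying the last element over the vacated slot and shrinking the count, an O(1) idiom that is safe because neither the hardware nor KVM relies on entry order here. Illustrated in isolation (types and names hypothetical):

#include <stdint.h>

struct msr_entry {
	uint32_t index;
	uint64_t value;
};

struct msr_list {
	unsigned int nr;
	struct msr_entry val[8];	/* stand-in for NR_LOADSTORE_MSRS */
};

/* Remove slot i in O(1) by overwriting it with the last live entry. */
static void msr_list_remove(struct msr_list *l, unsigned int i)
{
	l->val[i] = l->val[--l->nr];
}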
@@ -2038,7 +2120,7 @@ static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
 	 * addresses are constant (for vmcs02), the counts can change based
 	 * on L2's behavior, e.g. switching to/from long mode.
 	 */
-	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+	vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val));
 	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
 	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));

@@ -2306,6 +2388,13 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 		vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
 	}
 
+	/*
+	 * Make sure the msr_autostore list is up to date before we set the
+	 * count in the vmcs02.
+	 */
+	prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);
+
+	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);

arch/x86/kvm/vmx/vmx.c

Lines changed: 1 addition & 1 deletion
@@ -833,7 +833,7 @@ static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
 	vm_exit_controls_clearbit(vmx, exit);
 }
 
-static int vmx_find_msr_index(struct vmx_msrs *m, u32 msr)
+int vmx_find_msr_index(struct vmx_msrs *m, u32 msr)
 {
 	unsigned int i;
 
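This hunk only changes vmx_find_msr_index()'s linkage so nested.c can call it; the body is unchanged and therefore elided by the diff. As a reading aid, the helper is a linear scan over the (small, at most NR_LOADSTORE_MSRS entries) array that returns the slot index, or a negative errno when the MSR is absent -- which is why callers above test for index >= 0. A reconstructed sketch, not part of this change:

int vmx_find_msr_index(struct vmx_msrs *m, u32 msr)
{
	unsigned int i;

	for (i = 0; i < m->nr; ++i) {
		if (m->val[i].index == msr)
			return i;	/* found: slot index */
	}
	return -ENOENT;			/* not in the list */
}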

arch/x86/kvm/vmx/vmx.h

Lines changed: 5 additions & 0 deletions
@@ -233,6 +233,10 @@ struct vcpu_vmx {
 		struct vmx_msrs host;
 	} msr_autoload;
 
+	struct msr_autostore {
+		struct vmx_msrs guest;
+	} msr_autostore;
+
 	struct {
 		int vm86_active;
 		ulong save_rflags;
@@ -337,6 +341,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
 void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
+int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
 
 #define POSTED_INTR_ON		0
 #define POSTED_INTR_SN		1
