KVM: x86: don't disable APICv memslot when inhibited
Thanks to the previous patches, it is now possible to keep the APICv
memslot always enabled; it is simply invisible to the guest while
APICv is inhibited.

This code is based on a suggestion from Sean Christopherson:
https://lkml.org/lkml/2021/7/19/2970

Suggested-by: Sean Christopherson <[email protected]>
Signed-off-by: Maxim Levitsky <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
Maxim Levitsky authored and bonzini committed Aug 20, 2021
1 parent 9cc13d6 commit 36222b1
Showing 6 changed files with 14 additions and 32 deletions.
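
For context on how this entry point is used, here is a minimal, illustrative sketch (not part of this commit) of a component toggling an APICv inhibit reason via kvm_request_apicv_update(), whose new body appears in the arch/x86/kvm/x86.c hunk below. The helper name and the choice of APICV_INHIBIT_REASON_PIT_REINJ are examples only; any inhibit-reason bit is handled the same way.

/*
 * Illustrative sketch, not from this commit: inhibit APICv while PIT
 * reinjection is in use, and allow it again afterwards.  With this patch,
 * inhibiting no longer deletes APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; the
 * APIC-access GFN is zapped instead and the memslot stays in place.
 */
static void example_set_pit_reinject(struct kvm *kvm, bool reinject)
{
        if (reinject)
                /* Inhibit: vCPUs get KVM_REQ_APICV_UPDATE and the GFN is zapped. */
                kvm_request_apicv_update(kvm, false, APICV_INHIBIT_REASON_PIT_REINJ);
        else
                /* Uninhibit: APICv becomes active again once no reasons remain. */
                kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_PIT_REINJ);
}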
1 change: 0 additions & 1 deletion arch/x86/include/asm/kvm-x86-ops.h
@@ -72,7 +72,6 @@ KVM_X86_OP(enable_nmi_window)
 KVM_X86_OP(enable_irq_window)
 KVM_X86_OP(update_cr8_intercept)
 KVM_X86_OP(check_apicv_inhibit_reasons)
-KVM_X86_OP_NULL(pre_update_apicv_exec_ctrl)
 KVM_X86_OP(refresh_apicv_exec_ctrl)
 KVM_X86_OP(hwapic_irr_update)
 KVM_X86_OP(hwapic_isr_update)
1 change: 0 additions & 1 deletion arch/x86/include/asm/kvm_host.h
@@ -1352,7 +1352,6 @@ struct kvm_x86_ops {
        void (*enable_irq_window)(struct kvm_vcpu *vcpu);
        void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
        bool (*check_apicv_inhibit_reasons)(ulong bit);
-       void (*pre_update_apicv_exec_ctrl)(struct kvm *kvm, bool activate);
        void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
        void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
        void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
21 changes: 6 additions & 15 deletions arch/x86/kvm/svm/avic.c
@@ -225,31 +225,26 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
  * field of the VMCB. Therefore, we set up the
  * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
  */
-static int avic_update_access_page(struct kvm *kvm, bool activate)
+static int avic_alloc_access_page(struct kvm *kvm)
 {
        void __user *ret;
        int r = 0;
 
        mutex_lock(&kvm->slots_lock);
-       /*
-        * During kvm_destroy_vm(), kvm_pit_set_reinject() could trigger
-        * APICv mode change, which update APIC_ACCESS_PAGE_PRIVATE_MEMSLOT
-        * memory region. So, we need to ensure that kvm->mm == current->mm.
-        */
-       if ((kvm->arch.apic_access_memslot_enabled == activate) ||
-           (kvm->mm != current->mm))
+
+       if (kvm->arch.apic_access_memslot_enabled)
                goto out;
 
        ret = __x86_set_memory_region(kvm,
                                      APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
                                      APIC_DEFAULT_PHYS_BASE,
-                                     activate ? PAGE_SIZE : 0);
+                                     PAGE_SIZE);
        if (IS_ERR(ret)) {
                r = PTR_ERR(ret);
                goto out;
        }
 
-       kvm->arch.apic_access_memslot_enabled = activate;
+       kvm->arch.apic_access_memslot_enabled = true;
 out:
        mutex_unlock(&kvm->slots_lock);
        return r;
@@ -270,7 +265,7 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
        if (kvm_apicv_activated(vcpu->kvm)) {
                int ret;
 
-               ret = avic_update_access_page(vcpu->kvm, true);
+               ret = avic_alloc_access_page(vcpu->kvm);
                if (ret)
                        return ret;
        }
@@ -918,10 +913,6 @@ bool svm_check_apicv_inhibit_reasons(ulong bit)
        return supported & BIT(bit);
 }
 
-void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate)
-{
-       avic_update_access_page(kvm, activate);
-}
 
 static inline int
 avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
1 change: 0 additions & 1 deletion arch/x86/kvm/svm/svm.c
@@ -4581,7 +4581,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .set_virtual_apic_mode = svm_set_virtual_apic_mode,
        .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
        .check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons,
-       .pre_update_apicv_exec_ctrl = svm_pre_update_apicv_exec_ctrl,
        .load_eoi_exitmap = svm_load_eoi_exitmap,
        .hwapic_irr_update = svm_hwapic_irr_update,
        .hwapic_isr_update = svm_hwapic_isr_update,
1 change: 0 additions & 1 deletion arch/x86/kvm/svm/svm.h
@@ -534,7 +534,6 @@ void avic_post_state_restore(struct kvm_vcpu *vcpu);
 void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
 bool svm_check_apicv_inhibit_reasons(ulong bit);
-void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate);
 void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
 void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
21 changes: 8 additions & 13 deletions arch/x86/kvm/x86.c
@@ -9255,13 +9255,6 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
 
-/*
- * NOTE: Do not hold any lock prior to calling this.
- *
- * In particular, kvm_request_apicv_update() expects kvm->srcu not to be
- * locked, because it calls __x86_set_memory_region() which does
- * synchronize_srcu(&kvm->srcu).
- */
 void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
 {
        unsigned long old, new, expected;
@@ -9282,14 +9275,16 @@ void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
                old = cmpxchg(&kvm->arch.apicv_inhibit_reasons, expected, new);
        } while (old != expected);
 
-       if (!!old == !!new)
-               return;
+       if (!!old != !!new) {
+               trace_kvm_apicv_update_request(activate, bit);
+               kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
+               if (new) {
+                       unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE);
 
-       trace_kvm_apicv_update_request(activate, bit);
-       if (kvm_x86_ops.pre_update_apicv_exec_ctrl)
-               static_call(kvm_x86_pre_update_apicv_exec_ctrl)(kvm, activate);
+                       kvm_zap_gfn_range(kvm, gfn, gfn+1);
+               }
+       }
 
-       kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
 }
 EXPORT_SYMBOL_GPL(kvm_request_apicv_update);
 
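Why zapping the single GFN is sufficient: the parent commit (9cc13d6) taught the MMU page-fault path to treat the APIC-access memslot as invisible whenever APICv is inhibited, so the slot no longer has to be deleted and re-created. A rough sketch of that rule follows; the helper name and exact placement are approximations, not the actual code from that commit.

/*
 * Approximate sketch of the MMU-side rule this patch relies on: faults on
 * the APIC-access private memslot are served from the memslot only while
 * APICv is active; otherwise the access falls back to APIC emulation.
 */
static bool apic_access_slot_is_visible(struct kvm_vcpu *vcpu,
                                        struct kvm_memory_slot *slot)
{
        if (!slot || slot->id != APIC_ACCESS_PAGE_PRIVATE_MEMSLOT)
                return true;    /* not the APIC-access slot, nothing special */

        /* Visible to the fault handler only while APICv is active. */
        return kvm_apicv_activated(vcpu->kvm);
}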