KVM: x86: Always use 32-bit SMRAM save state for 32-bit kernels
[ Upstream commit b68f3cc ]

Invoking the 64-bit variation on a 32-bit kernel will crash the guest,
trigger a WARN, and/or lead to a buffer overrun in the host, e.g.
rsm_load_state_64() writes r8-r15 unconditionally, but enum kvm_reg and
thus x86_emulate_ctxt._regs only define r8-r15 for CONFIG_X86_64.
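For reference, an abridged sketch of the definitions involved (condensed from
arch/x86/include/asm/kvm_host.h; the elisions and comments are mine, not the
upstream code):

enum kvm_reg {
        VCPU_REGS_RAX = 0,
        /* ... VCPU_REGS_RCX through VCPU_REGS_RSI ... */
        VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
        VCPU_REGS_R8  = 8,
        /* ... VCPU_REGS_R9 through VCPU_REGS_R14 ... */
        VCPU_REGS_R15 = 15,
#endif
        VCPU_REGS_RIP,
        NR_VCPU_REGS    /* 9 without CONFIG_X86_64, 17 with it */
};

/*
 * x86_emulate_ctxt sizes its register cache with this enum:
 *     unsigned long _regs[NR_VCPU_REGS];
 * so when rsm_load_state_64() stores to indices 8-15 on a 32-bit build,
 * it writes past the end of _regs -- the buffer overrun described above.
 */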

KVM allows userspace to report long mode support via CPUID, even though
the guest is all but guaranteed to crash if it actually tries to enable
long mode.  But, a pure 32-bit guest that is ignorant of long mode will
happily plod along.

SMM complicates things as 64-bit CPUs use a different SMRAM save state
area.  KVM handles this correctly for 64-bit kernels, e.g. uses the
legacy save state map if userspace has hidden long mode from the guest,
but doesn't fare well when userspace reports long mode support on a
32-bit host kernel (32-bit KVM doesn't support 64-bit guests).
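In outline, the RSM dispatch in em_rsm() looks like the snippet below (lifted
from the emulate.c hunk further down and trimmed for illustration); before this
patch, a 32-bit kernel could still take the 64-bit branch if userspace
advertised X86_FEATURE_LM via CPUID:

        /* pre-patch behavior: emulator_has_longmode() only checks CPUID,
         * so a bogus long mode bit from userspace selects the 64-bit
         * SMRAM layout even though the kernel is 32-bit */
        if (emulator_has_longmode(ctxt))
                ret = rsm_load_state_64(ctxt, smbase + 0x8000); /* 64-bit save state map */
        else
                ret = rsm_load_state_32(ctxt, smbase + 0x8000); /* legacy save state map */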

Since the alternative is to crash the guest, e.g. by not loading state
or explicitly requesting shutdown, unconditionally use the legacy SMRAM
save state map for 32-bit KVM.  If a guest has managed to get far enough
to handle SMIs when running under a weird/buggy userspace hypervisor,
then don't deliberately crash the guest since there are no downsides
(from KVM's perspective) to allowing it to continue running.

Fixes: 660a5d5 ("KVM: x86: save/load state on SMM switch")
Cc: [email protected]
Signed-off-by: Sean Christopherson <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
Signed-off-by: Sasha Levin <[email protected]>
Sean Christopherson authored and gregkh committed Sep 16, 2019
1 parent 7a74d80 commit df5d4ea
Showing 2 changed files with 16 additions and 4 deletions.
arch/x86/kvm/emulate.c (10 additions, 0 deletions)
@@ -2331,12 +2331,16 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
 
 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
 {
+#ifdef CONFIG_X86_64
        u32 eax, ebx, ecx, edx;
 
        eax = 0x80000001;
        ecx = 0;
        ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
        return edx & bit(X86_FEATURE_LM);
+#else
+       return false;
+#endif
 }
 
 #define GET_SMSTATE(type, smbase, offset) \
@@ -2381,6 +2385,7 @@ static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
        return X86EMUL_CONTINUE;
 }
 
+#ifdef CONFIG_X86_64
 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 {
        struct desc_struct desc;
@@ -2399,6 +2404,7 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
        return X86EMUL_CONTINUE;
 }
+#endif
 
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
                                     u64 cr0, u64 cr3, u64 cr4)
@@ -2499,6 +2505,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
        return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
+#ifdef CONFIG_X86_64
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
        struct desc_struct desc;
@@ -2560,6 +2567,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 
        return X86EMUL_CONTINUE;
 }
+#endif
 
 static int em_rsm(struct x86_emulate_ctxt *ctxt)
 {
@@ -2616,9 +2624,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
        if (ctxt->ops->pre_leave_smm(ctxt, smbase))
                return X86EMUL_UNHANDLEABLE;
 
+#ifdef CONFIG_X86_64
        if (emulator_has_longmode(ctxt))
                ret = rsm_load_state_64(ctxt, smbase + 0x8000);
        else
+#endif
                ret = rsm_load_state_32(ctxt, smbase + 0x8000);
 
        if (ret != X86EMUL_CONTINUE) {
arch/x86/kvm/x86.c (6 additions, 4 deletions)
@@ -7227,9 +7227,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
        put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
 }
 
+#ifdef CONFIG_X86_64
 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 {
-#ifdef CONFIG_X86_64
        struct desc_ptr dt;
        struct kvm_segment seg;
        unsigned long val;
@@ -7279,10 +7279,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 
        for (i = 0; i < 6; i++)
                enter_smm_save_seg_64(vcpu, buf, i);
-#else
-       WARN_ON_ONCE(1);
-#endif
 }
+#endif
 
 static void enter_smm(struct kvm_vcpu *vcpu)
 {
@@ -7293,9 +7291,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 
        trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
        memset(buf, 0, 512);
+#ifdef CONFIG_X86_64
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                enter_smm_save_state_64(vcpu, buf);
        else
+#endif
                enter_smm_save_state_32(vcpu, buf);
 
        /*
@@ -7353,8 +7353,10 @@ static void enter_smm(struct kvm_vcpu *vcpu)
        kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
        kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
 
+#ifdef CONFIG_X86_64
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                kvm_x86_ops->set_efer(vcpu, 0);
+#endif
 
        kvm_update_cpuid(vcpu);
        kvm_mmu_reset_context(vcpu);
