KVM: Pull extra page fault information into struct x86_exception
Currently, page fault cr2 and nesting information are carried outside
the fault data structure. Instead they are placed in the vcpu struct,
which results in confusion, as global variables are manipulated instead
of parameters being passed.
Fix this issue by adding address and nested_page_fault fields to
struct x86_exception, so that this struct can carry all information
associated with a fault.
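For reference, with this change struct x86_exception carries everything the
fault injection paths need. The header hunk is not shown in this diff, so the
sketch below is only an approximation of the resulting layout in
arch/x86/include/asm/kvm_emulate.h; exact field order and types may differ:

	struct x86_exception {
		u8 vector;		/* exception vector, e.g. PF_VECTOR */
		bool error_code_valid;	/* does this exception push an error code? */
		u16 error_code;
		bool nested_page_fault;	/* fault taken during a nested walk */
		u64 address;		/* cr2, or nested page fault gpa */
	};

Callers now either reuse walker->fault or build such a struct on the stack and
pass it down to inject_page_fault()/kvm_inject_page_fault(), rather than
stashing the values in vcpu->arch.fault.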
Signed-off-by: Avi Kivity <avi@redhat.com>
Tested-by: Joerg Roedel <joerg.roedel@amd.com>
Tested-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9ce0414..d359500 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2736,9 +2736,10 @@
return vcpu->arch.cr3;
}
-static void inject_page_fault(struct kvm_vcpu *vcpu)
+static void inject_page_fault(struct kvm_vcpu *vcpu,
+ struct x86_exception *fault)
{
- vcpu->arch.mmu.inject_page_fault(vcpu);
+ vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}
static void paging_free(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ad5a5a2..d5a0a11 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -279,8 +279,8 @@
if (rsvd_fault)
walker->fault.error_code |= PFERR_RSVD_MASK;
- vcpu->arch.fault.address = addr;
- vcpu->arch.fault.error_code = walker->fault.error_code;
+ walker->fault.address = addr;
+ walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
trace_kvm_mmu_walker_error(walker->fault.error_code);
return 0;
@@ -568,7 +568,7 @@
*/
if (!r) {
pgprintk("%s: guest page fault\n", __func__);
- inject_page_fault(vcpu);
+ inject_page_fault(vcpu, &walker.fault);
vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
return 0;
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 28274cf..b985cb8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1647,14 +1647,15 @@
force_new_asid(vcpu);
}
-static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu)
+static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
+ struct x86_exception *fault)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->control.exit_code = SVM_EXIT_NPF;
svm->vmcb->control.exit_code_hi = 0;
- svm->vmcb->control.exit_info_1 = vcpu->arch.fault.error_code;
- svm->vmcb->control.exit_info_2 = vcpu->arch.fault.address;
+ svm->vmcb->control.exit_info_1 = fault->error_code;
+ svm->vmcb->control.exit_info_2 = fault->address;
nested_svm_vmexit(svm);
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a7a7dec..47e5a41 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -334,23 +334,19 @@
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
- unsigned error_code = vcpu->arch.fault.error_code;
-
++vcpu->stat.pf_guest;
- vcpu->arch.cr2 = vcpu->arch.fault.address;
- kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
+ vcpu->arch.cr2 = fault->address;
+ kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}
-void kvm_propagate_fault(struct kvm_vcpu *vcpu)
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
- if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
- vcpu->arch.nested_mmu.inject_page_fault(vcpu);
+ if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
+ vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
else
- vcpu->arch.mmu.inject_page_fault(vcpu);
-
- vcpu->arch.fault.nested = false;
+ vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}
void kvm_inject_nmi(struct kvm_vcpu *vcpu)
@@ -3610,8 +3606,6 @@
/* NPT walks are always user-walks */
access |= PFERR_USER_MASK;
t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
- if (t_gpa == UNMAPPED_GVA)
- vcpu->arch.fault.nested = true;
return t_gpa;
}
@@ -4259,7 +4253,7 @@
{
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
if (ctxt->exception.vector == PF_VECTOR)
- kvm_propagate_fault(vcpu);
+ kvm_propagate_fault(vcpu, &ctxt->exception);
else if (ctxt->exception.error_code_valid)
kvm_queue_exception_e(vcpu, ctxt->exception.vector,
ctxt->exception.error_code);
@@ -6264,6 +6258,8 @@
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
+ struct x86_exception fault;
+
trace_kvm_async_pf_not_present(work->arch.token, work->gva);
kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
@@ -6272,15 +6268,20 @@
kvm_x86_ops->get_cpl(vcpu) == 0))
kvm_make_request(KVM_REQ_APF_HALT, vcpu);
else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
- vcpu->arch.fault.error_code = 0;
- vcpu->arch.fault.address = work->arch.token;
- kvm_inject_page_fault(vcpu);
+ fault.vector = PF_VECTOR;
+ fault.error_code_valid = true;
+ fault.error_code = 0;
+ fault.nested_page_fault = false;
+ fault.address = work->arch.token;
+ kvm_inject_page_fault(vcpu, &fault);
}
}
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
+ struct x86_exception fault;
+
trace_kvm_async_pf_ready(work->arch.token, work->gva);
if (is_error_page(work->page))
work->arch.token = ~0; /* broadcast wakeup */
@@ -6289,9 +6290,12 @@
if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
- vcpu->arch.fault.error_code = 0;
- vcpu->arch.fault.address = work->arch.token;
- kvm_inject_page_fault(vcpu);
+ fault.vector = PF_VECTOR;
+ fault.error_code_valid = true;
+ fault.error_code = 0;
+ fault.nested_page_fault = false;
+ fault.address = work->arch.token;
+ kvm_inject_page_fault(vcpu, &fault);
}
vcpu->arch.apf.halted = false;
}