KVM: Add instruction fetch checking when walking guest page table

This patch adds instruction fetch checking when walking the guest page table,
so that SMEP (Supervisor Mode Execution Prevention) is enforced when
instructions are emulated instead of executed natively.
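
For reference, the condition being enforced is roughly the following (a
simplified sketch, not the actual walker code; the helper name and plain
bool parameters are illustrative only):

	#include <stdbool.h>

	/*
	 * SMEP violation: the CPU is fetching an instruction in
	 * supervisor mode (fetch_fault && !user_fault) from a page
	 * that is user-accessible, while CR4.SMEP is set.
	 */
	static bool smep_violation(bool fetch_fault, bool user_fault,
				   bool pte_user, bool cr4_smep)
	{
		return cr4_smep && pte_user && fetch_fault && !user_fault;
	}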

Signed-off-by: Yang, Wei <wei.y.yang@intel.com>
Signed-off-by: Shan, Haitao <haitao.shan@intel.com>
Signed-off-by: Li, Xin <xin.li@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 9d03ad4..1caeb4d 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -246,6 +246,12 @@
 			gfn_t gfn;
 			u32 ac;
 
+			/* check if the kernel is fetching from user page */
+			if (unlikely(pte_access & PT_USER_MASK) &&
+			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
+				if (fetch_fault && !user_fault)
+					eperm = true;
+
 			gfn = gpte_to_gfn_lvl(pte, lvl);
 			gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;
 
@@ -305,7 +311,8 @@
 
 	walker->fault.error_code |= write_fault | user_fault;
 
-	if (fetch_fault && mmu->nx)
+	if (fetch_fault && (mmu->nx ||
+			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
 		walker->fault.error_code |= PFERR_FETCH_MASK;
 	if (rsvd_fault)
 		walker->fault.error_code |= PFERR_RSVD_MASK;