author     H. Peter Anvin <hpa@linux.intel.com>    2012-09-26 13:18:43 -0700
committer  Anthony Liguori <aliguori@us.ibm.com>   2012-10-01 08:04:22 -0500
commit     a9321a4d49d65d29c2926a51aedc5b91a01f3591 (patch)
tree       5703f3f012c43f9edfabe2ff26d4b0047d7e8925 /target-i386/helper.c
parent     4a19e505df659dd25a77fb790399744f3e1f971c (diff)
download   qemu-a9321a4d49d65d29c2926a51aedc5b91a01f3591.tar.gz
x86: Implement SMEP and SMAP
This patch implements Supervisor Mode Execution Prevention (SMEP) and
Supervisor Mode Access Prevention (SMAP) for x86. The purpose of the
patch, obviously, is to help kernel developers debug the support for
those features.

A fair bit of the code relates to the handling of CPUID features. The
CPUID code would probably be greatly simplified if all the feature bit
words were unified into a single vector object, but in the interest of
producing a minimal patch for SMEP/SMAP, and because I had very limited
time for this project, I followed the existing style.

[ v2: don't change the definition of the qemu64 CPU shorthand, since
  that breaks loading old snapshots. Per Anthony Liguori this can be
  fixed once the CPU feature set is snapshotted. Change the coding
  style slightly to conform to checkpatch.pl. ]

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
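In outline, the patch touches two points in this file: cpu_x86_update_cr4()
flushes the TLB when the new CR4 bits change and caches CR4.SMAP into
env->hflags, and cpu_x86_handle_mmu_fault() enforces the new rules at each
paging level, keyed on the MMU index of the access. A distilled sketch of
the rule being enforced follows; the helper function and the stand-in
constants are illustrative only, not code from the patch (the real checks
are inlined in the diff below, and the MMU indices themselves are defined
outside this file, with MMU_KSMAP_IDX covering kernel accesses for which
the SMAP check is overridden, e.g. via EFLAGS.AC):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-ins, not definitions from the patch. */
    #define CR4_SMEP_MASK (1u << 20)   /* CR4.SMEP is architecturally bit 20 */
    #define CR4_SMAP_MASK (1u << 21)   /* CR4.SMAP is architecturally bit 21 */
    enum { MMU_KERNEL_IDX, MMU_USER_IDX, MMU_KSMAP_IDX };

    /* Would this access fault under SMEP/SMAP?  user_page is the PG_USER
     * bit of the combined page-table protections; is_fetch marks an
     * instruction fetch (is_write1 == 2 in the diff below). */
    static bool smep_smap_blocks(uint32_t cr4, int mmu_idx,
                                 bool user_page, bool is_fetch)
    {
        if (!user_page || mmu_idx == MMU_USER_IDX) {
            return false;   /* both features only police supervisor
                               accesses to user pages */
        }
        if (is_fetch && (cr4 & CR4_SMEP_MASK)) {
            return true;    /* SMEP: supervisor may not execute user pages */
        }
        if (!is_fetch && mmu_idx == MMU_KERNEL_IDX &&
            (cr4 & CR4_SMAP_MASK)) {
            return true;    /* SMAP: supervisor may not read or write user
                               pages, unless overridden (MMU_KSMAP_IDX) */
        }
        return false;
    }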
Diffstat (limited to 'target-i386/helper.c')
-rw-r--r--  target-i386/helper.c | 150
1 file changed, 121 insertions(+), 29 deletions(-)
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 8a5da3d7c0..c635667d60 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -443,17 +443,27 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
 #if defined(DEBUG_MMU)
     printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
 #endif
-    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
-        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
+    if ((new_cr4 ^ env->cr[4]) &
+        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
+         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
         tlb_flush(env, 1);
     }
     /* SSE handling */
-    if (!(env->cpuid_features & CPUID_SSE))
+    if (!(env->cpuid_features & CPUID_SSE)) {
         new_cr4 &= ~CR4_OSFXSR_MASK;
-    if (new_cr4 & CR4_OSFXSR_MASK)
+    }
+    env->hflags &= ~HF_OSFXSR_MASK;
+    if (new_cr4 & CR4_OSFXSR_MASK) {
         env->hflags |= HF_OSFXSR_MASK;
-    else
-        env->hflags &= ~HF_OSFXSR_MASK;
+    }
+
+    if (!(env->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)) {
+        new_cr4 &= ~CR4_SMAP_MASK;
+    }
+    env->hflags &= ~HF_SMAP_MASK;
+    if (new_cr4 & CR4_SMAP_MASK) {
+        env->hflags |= HF_SMAP_MASK;
+    }
     env->cr[4] = new_cr4;
 }
@@ -591,17 +601,38 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
             /* 2 MB page */
             page_size = 2048 * 1024;
             ptep ^= PG_NX_MASK;
-            if ((ptep & PG_NX_MASK) && is_write1 == 2)
+            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                 goto do_fault_protect;
-            if (is_user) {
-                if (!(ptep & PG_USER_MASK))
+            }
+            switch (mmu_idx) {
+            case MMU_USER_IDX:
+                if (!(ptep & PG_USER_MASK)) {
                     goto do_fault_protect;
-                if (is_write && !(ptep & PG_RW_MASK))
+                }
+                if (is_write && !(ptep & PG_RW_MASK)) {
                     goto do_fault_protect;
-            } else {
+                }
+                break;
+
+            case MMU_KERNEL_IDX:
+                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
+                    (ptep & PG_USER_MASK)) {
+                    goto do_fault_protect;
+                }
+                /* fall through */
+            case MMU_KSMAP_IDX:
+                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
+                    (ptep & PG_USER_MASK)) {
+                    goto do_fault_protect;
+                }
                 if ((env->cr[0] & CR0_WP_MASK) &&
-                    is_write && !(ptep & PG_RW_MASK))
+                    is_write && !(ptep & PG_RW_MASK)) {
                     goto do_fault_protect;
+                }
+                break;
+
+            default: /* cannot happen */
+                break;
             }
             is_dirty = is_write && !(pde & PG_DIRTY_MASK);
             if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
@@ -635,15 +666,35 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
             ptep ^= PG_NX_MASK;
             if ((ptep & PG_NX_MASK) && is_write1 == 2)
                 goto do_fault_protect;
-            if (is_user) {
-                if (!(ptep & PG_USER_MASK))
+            switch (mmu_idx) {
+            case MMU_USER_IDX:
+                if (!(ptep & PG_USER_MASK)) {
                     goto do_fault_protect;
-                if (is_write && !(ptep & PG_RW_MASK))
+                }
+                if (is_write && !(ptep & PG_RW_MASK)) {
                     goto do_fault_protect;
-            } else {
+                }
+                break;
+
+            case MMU_KERNEL_IDX:
+                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
+                    (ptep & PG_USER_MASK)) {
+                    goto do_fault_protect;
+                }
+                /* fall through */
+            case MMU_KSMAP_IDX:
+                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
+                    (ptep & PG_USER_MASK)) {
+                    goto do_fault_protect;
+                }
                 if ((env->cr[0] & CR0_WP_MASK) &&
-                    is_write && !(ptep & PG_RW_MASK))
+                    is_write && !(ptep & PG_RW_MASK)) {
                     goto do_fault_protect;
+                }
+                break;
+
+            default: /* cannot happen */
+                break;
             }
             is_dirty = is_write && !(pte & PG_DIRTY_MASK);
             if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
@@ -670,15 +721,35 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
         /* if PSE bit is set, then we use a 4MB page */
         if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
             page_size = 4096 * 1024;
-            if (is_user) {
-                if (!(pde & PG_USER_MASK))
+            switch (mmu_idx) {
+            case MMU_USER_IDX:
+                if (!(pde & PG_USER_MASK)) {
                     goto do_fault_protect;
-                if (is_write && !(pde & PG_RW_MASK))
+                }
+                if (is_write && !(pde & PG_RW_MASK)) {
                     goto do_fault_protect;
-            } else {
+                }
+                break;
+
+            case MMU_KERNEL_IDX:
+                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
+                    (pde & PG_USER_MASK)) {
+                    goto do_fault_protect;
+                }
+                /* fall through */
+            case MMU_KSMAP_IDX:
+                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
+                    (pde & PG_USER_MASK)) {
+                    goto do_fault_protect;
+                }
                 if ((env->cr[0] & CR0_WP_MASK) &&
-                    is_write && !(pde & PG_RW_MASK))
+                    is_write && !(pde & PG_RW_MASK)) {
                     goto do_fault_protect;
+                }
+                break;
+
+            default: /* cannot happen */
+                break;
             }
             is_dirty = is_write && !(pde & PG_DIRTY_MASK);
             if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
@@ -707,15 +778,35 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
             }
             /* combine pde and pte user and rw protections */
             ptep = pte & pde;
-            if (is_user) {
-                if (!(ptep & PG_USER_MASK))
+            switch (mmu_idx) {
+            case MMU_USER_IDX:
+                if (!(ptep & PG_USER_MASK)) {
                     goto do_fault_protect;
-                if (is_write && !(ptep & PG_RW_MASK))
+                }
+                if (is_write && !(ptep & PG_RW_MASK)) {
                     goto do_fault_protect;
-            } else {
+                }
+                break;
+
+            case MMU_KERNEL_IDX:
+                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
+                    (ptep & PG_USER_MASK)) {
+                    goto do_fault_protect;
+                }
+                /* fall through */
+            case MMU_KSMAP_IDX:
+                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
+                    (ptep & PG_USER_MASK)) {
+                    goto do_fault_protect;
+                }
                 if ((env->cr[0] & CR0_WP_MASK) &&
-                    is_write && !(ptep & PG_RW_MASK))
+                    is_write && !(ptep & PG_RW_MASK)) {
                     goto do_fault_protect;
+                }
+                break;
+
+            default: /* cannot happen */
+                break;
             }
             is_dirty = is_write && !(pte & PG_DIRTY_MASK);
             if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
@@ -762,8 +853,9 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
     if (is_user)
         error_code |= PG_ERROR_U_MASK;
     if (is_write1 == 2 &&
-        (env->efer & MSR_EFER_NXE) &&
-        (env->cr[4] & CR4_PAE_MASK))
+        (((env->efer & MSR_EFER_NXE) &&
+          (env->cr[4] & CR4_PAE_MASK)) ||
+         (env->cr[4] & CR4_SMEP_MASK)))
         error_code |= PG_ERROR_I_D_MASK;
     if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
         /* cr2 is not modified in case of exceptions */
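Worth noting for reviewers: the same switch statement appears four times
above, once per page size and paging mode (2 MB PAE, 4 KB PAE, 4 MB
non-PAE, 4 KB non-PAE), differing only in whether it tests ptep or pde.
A follow-up cleanup could hoist it into a helper along these lines; this
is a hypothetical refactor sketched from the diff's own logic, not part
of the patch:

    /* Hypothetical helper, not in this patch: the protection check that
     * the diff repeats at each paging level.  ptep carries the combined
     * PG_USER/PG_RW bits; returns true if the access must fault. */
    static bool prot_check_fails(CPUX86State *env, uint64_t ptep,
                                 int mmu_idx, int is_write, int is_write1)
    {
        switch (mmu_idx) {
        case MMU_USER_IDX:
            if (!(ptep & PG_USER_MASK)) {
                return true;
            }
            if (is_write && !(ptep & PG_RW_MASK)) {
                return true;
            }
            break;

        case MMU_KERNEL_IDX:
            if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                (ptep & PG_USER_MASK)) {
                return true;   /* SMAP: kernel data access to a user page */
            }
            /* fall through */
        case MMU_KSMAP_IDX:
            if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                (ptep & PG_USER_MASK)) {
                return true;   /* SMEP: kernel fetch from a user page */
            }
            if ((env->cr[0] & CR0_WP_MASK) &&
                is_write && !(ptep & PG_RW_MASK)) {
                return true;   /* CR0.WP: kernel write to a read-only page */
            }
            break;

        default: /* cannot happen */
            break;
        }
        return false;
    }

Each of the four sites would then reduce to a single line:
if (prot_check_fails(env, ptep, mmu_idx, is_write, is_write1)) goto do_fault_protect;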