spin_unlock(&d->page_alloc_lock);
}
+/*
+ * Switch this vCPU's SMAP check policy, returning the previous policy so
+ * the caller can restore it once the privileged access is complete.
+ */
+smap_check_policy_t smap_policy_change(struct vcpu *v,
+ smap_check_policy_t new_policy)
+{
+ smap_check_policy_t old_policy = v->arch.smap_check_policy;
+ v->arch.smap_check_policy = new_policy;
+ return old_policy;
+}
+
/*
* The hole may be at or above the 44-bit boundary, so we need to determine
* the total bit count until reaching 32 significant (not squashed out) bits
}
/* Update per-VCPU guest runstate shared memory area (if registered). */
-bool_t update_runstate_area(const struct vcpu *v)
+bool_t update_runstate_area(struct vcpu *v)
{
+ bool_t rc;
+ smap_check_policy_t smap_policy;
+
if ( guest_handle_is_null(runstate_guest(v)) )
return 1;
+ /*
+ * Temporarily force the SMAP check on for the copy below, restoring the
+ * previous policy before returning — the runstate area is written on the
+ * guest kernel's behalf, so the walk must not honor the current CPL/AC.
+ */
+ smap_policy = smap_policy_change(v, SMAP_CHECK_ENABLED);
+
if ( has_32bit_shinfo(v->domain) )
{
struct compat_vcpu_runstate_info info;
XLAT_vcpu_runstate_info(&info, &v->runstate);
__copy_to_guest(v->runstate_guest.compat, &info, 1);
- return 1;
+ rc = 1;
}
+ else
+ rc = __copy_to_guest(runstate_guest(v), &v->runstate, 1) !=
+ sizeof(v->runstate);
- return __copy_to_guest(runstate_guest(v), &v->runstate, 1) !=
- sizeof(v->runstate);
+ smap_policy_change(v, smap_policy);
+
+ return rc;
}
static void _update_runstate_area(struct vcpu *v)
struct segment_register seg;
const struct cpu_user_regs *regs = guest_cpu_user_regs();
- hvm_get_segment_register(v, x86_seg_ss, &seg);
-
/* SMEP: kernel-mode instruction fetches from user-mode mappings
* should fault. Unlike NX or invalid bits, we're looking for _all_
* entries in the walk to have _PAGE_USER set, so we need to do the
* whole walk as if it were a user-mode one and then invert the answer. */
smep = hvm_smep_enabled(v) && (pfec & PFEC_insn_fetch);
- /*
- * SMAP: kernel-mode data accesses from user-mode mappings should fault
- * A fault is considered as a SMAP violation if the following
- * conditions come true:
- * - X86_CR4_SMAP is set in CR4
- * - A user page is accessed
- * - CPL = 3 or X86_EFLAGS_AC is clear
- * - Page fault in kernel mode
- */
- smap = hvm_smap_enabled(v) &&
- ((seg.attr.fields.dpl == 3) || !(regs->eflags & X86_EFLAGS_AC));
+ /*
+ * Which SMAP semantics to apply is now selected by the vCPU's check
+ * policy: honor the guest's CPL/AC (architectural behavior), force
+ * the check regardless of CPL/AC, or skip it entirely. SS is only
+ * read in the HONOR_CPL_AC case, so the unconditional
+ * hvm_get_segment_register() above is dropped.
+ */
+ switch ( v->arch.smap_check_policy )
+ {
+ case SMAP_CHECK_HONOR_CPL_AC:
+ hvm_get_segment_register(v, x86_seg_ss, &seg);
+
+ /*
+ * SMAP: kernel-mode data accesses from user-mode mappings
+ * should fault.
+ * A fault is considered as a SMAP violation if the following
+ * conditions come true:
+ * - X86_CR4_SMAP is set in CR4
+ * - A user page is accessed
+ * - CPL = 3 or X86_EFLAGS_AC is clear
+ * - Page fault in kernel mode
+ */
+ smap = hvm_smap_enabled(v) &&
+ ((seg.attr.fields.dpl == 3) ||
+ !(regs->eflags & X86_EFLAGS_AC));
+ break;
+ case SMAP_CHECK_ENABLED:
+ smap = hvm_smap_enabled(v);
+ break;
+ default:
+ ASSERT(v->arch.smap_check_policy == SMAP_CHECK_DISABLED);
+ break;
+ }
}
if ( smep || smap )
struct vcpu_time_info pending_system_time;
};
+/*
+ * Per-vCPU policy for applying the SMAP check during guest page-table
+ * walks performed by the hypervisor on the guest's behalf.
+ */
+typedef enum __packed {
+ SMAP_CHECK_HONOR_CPL_AC, /* honor the guest's CPL and AC */
+ SMAP_CHECK_ENABLED, /* enable the check */
+ SMAP_CHECK_DISABLED, /* disable the check */
+} smap_check_policy_t;
+
struct arch_vcpu
{
/*
* and thus should be saved/restored. */
bool_t nonlazy_xstate_used;
+ /*
+ * The SMAP check policy when updating runstate_guest(v) and the
+ * secondary system time.
+ */
+ smap_check_policy_t smap_check_policy;
+
struct vmce vmce;
struct paging_vcpu paging;
XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;
} __cacheline_aligned;
+/* Set v->arch.smap_check_policy to new_policy; returns the old policy. */
+smap_check_policy_t smap_policy_change(struct vcpu *v,
+ smap_check_policy_t new_policy);
+
/* Shorthands to improve code legibility. */
#define hvm_vmx hvm_vcpu.u.vmx
#define hvm_svm hvm_vcpu.u.svm
-bool_t update_runstate_area(const struct vcpu *);
+/* Non-const vcpu: the function now writes v->arch.smap_check_policy. */
+bool_t update_runstate_area(struct vcpu *);
bool_t update_secondary_system_time(const struct vcpu *,
struct vcpu_time_info *);