/* x86 emulator support for the shadow code
 */
+/*
+ * Returns a mapped pointer to write to, or one of the following error
+ * indicators.
+ */
+#define MAPPING_UNHANDLEABLE ERR_PTR(~(long)X86EMUL_UNHANDLEABLE)
+#define MAPPING_EXCEPTION ERR_PTR(~(long)X86EMUL_EXCEPTION)
+#define MAPPING_SILENT_FAIL ERR_PTR(~(long)X86EMUL_OKAY)
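+/*
+ * The ~ in the encodings above maps the non-negative X86EMUL_* values into
+ * the negative range ERR_PTR() expects; callers test IS_ERR() and recover
+ * the X86EMUL_* value with ~PTR_ERR().  Note that MAPPING_SILENT_FAIL
+ * decodes to X86EMUL_OKAY, i.e. the write is dropped silently.
+ */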
+static void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr,
+                                 unsigned int bytes,
+                                 struct sh_emulate_ctxt *sh_ctxt);
+static void sh_emulate_unmap_dest(struct vcpu *v, void *addr,
+                                  unsigned int bytes,
+                                  struct sh_emulate_ctxt *sh_ctxt);
+
/*
 * Callers which pass a known in-range x86_segment can rely on the return
 * pointer being valid. Other callers must explicitly check for errors.
        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    struct vcpu *v = current;
    unsigned long addr;
+    void *ptr;
    int rc;

    /* How many emulations could we save if we unshadowed on stack writes? */
    if ( rc || !bytes )
        return rc;

-    return v->arch.paging.mode->shadow.x86_emulate_write(
-        v, addr, p_data, bytes, sh_ctxt);
+    /* Unaligned writes are only acceptable on HVM */
+    if ( (addr & (bytes - 1)) && !is_hvm_vcpu(v) )
+        return X86EMUL_UNHANDLEABLE;
+
+    ptr = sh_emulate_map_dest(v, addr, bytes, sh_ctxt);
+    if ( IS_ERR(ptr) )
+        return ~PTR_ERR(ptr);
+
+    paging_lock(v->domain);
+    memcpy(ptr, p_data, bytes);
+
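+    /* If tracing is enabled, record the value being written. */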
+    if ( tb_init_done )
+        v->arch.paging.mode->shadow.trace_emul_write_val(ptr, addr,
+                                                         p_data, bytes);
+
+    sh_emulate_unmap_dest(v, ptr, bytes, sh_ctxt);
+    shadow_audit_tables(v);
+    paging_unlock(v->domain);
+
+    return X86EMUL_OKAY;
}
static int
    struct sh_emulate_ctxt *sh_ctxt =
        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    struct vcpu *v = current;
-    unsigned long addr, old, new;
+    unsigned long addr, old, new, prev;
+    void *ptr;
    int rc;

    if ( bytes > sizeof(long) )
    if ( rc )
        return rc;

+    /* Unaligned writes are only acceptable on HVM */
+    if ( (addr & (bytes - 1)) && !is_hvm_vcpu(v) )
+        return X86EMUL_UNHANDLEABLE;
+
+    ptr = sh_emulate_map_dest(v, addr, bytes, sh_ctxt);
+    if ( IS_ERR(ptr) )
+        return ~PTR_ERR(ptr);
+
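+    /* Zero-extend the (up to sizeof(long) wide) old and new values. */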
    old = new = 0;
    memcpy(&old, p_old, bytes);
    memcpy(&new, p_new, bytes);
-    rc = v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
-             v, addr, &old, new, bytes, sh_ctxt);
+    paging_lock(v->domain);
+    switch ( bytes )
+    {
+    case 1: prev = cmpxchg((uint8_t *)ptr, old, new); break;
+    case 2: prev = cmpxchg((uint16_t *)ptr, old, new); break;
+    case 4: prev = cmpxchg((uint32_t *)ptr, old, new); break;
+    case 8: prev = cmpxchg((uint64_t *)ptr, old, new); break;
+    default:
+        SHADOW_PRINTK("cmpxchg size %u is not supported\n", bytes);
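+        /* Make prev differ from old, so the check below reports failure. */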
+        prev = ~old;
+    }
+
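+    /* On failure, hand the value actually observed back to the caller. */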
+    if ( prev != old )
+    {
+        memcpy(p_old, &prev, bytes);
+        rc = X86EMUL_CMPXCHG_FAILED;
+    }
+
+    SHADOW_DEBUG(EMULATE,
+                 "va %#lx was %#lx expected %#lx wanted %#lx now %#lx bytes %u\n",
+                 addr, prev, old, new, *(unsigned long *)ptr, bytes);

-    memcpy(p_old, &old, bytes);
+    sh_emulate_unmap_dest(v, ptr, bytes, sh_ctxt);
+    shadow_audit_tables(v);
+    paging_unlock(v->domain);

    return rc;
}
 * returned, page references will be held on sh_ctxt->mfn[0] and
 * sh_ctxt->mfn[1] iff !INVALID_MFN.
 */
-void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr,
-                          unsigned int bytes,
-                          struct sh_emulate_ctxt *sh_ctxt)
+static void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr,
+                                 unsigned int bytes,
+                                 struct sh_emulate_ctxt *sh_ctxt)
{
    struct domain *d = v->domain;
    void *map;
 * Tidy up after the emulated write: mark pages dirty, verify the new
 * contents, and undo the mapping.
 */
-void sh_emulate_unmap_dest(struct vcpu *v, void *addr, unsigned int bytes,
-                           struct sh_emulate_ctxt *sh_ctxt)
+static void sh_emulate_unmap_dest(struct vcpu *v, void *addr,
+                                  unsigned int bytes,
+                                  struct sh_emulate_ctxt *sh_ctxt)
{
    u32 b1 = bytes, b2 = 0, shflags;
#endif
static DEFINE_PER_CPU(guest_pa_t,trace_emulate_write_val);
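+
+/*
+ * Record the value of an emulated pagetable write, for the tracing code.
+ * With 3-level guests a 64-bit PTE may be written piecewise; when a write
+ * lands inside the PTE that caused the initial fault, capture the whole
+ * (GUEST_PTE_SIZE) entry rather than just the bytes written.
+ */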
+static void trace_emulate_write_val(const void *ptr, unsigned long vaddr,
+                                    const void *src, unsigned int bytes)
+{
+#if GUEST_PAGING_LEVELS == 3
+    if ( vaddr == this_cpu(trace_emulate_initial_va) )
+        memcpy(&this_cpu(trace_emulate_write_val), src, bytes);
+    else if ( (vaddr & ~(GUEST_PTE_SIZE - 1)) ==
+              this_cpu(trace_emulate_initial_va) )
+    {
+        TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_EMULATE_FULL_PT);
+        memcpy(&this_cpu(trace_emulate_write_val),
+               (typeof(ptr))((unsigned long)ptr & ~(GUEST_PTE_SIZE - 1)),
+               GUEST_PTE_SIZE);
+    }
+#else
+    memcpy(&this_cpu(trace_emulate_write_val), src, bytes);
+#endif
+}
+
static inline void trace_shadow_emulate(guest_l1e_t gl1e, unsigned long va)
{
    if ( tb_init_done )
}
#endif
-/**************************************************************************/
-/* Handling guest writes to pagetables. */
-
-static int
-sh_x86_emulate_write(struct vcpu *v, unsigned long vaddr, void *src,
-                     u32 bytes, struct sh_emulate_ctxt *sh_ctxt)
-{
-    void *addr;
-
-    /* Unaligned writes are only acceptable on HVM */
-    if ( (vaddr & (bytes - 1)) && !is_hvm_vcpu(v) )
-        return X86EMUL_UNHANDLEABLE;
-
-    addr = sh_emulate_map_dest(v, vaddr, bytes, sh_ctxt);
-    if ( IS_ERR(addr) )
-        return ~PTR_ERR(addr);
-
-    paging_lock(v->domain);
-    memcpy(addr, src, bytes);
-
-    if ( tb_init_done )
-    {
-#if GUEST_PAGING_LEVELS == 3
-        if ( vaddr == this_cpu(trace_emulate_initial_va) )
-            memcpy(&this_cpu(trace_emulate_write_val), src, bytes);
-        else if ( (vaddr & ~(0x7UL)) == this_cpu(trace_emulate_initial_va) )
-        {
-            TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_EMULATE_FULL_PT);
-            memcpy(&this_cpu(trace_emulate_write_val),
-                   (void *)(((unsigned long) addr) & ~(0x7UL)), GUEST_PTE_SIZE);
-        }
-#else
-        memcpy(&this_cpu(trace_emulate_write_val), src, bytes);
-#endif
-    }
-
-    sh_emulate_unmap_dest(v, addr, bytes, sh_ctxt);
-    shadow_audit_tables(v);
-    paging_unlock(v->domain);
-    return X86EMUL_OKAY;
-}
-
-static int
-sh_x86_emulate_cmpxchg(struct vcpu *v, unsigned long vaddr,
-                       unsigned long *p_old, unsigned long new,
-                       unsigned int bytes, struct sh_emulate_ctxt *sh_ctxt)
-{
-    void *addr;
-    unsigned long prev, old = *p_old;
-    int rv = X86EMUL_OKAY;
-
-    /* Unaligned writes are only acceptable on HVM */
-    if ( (vaddr & (bytes - 1)) && !is_hvm_vcpu(v) )
-        return X86EMUL_UNHANDLEABLE;
-
-    addr = sh_emulate_map_dest(v, vaddr, bytes, sh_ctxt);
-    if ( IS_ERR(addr) )
-        return ~PTR_ERR(addr);
-
-    paging_lock(v->domain);
-    switch ( bytes )
-    {
-    case 1: prev = cmpxchg(((u8 *)addr), old, new); break;
-    case 2: prev = cmpxchg(((u16 *)addr), old, new); break;
-    case 4: prev = cmpxchg(((u32 *)addr), old, new); break;
-    case 8: prev = cmpxchg(((u64 *)addr), old, new); break;
-    default:
-        SHADOW_PRINTK("cmpxchg of size %i is not supported\n", bytes);
-        prev = ~old;
-    }
-
-    if ( prev != old )
-    {
-        *p_old = prev;
-        rv = X86EMUL_CMPXCHG_FAILED;
-    }
-
-    SHADOW_DEBUG(EMULATE, "va %#lx was %#lx expected %#lx"
-                 " wanted %#lx now %#lx bytes %u\n",
-                 vaddr, prev, old, new, *(unsigned long *)addr, bytes);
-
-    sh_emulate_unmap_dest(v, addr, bytes, sh_ctxt);
-    shadow_audit_tables(v);
-    paging_unlock(v->domain);
-    return rv;
-}
-
/**************************************************************************/
/* Audit tools */
    .write_p2m_entry               = shadow_write_p2m_entry,
    .guest_levels                  = GUEST_PAGING_LEVELS,
    .shadow.detach_old_tables      = sh_detach_old_tables,
-    .shadow.x86_emulate_write      = sh_x86_emulate_write,
-    .shadow.x86_emulate_cmpxchg    = sh_x86_emulate_cmpxchg,
    .shadow.write_guest_entry      = sh_write_guest_entry,
    .shadow.cmpxchg_guest_entry    = sh_cmpxchg_guest_entry,
    .shadow.make_monitor_table     = sh_make_monitor_table,
    .shadow.guess_wrmap            = sh_guess_wrmap,
#endif
    .shadow.pagetable_dying        = sh_pagetable_dying,
+    .shadow.trace_emul_write_val   = trace_emulate_write_val,
    .shadow.shadow_levels          = SHADOW_PAGING_LEVELS,
};
 * With user_only == 1, unhooks only the user-mode mappings. */
void shadow_unhook_mappings(struct domain *d, mfn_t smfn, int user_only);
-/* Returns a mapped pointer to write to, or one of the following error
- * indicators. */
-#define MAPPING_UNHANDLEABLE ERR_PTR(~(long)X86EMUL_UNHANDLEABLE)
-#define MAPPING_EXCEPTION ERR_PTR(~(long)X86EMUL_EXCEPTION)
-#define MAPPING_SILENT_FAIL ERR_PTR(~(long)X86EMUL_OKAY)
-void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr,
-                          unsigned int bytes, struct sh_emulate_ctxt *sh_ctxt);
-void sh_emulate_unmap_dest(struct vcpu *v, void *addr, unsigned int bytes,
-                           struct sh_emulate_ctxt *sh_ctxt);
-
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Allow a shadowed page to go out of sync */
int sh_unsync(struct vcpu *v, mfn_t gmfn);
#define sh_unhook_64b_mappings INTERNAL_NAME(sh_unhook_64b_mappings)
#define sh_paging_mode INTERNAL_NAME(sh_paging_mode)
#define sh_detach_old_tables INTERNAL_NAME(sh_detach_old_tables)
-#define sh_x86_emulate_write INTERNAL_NAME(sh_x86_emulate_write)
-#define sh_x86_emulate_cmpxchg INTERNAL_NAME(sh_x86_emulate_cmpxchg)
#define sh_audit_l1_table INTERNAL_NAME(sh_audit_l1_table)
#define sh_audit_fl1_table INTERNAL_NAME(sh_audit_fl1_table)
#define sh_audit_l2_table INTERNAL_NAME(sh_audit_l2_table)
struct shadow_paging_mode {
#ifdef CONFIG_SHADOW_PAGING
    void          (*detach_old_tables     )(struct vcpu *v);
-    int           (*x86_emulate_write     )(struct vcpu *v, unsigned long va,
-                                            void *src, u32 bytes,
-                                            struct sh_emulate_ctxt *sh_ctxt);
-    int           (*x86_emulate_cmpxchg   )(struct vcpu *v, unsigned long va,
-                                            unsigned long *old,
-                                            unsigned long new,
-                                            unsigned int bytes,
-                                            struct sh_emulate_ctxt *sh_ctxt);
    bool          (*write_guest_entry     )(struct vcpu *v, intpte_t *p,
                                            intpte_t new, mfn_t gmfn);
    bool          (*cmpxchg_guest_entry   )(struct vcpu *v, intpte_t *p,
    int           (*guess_wrmap           )(struct vcpu *v,
                                            unsigned long vaddr, mfn_t gmfn);
    void          (*pagetable_dying       )(struct vcpu *v, paddr_t gpa);
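+    /* Record the value of an emulated pagetable write, for tracing. */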
+    void          (*trace_emul_write_val  )(const void *ptr,
+                                            unsigned long vaddr,
+                                            const void *src,
+                                            unsigned int bytes);
#endif
    /* For outsiders to tell what mode we're in */
    unsigned int shadow_levels;