* Keir Fraser <keir@xen.org>
*/
+#include <xen/domain_page.h>
#include <asm/x86_emulate.h>
#include <asm/asm_defns.h> /* mark_regs_dirty() */
#include <asm/processor.h> /* current_cpu_info */
/* Avoid namespace pollution. */
#undef cmpxchg
#undef cpuid
+#undef wbinvd
#define cpu_has_amd_erratum(nr) \
        cpu_has_amd_erratum(&current_cpu_data, AMD_ERRATUM_##nr)
+#define get_stub(stb) ({ \
+ BUILD_BUG_ON(STUB_BUF_SIZE / 2 < MAX_INST_LEN + 1); \
+ (stb).addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2; \
+ ((stb).ptr = map_domain_page(this_cpu(stubs.mfn))) + \
+ ((stb).addr & ~PAGE_MASK); \
+})
+#define put_stub(stb) ({ \
+ if ( (stb).ptr ) \
+ { \
+ unmap_domain_page((stb).ptr); \
+ (stb).ptr = NULL; \
+ } \
+})
+
#include "x86_emulate/x86_emulate.c"
} while (0)
#define emulate_fpu_insn_stub(_bytes...) \
-do{ uint8_t stub[] = { _bytes, 0xc3 }; \
- struct fpu_insn_ctxt fic = { .insn_bytes = sizeof(stub)-1 }; \
+do { \
+ uint8_t *buf = get_stub(stub); \
+ unsigned int _nr = sizeof((uint8_t[]){ _bytes }); \
+ struct fpu_insn_ctxt fic = { .insn_bytes = _nr }; \
+ memcpy(buf, ((uint8_t[]){ _bytes, 0xc3 }), _nr + 1); \
get_fpu(X86EMUL_FPU_fpu, &fic); \
- (*(void(*)(void))stub)(); \
+ stub.func(); \
put_fpu(&fic); \
+ put_stub(stub); \
} while (0)
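The compound-literal sizeof() replaces the old sizeof(stub)-1: it counts the variadic opcode bytes at compile time, independently of the stub buffer that now lives in the per-CPU page. A worked example, with byte values illustrative only (register-form FPU cases in x86_emulate.c pass the opcode and ModRM bytes):

    /*
     * Illustrative: a caller such as emulate_fpu_insn_stub(0xd9, modrm)
     * yields sizeof((uint8_t[]){ 0xd9, modrm }) == 2, so fic.insn_bytes
     * remains the instruction length without the trailing RET, and the
     * memcpy() moves those two bytes plus 0xc3 into the per-CPU stub
     * before stub.func() executes it.
     */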
static unsigned long _get_rep_prefix(
struct operand src = { .reg = REG_POISON };
struct operand dst = { .reg = REG_POISON };
enum x86_swint_type swint_type;
+ struct x86_emulate_stub stub = {};
DECLARE_ALIGNED(mmval_t, mmval);
/*
* Data operand effective address (usually computed from ModRM).
done:
_put_fpu();
+ put_stub(stub);
return rc;
twobyte_insn:
/* {,v}movss xmm,xmm/m32 */
/* {,v}movsd xmm,xmm/m64 */
{
- uint8_t stub[] = { 0x3e, 0x3e, 0x0f, b, modrm, 0xc3 };
- struct fpu_insn_ctxt fic = { .insn_bytes = sizeof(stub)-1 };
-
+ uint8_t *buf = get_stub(stub);
+ struct fpu_insn_ctxt fic = { .insn_bytes = 5 };
+
+ buf[0] = 0x3e;
+ buf[1] = 0x3e;
+ buf[2] = 0x0f;
+ buf[3] = b;
+ buf[4] = modrm;
+ buf[5] = 0xc3;
if ( vex.opcx == vex_none )
{
if ( vex.pfx & VEX_PREFIX_DOUBLE_MASK )
else
vcpu_must_have_sse();
ea.bytes = 16;
- SET_SSE_PREFIX(stub[0], vex.pfx);
+ SET_SSE_PREFIX(buf[0], vex.pfx);
get_fpu(X86EMUL_FPU_xmm, &fic);
}
else
/* convert memory operand to (%rAX) */
rex_prefix &= ~REX_B;
vex.b = 1;
- stub[4] &= 0x38;
+ buf[4] &= 0x38;
}
if ( !rc )
{
- copy_REX_VEX(stub, rex_prefix, vex);
- asm volatile ( "call *%0" : : "r" (stub), "a" (mmvalp)
+ copy_REX_VEX(buf, rex_prefix, vex);
+ asm volatile ( "call *%0" : : "r" (stub.func), "a" (mmvalp)
: "memory" );
}
put_fpu(&fic);
+ put_stub(stub);
if ( !rc && (b & 1) && (ea.type == OP_MEM) )
rc = ops->write(ea.mem.seg, ea.mem.off, mmvalp,
ea.bytes, ctxt);
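In both the movs{s,d} block above and the movdq block below, the stub keeps a fixed six-byte layout so the prefix bytes can be patched in place before the call; a sketch of that layout, with offsets inferred from the assignments above:

    /*
     * buf[0] 0x3e   filler (DS override); may become an SSE prefix, and
     *               copy_REX_VEX() may later overwrite the leading bytes
     * buf[1] 0x3e   filler (DS override)
     * buf[2] 0x0f   two-byte opcode escape
     * buf[3] b      the opcode being emulated
     * buf[4] modrm  ModRM; "&= 0x38" forces the (%rAX) memory-operand form
     * buf[5] 0xc3   RET back into the emulator
     */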
/* {,v}movdq{a,u} xmm,xmm/m128 */
/* vmovdq{a,u} ymm,ymm/m256 */
{
- uint8_t stub[] = { 0x3e, 0x3e, 0x0f, b, modrm, 0xc3 };
- struct fpu_insn_ctxt fic = { .insn_bytes = sizeof(stub)-1 };
-
+ uint8_t *buf = get_stub(stub);
+ struct fpu_insn_ctxt fic = { .insn_bytes = 5 };
+
+ buf[0] = 0x3e;
+ buf[1] = 0x3e;
+ buf[2] = 0x0f;
+ buf[3] = b;
+ buf[4] = modrm;
+ buf[5] = 0xc3;
if ( vex.opcx == vex_none )
{
switch ( vex.pfx )
case vex_66:
case vex_f3:
vcpu_must_have_sse2();
- stub[0] = 0x66; /* movdqa */
+ buf[0] = 0x66; /* movdqa */
get_fpu(X86EMUL_FPU_xmm, &fic);
ea.bytes = 16;
break;
/* convert memory operand to (%rAX) */
rex_prefix &= ~REX_B;
vex.b = 1;
- stub[4] &= 0x38;
+ buf[4] &= 0x38;
}
if ( !rc )
{
- copy_REX_VEX(stub, rex_prefix, vex);
- asm volatile ( "call *%0" : : "r" (stub), "a" (mmvalp)
+ copy_REX_VEX(buf, rex_prefix, vex);
+ asm volatile ( "call *%0" : : "r" (stub.func), "a" (mmvalp)
: "memory" );
}
put_fpu(&fic);
+ put_stub(stub);
if ( !rc && (b != 0x6f) && (ea.type == OP_MEM) )
rc = ops->write(ea.mem.seg, ea.mem.off, mmvalp,
ea.bytes, ctxt);
cannot_emulate:
_put_fpu();
+ put_stub(stub);
return X86EMUL_UNHANDLEABLE;
}