hvmemul_ctxt->insn_buf_eip = regs->eip;
hvmemul_ctxt->insn_buf_bytes =
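+        /* Prefer instruction bytes already cached by hardware (e.g. SVM
+         * Decode Assist); the GNU "x ? : y" extension yields x when it is
+         * non-zero, otherwise falls back to the guest memory fetch below. */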
+ hvm_get_insn_bytes(curr, hvmemul_ctxt->insn_buf)
+ ? :
(hvm_virtual_to_linear_addr(
x86_seg_cs, &hvmemul_ctxt->seg_reg[x86_seg_cs],
regs->eip, sizeof(hvmemul_ctxt->insn_buf),
vmcb_set_general1_intercepts(vmcb, general1_intercepts);
}
+static unsigned int svm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
+{
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ unsigned int len = v->arch.hvm_svm.cached_insn_len;
+
+ if ( len != 0 )
+ {
+ /* Latch and clear the cached instruction. */
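+        /* guest_ins is the VMCB's fixed 15-byte fetch buffer; buf is
+         * assumed large enough for all 15 bytes even when len is smaller. */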
+ memcpy(buf, vmcb->guest_ins, 15);
+ v->arch.hvm_svm.cached_insn_len = 0;
+ }
+
+ return len;
+}
+
static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
{
char *p;
.msr_write_intercept = svm_msr_write_intercept,
.invlpg_intercept = svm_invlpg_intercept,
.set_rdtsc_exiting = svm_set_rdtsc_exiting,
+ .get_insn_bytes = svm_get_insn_bytes,
.nhvm_vcpu_initialise = nsvm_vcpu_initialise,
.nhvm_vcpu_destroy = nsvm_vcpu_destroy,
(unsigned long)regs->ecx, (unsigned long)regs->edx,
(unsigned long)regs->esi, (unsigned long)regs->edi);
- if ( paging_fault(va, regs) )
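+    /*
+     * If the hardware fetched the faulting instruction for us (Decode
+     * Assist), make it available to the emulator while the fault is
+     * handled; the 0xf mask keeps guest_ins_len's byte count (0-15).
+     */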
+ if ( cpu_has_svm_decode )
+ v->arch.hvm_svm.cached_insn_len = vmcb->guest_ins_len & 0xf;
+ rc = paging_fault(va, regs);
+ v->arch.hvm_svm.cached_insn_len = 0;
+
+ if ( rc )
{
if ( trace_will_trace_event(TRC_SHADOW) )
break;
case VMEXIT_NPF:
perfc_incra(svmexits, VMEXIT_NPF_PERFC);
regs->error_code = vmcb->exitinfo1;
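+        /* As in the #PF path: expose any hardware-fetched instruction
+         * bytes to the emulator for the duration of the fault handling. */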
+ if ( cpu_has_svm_decode )
+ v->arch.hvm_svm.cached_insn_len = vmcb->guest_ins_len & 0xf;
svm_do_nested_pgfault(v, regs, vmcb->exitinfo2);
+ v->arch.hvm_svm.cached_insn_len = 0;
break;
case VMEXIT_IRET: {
int (*cpu_up)(void);
void (*cpu_down)(void);
+    /* Copy up to 15 bytes of the instruction cached at the current rIP
+     * into buf; returns the number of valid bytes, zero if none cached. */
+ unsigned int (*get_insn_bytes)(struct vcpu *v, uint8_t *buf);
+
/* Instruction intercepts: non-void return values are X86EMUL codes. */
void (*cpuid_intercept)(
unsigned int *eax, unsigned int *ebx,
hvm_funcs.cpu_down();
}
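+/* Returns the number of cached instruction bytes copied into buf, or zero
+ * if nothing is cached or the vendor code lacks the hook (e.g. VMX). */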
+static inline unsigned int hvm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
+{
+ return (hvm_funcs.get_insn_bytes ? hvm_funcs.get_insn_bytes(v, buf) : 0);
+}
+
enum hvm_task_switch_reason { TSW_jmp, TSW_iret, TSW_call_or_int };
void hvm_task_switch(
uint16_t tss_sel, enum hvm_task_switch_reason taskswitch_reason,
int launch_core;
bool_t vmcb_in_sync; /* VMCB sync'ed with VMSAVE? */
+ /* VMCB has a cached instruction from #PF/#NPF Decode Assist? */
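+    /* Set transiently around the #PF/#NPF handlers and consumed (then
+     * cleared) by svm_get_insn_bytes(). */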
+ uint8_t cached_insn_len; /* Zero if no cached instruction. */
+
/* Upper four bytes are undefined in the VMCB, therefore we can't
* use the fields in the VMCB. Write a 64bit value and then read a 64bit
* value is fine unless there's a VMRUN/VMEXIT in between which clears