static unsigned prev_eip = 0;
enum vm86_mode mode = 0;
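+/* real-mode segment selectors saved on the switch to protected mode and restored when switching back */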
+static struct regs saved_rm_regs;
+
#ifdef DEBUG
int traceset = 0;
[...]
	oldctx.esp = regs->uesp;
oldctx.eflags = regs->eflags;
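+	/* clear out any selectors saved on a previous real-to-protected switch */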
+ memset(&saved_rm_regs, 0, sizeof(struct regs));
+
/* reload all segment registers */
	if (!load_seg(regs->cs, &oldctx.cs_base,
				&oldctx.cs_limit, &oldctx.cs_arbytes))
[...]
	if (load_seg(regs->ves, &oldctx.es_base,
				&oldctx.es_limit, &oldctx.es_arbytes))
		oldctx.es_sel = regs->ves;
	else {
		load_seg(0, &oldctx.es_base,
				&oldctx.es_limit, &oldctx.es_arbytes);
		oldctx.es_sel = 0;
+		saved_rm_regs.ves = regs->ves;
	}
	if (load_seg(regs->uss, &oldctx.ss_base,
				&oldctx.ss_limit, &oldctx.ss_arbytes))
		oldctx.ss_sel = regs->uss;
	else {
		load_seg(0, &oldctx.ss_base,
				&oldctx.ss_limit, &oldctx.ss_arbytes);
		oldctx.ss_sel = 0;
+		saved_rm_regs.uss = regs->uss;
	}

	if (load_seg(regs->vds, &oldctx.ds_base,
				&oldctx.ds_limit, &oldctx.ds_arbytes))
		oldctx.ds_sel = regs->vds;
	else {
		load_seg(0, &oldctx.ds_base,
				&oldctx.ds_limit, &oldctx.ds_arbytes);
		oldctx.ds_sel = 0;
+		saved_rm_regs.vds = regs->vds;
	}

	if (load_seg(regs->vfs, &oldctx.fs_base,
				&oldctx.fs_limit, &oldctx.fs_arbytes))
		oldctx.fs_sel = regs->vfs;
	else {
		load_seg(0, &oldctx.fs_base,
				&oldctx.fs_limit, &oldctx.fs_arbytes);
		oldctx.fs_sel = 0;
+		saved_rm_regs.vfs = regs->vfs;
	}

	if (load_seg(regs->vgs, &oldctx.gs_base,
				&oldctx.gs_limit, &oldctx.gs_arbytes))
		oldctx.gs_sel = regs->vgs;
	else {
		load_seg(0, &oldctx.gs_base,
				&oldctx.gs_limit, &oldctx.gs_arbytes);
		oldctx.gs_sel = 0;
+		saved_rm_regs.vgs = regs->vgs;
	}
	/* initialize jump environment to warp back to protected mode */
[...]
	if (regs->uss != 0) {
		if (regs->uss >= HIGHMEM)
			panic("%%ss 0x%lx higher than 1MB", regs->uss);
		regs->uss = address(regs, regs->uss, 0) >> 4;
+	} else {
+		regs->uss = saved_rm_regs.uss;
	}
if (regs->vds != 0) {
if (regs->vds >= HIGHMEM)
panic("%%ds 0x%lx higher than 1MB", regs->vds);
regs->vds = address(regs, regs->vds, 0) >> 4;
+ } else {
+ regs->vds = saved_rm_regs.vds;
}
if (regs->ves != 0) {
if (regs->ves >= HIGHMEM)
panic("%%es 0x%lx higher than 1MB", regs->ves);
regs->ves = address(regs, regs->ves, 0) >> 4;
+ } else {
+ regs->ves = saved_rm_regs.ves;
}
[...]
	/* this should get us into 16-bit mode */
panic("jmpl");
}
+static void
+jmpl_indirect(struct regs *regs, int prefix, unsigned modrm)
+{
+ unsigned n = regs->eip;
+ unsigned cs, eip;
+ unsigned addr;
+
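+	/* effective address of the memory operand that holds the far pointer */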
+ addr = operand(prefix, regs, modrm);
+
+ if (mode == VM86_REAL_TO_PROTECTED) { /* jump to protected mode */
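+		/* far pointer in memory: 16- or 32-bit offset first, then a 16-bit selector */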
+ eip = (prefix & DATA32) ? read32(addr) : read16(addr);
+ addr += (prefix & DATA32) ? 4 : 2;
+ cs = read16(addr);
+
+ TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
+
+ regs->cs = cs;
+ regs->eip = eip;
+ set_mode(regs, VM86_PROTECTED);
+ } else if (mode == VM86_PROTECTED_TO_REAL) { /* jump to real mode */
+ eip = (prefix & DATA32) ? read32(addr) : read16(addr);
+ addr += (prefix & DATA32) ? 4 : 2;
+ cs = read16(addr);
+
+ TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
+
+ regs->cs = cs;
+ regs->eip = eip;
+ set_mode(regs, VM86_REAL);
+ } else
+ panic("jmpl");
+}
+
static void
retl(struct regs *regs, int prefix)
{
[...]
		}
		goto invalid;
+ case 0xFF: /* jmpl (indirect) */
+ if ((mode == VM86_REAL_TO_PROTECTED) ||
+ (mode == VM86_PROTECTED_TO_REAL)) {
+ unsigned modrm = fetch8(regs);
+
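+			/* ModRM reg field selects the 0xFF sub-opcode; /5 is an indirect far jmp */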
+ switch((modrm >> 3) & 7) {
+ case 5:
+ jmpl_indirect(regs, prefix, modrm);
+ return OPC_INVALID;
+
+ default:
+ break;
+ }
+
+ }
+ goto invalid;
+
case 0xEB: /* short jump */
if ((mode == VM86_REAL_TO_PROTECTED) ||
(mode == VM86_PROTECTED_TO_REAL)) {