From: kaf24@firebug.cl.cam.ac.uk Date: Mon, 5 Jun 2006 14:18:13 +0000 (+0100) Subject: [VMXASSIST] Extensions to vmxassist to handle additional mode switch cases. X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~15972^2~49^2~9 X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=3e19af6e66e439c5297a8c885f256674c17aff50;p=xen.git [VMXASSIST] Extensions to vmxassist to handle additional mode switch cases. Extends vmxassist to handle two cases related to mode switching found while experimenting with different boot loaders. The first case is use of the JMP instruction with memory location operands to complete a switch to protected mode. This patch adds emulation for this form of the JMP instruction to vmxassist. The second case is where boot loader code does not save/restore a non-zero SS register across a protected mode traversal. Zeroing the SS register in vmxassist results in all sorts of problems in the domU after returning to real mode. This patch stores segment register values before entering protected mode and correctly restores the old values (instead of an incorrect zero value) when reentering real mode. 
Signed-off-by: Kevin Tronkowski --- diff --git a/tools/firmware/vmxassist/vm86.c b/tools/firmware/vmxassist/vm86.c index e18efc1c64..edb7dd685b 100644 --- a/tools/firmware/vmxassist/vm86.c +++ b/tools/firmware/vmxassist/vm86.c @@ -37,6 +37,8 @@ static unsigned prev_eip = 0; enum vm86_mode mode = 0; +static struct regs saved_rm_regs; + #ifdef DEBUG int traceset = 0; @@ -795,6 +797,8 @@ protected_mode(struct regs *regs) oldctx.esp = regs->uesp; oldctx.eflags = regs->eflags; + memset(&saved_rm_regs, 0, sizeof(struct regs)); + /* reload all segment registers */ if (!load_seg(regs->cs, &oldctx.cs_base, &oldctx.cs_limit, &oldctx.cs_arbytes)) @@ -808,6 +812,7 @@ protected_mode(struct regs *regs) load_seg(0, &oldctx.es_base, &oldctx.es_limit, &oldctx.es_arbytes); oldctx.es_sel = 0; + saved_rm_regs.ves = regs->ves; } if (load_seg(regs->uss, &oldctx.ss_base, @@ -817,6 +822,7 @@ protected_mode(struct regs *regs) load_seg(0, &oldctx.ss_base, &oldctx.ss_limit, &oldctx.ss_arbytes); oldctx.ss_sel = 0; + saved_rm_regs.uss = regs->uss; } if (load_seg(regs->vds, &oldctx.ds_base, @@ -826,6 +832,7 @@ protected_mode(struct regs *regs) load_seg(0, &oldctx.ds_base, &oldctx.ds_limit, &oldctx.ds_arbytes); oldctx.ds_sel = 0; + saved_rm_regs.vds = regs->vds; } if (load_seg(regs->vfs, &oldctx.fs_base, @@ -835,6 +842,7 @@ protected_mode(struct regs *regs) load_seg(0, &oldctx.fs_base, &oldctx.fs_limit, &oldctx.fs_arbytes); oldctx.fs_sel = 0; + saved_rm_regs.vfs = regs->vfs; } if (load_seg(regs->vgs, &oldctx.gs_base, @@ -844,6 +852,7 @@ protected_mode(struct regs *regs) load_seg(0, &oldctx.gs_base, &oldctx.gs_limit, &oldctx.gs_arbytes); oldctx.gs_sel = 0; + saved_rm_regs.vgs = regs->vgs; } /* initialize jump environment to warp back to protected mode */ @@ -880,16 +889,22 @@ real_mode(struct regs *regs) if (regs->uss >= HIGHMEM) panic("%%ss 0x%lx higher than 1MB", regs->uss); regs->uss = address(regs, regs->uss, 0) >> 4; + } else { + regs->uss = saved_rm_regs.uss; } if (regs->vds != 0) { if 
(regs->vds >= HIGHMEM) panic("%%ds 0x%lx higher than 1MB", regs->vds); regs->vds = address(regs, regs->vds, 0) >> 4; + } else { + regs->vds = saved_rm_regs.vds; } if (regs->ves != 0) { if (regs->ves >= HIGHMEM) panic("%%es 0x%lx higher than 1MB", regs->ves); regs->ves = address(regs, regs->ves, 0) >> 4; + } else { + regs->ves = saved_rm_regs.ves; } /* this should get us into 16-bit mode */ @@ -981,6 +996,39 @@ jmpl(struct regs *regs, int prefix) panic("jmpl"); } +static void +jmpl_indirect(struct regs *regs, int prefix, unsigned modrm) +{ + unsigned n = regs->eip; + unsigned cs, eip; + unsigned addr; + + addr = operand(prefix, regs, modrm); + + if (mode == VM86_REAL_TO_PROTECTED) { /* jump to protected mode */ + eip = (prefix & DATA32) ? read32(addr) : read16(addr); + addr += (prefix & DATA32) ? 4 : 2; + cs = read16(addr); + + TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip)); + + regs->cs = cs; + regs->eip = eip; + set_mode(regs, VM86_PROTECTED); + } else if (mode == VM86_PROTECTED_TO_REAL) { /* jump to real mode */ + eip = (prefix & DATA32) ? read32(addr) : read16(addr); + addr += (prefix & DATA32) ? 4 : 2; + cs = read16(addr); + + TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip)); + + regs->cs = cs; + regs->eip = eip; + set_mode(regs, VM86_REAL); + } else + panic("jmpl"); +} + static void retl(struct regs *regs, int prefix) { @@ -1306,6 +1354,23 @@ opcode(struct regs *regs) } goto invalid; + case 0xFF: /* jmpl (indirect) */ + if ((mode == VM86_REAL_TO_PROTECTED) || + (mode == VM86_PROTECTED_TO_REAL)) { + unsigned modrm = fetch8(regs); + + switch((modrm >> 3) & 7) { + case 5: + jmpl_indirect(regs, prefix, modrm); + return OPC_INVALID; + + default: + break; + } + + } + goto invalid; + case 0xEB: /* short jump */ if ((mode == VM86_REAL_TO_PROTECTED) || (mode == VM86_PROTECTED_TO_REAL)) {