ctxt->cr3_phys[vcpu] = vcpu_ctxt->ctrlreg[3];
if ( ctxt->cr3_virt[vcpu] )
munmap(ctxt->cr3_virt[vcpu], PAGE_SIZE);
- ctxt->cr3_virt[vcpu] = xc_map_foreign_range(xc_handle, ctxt->domid,
- PAGE_SIZE, PROT_READ, ctxt->cr3_phys[vcpu] >> PAGE_SHIFT);
+ ctxt->cr3_virt[vcpu] = xc_map_foreign_range(
+ xc_handle, ctxt->domid, PAGE_SIZE, PROT_READ,
+ xen_cr3_to_pfn(ctxt->cr3_phys[vcpu]));
if ( ctxt->cr3_virt[vcpu] == NULL )
return 0;
}
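Throughout the patch, open-coded ctrlreg[3] >> PAGE_SHIFT arithmetic is replaced by the xen_cr3_to_pfn()/xen_pfn_to_cr3() accessors: with PAE a page-directory frame can sit above 4GB, so its frame number no longer fits the architectural cr3 layout and has to be packed. As a rough sketch of what the accessors do (the authoritative definitions live in Xen's public arch headers; the 64-bit variants are plain shifts by 12):

#include <assert.h>

/* Illustrative PAE packing, assumed to match the public arch headers (the
 * real definitions live there, not here): the high bits of the frame number
 * are folded into the low 12 bits of the cr3 value, so page directories
 * above the 4GB boundary can still be expressed. */
#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))

int main(void)
{
    /* A frame number beyond the 4GB boundary survives the round trip,
     * whereas a plain "pfn << PAGE_SHIFT" would overflow 32 bits. */
    unsigned pfn = 0x123456;
    assert(xen_cr3_to_pfn(xen_pfn_to_cr3(pfn)) == pfn);
    return 0;
}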
alloc_pt(l2tab, vl2tab, pl2tab);
vl2e = &vl2tab[l2_table_offset(dsi_v_start)];
if (shadow_mode_enabled)
- ctxt->ctrlreg[3] = pl2tab;
+ ctxt->ctrlreg[3] = xen_pfn_to_cr3(pl2tab >> PAGE_SHIFT);
else
- ctxt->ctrlreg[3] = l2tab;
+ ctxt->ctrlreg[3] = xen_pfn_to_cr3(l2tab >> PAGE_SHIFT);
for ( count = 0; count < ((v_end - dsi_v_start) >> PAGE_SHIFT); count++ )
{
alloc_pt(l3tab, vl3tab, pl3tab);
vl3e = &vl3tab[l3_table_offset_pae(dsi_v_start)];
if (shadow_mode_enabled)
- ctxt->ctrlreg[3] = pl3tab;
+ ctxt->ctrlreg[3] = xen_pfn_to_cr3(pl3tab >> PAGE_SHIFT);
else
- ctxt->ctrlreg[3] = l3tab;
+ ctxt->ctrlreg[3] = xen_pfn_to_cr3(l3tab >> PAGE_SHIFT);
for ( count = 0; count < ((v_end - dsi_v_start) >> PAGE_SHIFT); count++)
{
alloc_pt(l4tab, vl4tab, pl4tab);
vl4e = &vl4tab[l4_table_offset(dsi_v_start)];
if (shadow_mode_enabled)
- ctxt->ctrlreg[3] = pl4tab;
+ ctxt->ctrlreg[3] = xen_pfn_to_cr3(pl4tab >> PAGE_SHIFT);
else
- ctxt->ctrlreg[3] = l4tab;
+ ctxt->ctrlreg[3] = xen_pfn_to_cr3(l4tab >> PAGE_SHIFT);
for ( count = 0; count < ((v_end-dsi_v_start)>>PAGE_SHIFT); count++)
{
if ( dsi.pae_kernel )
{
if ( pin_table(xc_handle, MMUEXT_PIN_L3_TABLE,
- ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
+ xen_cr3_to_pfn(ctxt->ctrlreg[3]), dom) )
goto error_out;
}
else
{
if ( pin_table(xc_handle, MMUEXT_PIN_L2_TABLE,
- ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
+ xen_cr3_to_pfn(ctxt->ctrlreg[3]), dom) )
goto error_out;
}
}
* correct protection for the page
*/
if ( pin_table(xc_handle, MMUEXT_PIN_L4_TABLE,
- ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
+ xen_cr3_to_pfn(ctxt->ctrlreg[3]), dom) )
goto error_out;
#endif
}
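The same pattern recurs in each of the two-, three- and four-level variants above: take the new top-level table's address (pseudo-physical pl*tab under shadow mode, machine l*tab otherwise), shift it down to a frame number and pack it with xen_pfn_to_cr3(); the pinning calls then unpack it again with xen_cr3_to_pfn(). A hypothetical pair of helpers, not part of the patch, that captures the conversion:

/* Hypothetical helpers (not in libxc): convert between a page table's byte
 * address and the packed cr3 encoding.  Assumes PAGE_SHIFT and the
 * xen_pfn_to_cr3()/xen_cr3_to_pfn() macros from the Xen public headers. */
static inline unsigned long table_to_cr3(unsigned long table_paddr)
{
    return xen_pfn_to_cr3(table_paddr >> PAGE_SHIFT);
}

static inline unsigned long cr3_to_table(unsigned long cr3)
{
    return (unsigned long)xen_cr3_to_pfn(cr3) << PAGE_SHIFT;
}

With such helpers the assignments above would read, for example, ctxt->ctrlreg[3] = table_to_cr3(shadow_mode_enabled ? pl2tab : l2tab);.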
/* Uncanonicalise the page table base pointer. */
- pfn = ctxt.ctrlreg[3] >> PAGE_SHIFT;
+ pfn = xen_cr3_to_pfn(ctxt.ctrlreg[3]);
if (pfn >= max_pfn) {
ERR("PT base is bad: pfn=%lu max_pfn=%lu type=%08lx",
goto out;
}
- ctxt.ctrlreg[3] = p2m[pfn] << PAGE_SHIFT;
+ ctxt.ctrlreg[3] = xen_pfn_to_cr3(p2m[pfn]);
/* clear any pending events and the selector */
memset(&(shared_info->evtchn_pending[0]), 0,
}
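On restore the canonical cr3 stored in the image encodes a pseudo-physical frame, so it can be range-checked against max_pfn and then rewritten through the p2m table to a machine frame for the new domain. A hypothetical helper condensing that step (names follow the excerpt; this is not part of libxc):

/* Hypothetical helper, not part of libxc: validate and uncanonicalise a
 * saved cr3 value.  p2m[] maps pseudo-physical frames to machine frames,
 * exactly as in the hunk above. */
static int uncanonicalise_cr3(unsigned long *cr3, const unsigned long *p2m,
                              unsigned long max_pfn)
{
    unsigned long pfn = xen_cr3_to_pfn(*cr3);

    if (pfn >= max_pfn)
        return -1;                      /* corrupt or truncated image */

    *cr3 = xen_pfn_to_cr3(p2m[pfn]);    /* re-pack the machine frame */
    return 0;
}

The save side below is the mirror image: the machine frame is mapped back to a pseudo-physical one with mfn_to_pfn() before being re-packed.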
/* Canonicalise the page table base pointer. */
- if ( !MFN_IS_IN_PSEUDOPHYS_MAP(ctxt.ctrlreg[3] >> PAGE_SHIFT) ) {
+ if ( !MFN_IS_IN_PSEUDOPHYS_MAP(xen_cr3_to_pfn(ctxt.ctrlreg[3])) ) {
ERR("PT base is not in range of pseudophys map");
goto out;
}
- ctxt.ctrlreg[3] = mfn_to_pfn(ctxt.ctrlreg[3] >> PAGE_SHIFT) <<
- PAGE_SHIFT;
+ ctxt.ctrlreg[3] =
+ xen_pfn_to_cr3(mfn_to_pfn(xen_cr3_to_pfn(ctxt.ctrlreg[3])));
if (!write_exact(io_fd, &ctxt, sizeof(ctxt)) ||
!write_exact(io_fd, live_shinfo, PAGE_SIZE)) {
fprintf(stderr, "failed to retrieve vcpu context\n");
goto out;
}
- cr3 = ctx.ctrlreg[3];
+ cr3 = ((unsigned long long)xen_cr3_to_pfn(ctx.ctrlreg[3])) << PAGE_SHIFT;
/* Page Map Level 4 */
static void *v[MAX_VIRT_CPUS];
l2 = xc_map_foreign_range(
- xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
+ xc_handle, current_domid, PAGE_SIZE, PROT_READ,
+ xen_cr3_to_pfn(ctxt[cpu].ctrlreg[3]));
if ( l2 == NULL )
return NULL;
static void *v[MAX_VIRT_CPUS];
l3 = xc_map_foreign_range(
- xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
+ xc_handle, current_domid, PAGE_SIZE, PROT_READ,
+ xen_cr3_to_pfn(ctxt[cpu].ctrlreg[3]));
if ( l3 == NULL )
return NULL;
if ((ctxt[cpu].ctrlreg[4] & 0x20) == 0 ) /* legacy ia32 mode */
return map_domain_va_32(xc_handle, cpu, guest_va, perm);
- l4 = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
- PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
+ l4 = xc_map_foreign_range(
+ xc_handle, current_domid, PAGE_SIZE, PROT_READ,
+ xen_cr3_to_pfn(ctxt[cpu].ctrlreg[3]));
if ( l4 == NULL )
return NULL;
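In the ptrace paths the packed cr3 is only ever used to locate and map the top-level page table of the vcpu being inspected, so the three variants above all perform the same unpack-and-map step. A condensed sketch of that step (xc_map_foreign_range() is the real libxc call; the wrapper name is made up for illustration, and PAGE_SIZE is assumed from the surrounding file):

#include <sys/mman.h>
#include <xenctrl.h>

/* Hypothetical wrapper, for illustration only: map the top-level page table
 * of a vcpu read-only, using the packed cr3 from its guest context. */
static void *map_toplevel_table(int xc_handle, uint32_t domid,
                                const vcpu_guest_context_t *ctxt)
{
    return xc_map_foreign_range(xc_handle, domid, PAGE_SIZE, PROT_READ,
                                xen_cr3_to_pfn(ctxt->ctrlreg[3]));
}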
static long nr_pages = 0;
static unsigned long *p2m_array = NULL;
static unsigned long *m2p_array = NULL;
static unsigned long pages_offset;
static unsigned long cr3[MAX_VIRT_CPUS];
/* --------------------- */
munmap(cr3_virt[cpu], PAGE_SIZE);
v = mmap(
NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
- map_mtop_offset(cr3_phys[cpu]));
+ map_mtop_offset(xen_cr3_to_pfn(cr3_phys[cpu]) << PAGE_SHIFT));
if (v == MAP_FAILED)
{
perror("mmap failed");
sizeof(vcpu_guest_context_t)*nr_vcpus)
return -1;
- for (i = 0; i < nr_vcpus; i++) {
+ for (i = 0; i < nr_vcpus; i++)
cr3[i] = ctxt[i].ctrlreg[3];
- }
+
if ((p2m_array = malloc(nr_pages * sizeof(unsigned long))) == NULL)
{
printf("Could not allocate p2m_array\n");
return -1;
}
+
if (read(domfd, p2m_array, sizeof(unsigned long)*nr_pages) !=
sizeof(unsigned long)*nr_pages)
return -1;
}
bzero(m2p_array, sizeof(unsigned long)* 1 << 20);
- for (i = 0; i < nr_pages; i++) {
+ for (i = 0; i < nr_pages; i++)
m2p_array[p2m_array[i]] = i;
- }
-
}
return 0;
}
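map_mtop_offset() itself is not shown in the excerpt; given the pages_offset and m2p_array set up here, its likely shape (an assumption, reconstructed only to make the mmap() call above readable) is to turn a guest machine address into a byte offset within the core file:

/* Assumed shape of map_mtop_offset(), reconstructed for illustration only:
 * translate a guest machine address into an offset into the core file by
 * looking the frame up in the machine-to-pseudophys reverse map. */
static unsigned long map_mtop_offset(unsigned long ma)
{
    return pages_offset + (m2p_array[ma >> PAGE_SHIFT] << PAGE_SHIFT);
}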