}
#if defined(__i386__) || defined(__x86_64__)
- if (xc_get_pfn_list(xc_handle, domid, page_array, nr_pages) != nr_pages) {
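+    /* Fill page_array with the guest's pseudo-physical frame numbers and
+     * have Xen translate them in place to machine frames, so that guest
+     * RAM can be mapped with xc_map_foreign_batch() below. */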
+    for (i = 0; i < tmp_nr_pages; i++)
+ page_array[i] = i;
+ if (xc_domain_translate_gpfn_list(xc_handle, domid, tmp_nr_pages,
+ page_array, page_array)) {
-        fprintf(logfile, "xc_get_pfn_list returned error %d\n", errno);
+        fprintf(logfile, "xc_domain_translate_gpfn_list returned error %d\n",
+                errno);
exit(-1);
}
- if (ram_size > HVM_BELOW_4G_RAM_END)
- for (i = 0; i < nr_pages - (HVM_BELOW_4G_RAM_END >> PAGE_SHIFT); i++)
- page_array[tmp_nr_pages - 1 - i] = page_array[nr_pages - 1 - i];
-
phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
PROT_READ|PROT_WRITE, page_array,
tmp_nr_pages);
static int setup_guest(int xc_handle,
uint32_t dom, int memsize,
char *image, unsigned long image_size,
- unsigned long nr_pages,
vcpu_guest_context_t *ctxt,
unsigned long shared_info_frame,
unsigned int vcpus,
unsigned long *store_mfn)
{
xen_pfn_t *page_array = NULL;
- unsigned long count, i;
- unsigned long long ptr;
- xc_mmu_t *mmu = NULL;
-
+ unsigned long i, nr_pages = (unsigned long)memsize << (20 - PAGE_SHIFT);
+ unsigned long shared_page_nr;
shared_info_t *shared_info;
void *e820_page;
-
struct domain_setup_info dsi;
uint64_t v_end;
- unsigned long shared_page_nr;
-
memset(&dsi, 0, sizeof(struct domain_setup_info));
if ( (parseelfimage(image, image_size, &dsi)) != 0 )
goto error_out;
}
- /* memsize is in megabytes */
v_end = (unsigned long long)memsize << 20;
IPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n"
goto error_out;
}
- if ( xc_get_pfn_list(xc_handle, dom, page_array, nr_pages) != nr_pages )
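+    /* Build the guest's pseudo-physical memory map: frames are contiguous
+     * except for the PCI MMIO hole between HVM_BELOW_4G_RAM_END and 4GB,
+     * so RAM at or above the hole is relocated past 4GB. */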
+ for ( i = 0; i < nr_pages; i++ )
+ page_array[i] = i;
+ for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < nr_pages; i++ )
+ page_array[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
+
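+    /* Ask Xen to allocate the guest's RAM at exactly these gpfns. */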
+ if ( xc_domain_memory_populate_physmap(xc_handle, dom, nr_pages,
+ 0, 0, page_array) )
{
- PERROR("Could not get the page frame list.\n");
+ PERROR("Could not allocate memory for HVM guest.\n");
goto error_out;
}
- /* HVM domains must be put into shadow mode at the start of day. */
- /* XXX *After* xc_get_pfn_list()!! */
- if ( xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_ENABLE,
- NULL, 0, NULL,
- XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT |
- XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE |
- XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL,
- NULL) )
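+    /* Translate the gpfn list in place into machine frame numbers; the
+     * ELF loader and the HVM info/shared page setup below expect MFNs. */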
+ if ( xc_domain_translate_gpfn_list(xc_handle, dom, nr_pages,
+ page_array, page_array) )
{
- PERROR("Could not enable shadow paging for domain.\n");
+ PERROR("Could not translate addresses of HVM guest.\n");
goto error_out;
- }
+ }
loadelfimage(image, xc_handle, dom, page_array, &dsi);
- if ( (mmu = xc_init_mmu_updates(xc_handle, dom)) == NULL )
- goto error_out;
-
- /* Write the machine->phys table entries. */
- for ( count = 0; count < nr_pages; count++ )
- {
- unsigned long gpfn_count_skip;
-
- ptr = (unsigned long long)page_array[count] << PAGE_SHIFT;
-
- gpfn_count_skip = 0;
-
- /*
- * physical address space from HVM_BELOW_4G_RAM_END to 4G is reserved
- * for PCI devices MMIO. So if HVM has more than HVM_BELOW_4G_RAM_END
- * RAM, memory beyond HVM_BELOW_4G_RAM_END will go to 4G above.
- */
- if ( count >= (HVM_BELOW_4G_RAM_END >> PAGE_SHIFT) )
- gpfn_count_skip = HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
-
- if ( xc_add_mmu_update(xc_handle, mmu,
- ptr | MMU_MACHPHYS_UPDATE,
- count + gpfn_count_skip) )
- goto error_out;
- }
-
if ( set_hvm_info(xc_handle, dom, page_array, vcpus, acpi) )
{
ERROR("Couldn't set hvm info for HVM guest.\n");
if ( xc_clear_domain_page(xc_handle, dom, *store_mfn) )
goto error_out;
- /* Send the page update requests down to the hypervisor. */
- if ( xc_finish_mmu_updates(xc_handle, mmu) )
- goto error_out;
-
- free(mmu);
free(page_array);
- /*
- * Initial register values:
- */
ctxt->user_regs.eip = dsi.v_kernentry;
return 0;
error_out:
- free(mmu);
free(page_array);
return -1;
}
struct xen_domctl launch_domctl, domctl;
int rc, i;
vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
- unsigned long nr_pages;
- xen_capabilities_info_t xen_caps;
if ( (image == NULL) || (image_size == 0) )
{
goto error_out;
}
- if ( (rc = xc_version(xc_handle, XENVER_capabilities, &xen_caps)) != 0 )
- {
- PERROR("Failed to get xen version info");
- goto error_out;
- }
-
- if ( !strstr(xen_caps, "hvm") )
- {
- PERROR("CPU doesn't support HVM extensions or "
- "the extensions are not enabled");
- goto error_out;
- }
-
- if ( (nr_pages = xc_get_tot_pages(xc_handle, domid)) < 0 )
- {
- PERROR("Could not find total pages for domain");
- goto error_out;
- }
-
if ( lock_pages(&st_ctxt, sizeof(st_ctxt) ) )
{
PERROR("%s: ctxt mlock failed", __func__);
goto error_out;
}
-#if 0
- /* HVM domains must be put into shadow mode at the start of day */
- if ( xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_ENABLE,
- NULL, 0, NULL,
- XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT |
- XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE |
- XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL,
- NULL) )
- {
- PERROR("Could not enable shadow paging for domain.\n");
- goto error_out;
- }
-#endif
-
memset(ctxt, 0, sizeof(*ctxt));
-
ctxt->flags = VGCF_HVM_GUEST;
- if ( setup_guest(xc_handle, domid, memsize, image, image_size, nr_pages,
+
+ if ( setup_guest(xc_handle, domid, memsize, image, image_size,
ctxt, domctl.u.getdomaininfo.shared_info_frame,
vcpus, pae, acpi, apic, store_evtchn, store_mfn) < 0)
{
shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
self.info['shadow_memory'] = shadow_cur
- # initial memory reservation
- xc.domain_memory_increase_reservation(self.domid, reservation, 0,
- 0)
+ # Initial memory reservation
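+            # HVM guests have their memory populated by the domain builder,
+            # so only non-HVM guests need the reservation increased here.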
+ if not (self._infoIsSet('image') and
+ sxp.name(self.info['image']) == "hvm"):
+ xc.domain_memory_increase_reservation(
+ self.domid, reservation, 0, 0)
self._createChannels()
def getRequiredAvailableMemory(self, mem_kb):
# Add 8 MiB overhead for QEMU's video RAM.
- return self.getRequiredInitialReservation(mem_kb) + 8192
+ return mem_kb + 8192
def getRequiredInitialReservation(self, mem_kb):
- page_kb = 4
- # This was derived emperically:
- # 2.4 MB overhead per 1024 MB RAM
- # + 4 to avoid low-memory condition
- extra_mb = (2.4/1024) * (mem_kb/1024.0) + 4;
- extra_pages = int( math.ceil( extra_mb*1024 / page_kb ))
- return mem_kb + extra_pages * page_kb
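+        # The empirical per-MB slack is no longer added; device-model and
+        # shadow overheads are accounted for separately.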
+ return mem_kb
def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
- # The given value is the configured value -- we need to include the
- # overhead due to getRequiredInitialReservation.
- maxmem_kb = self.getRequiredInitialReservation(maxmem_kb)
-
# 256 pages (1MB) per vcpu,
# plus 1 page per MiB of RAM for the P2M map,
# plus 1 page per MiB of RAM to shadow the resident processes.
{
l1_pgentry_t gdt_l1e;
int vcpuid, pdpt_order;
- int i;
-
- if ( is_hvm_domain(d) && !hvm_enabled )
- {
- gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest "
- "on a non-VT/AMDV platform.\n");
- return -EINVAL;
- }
+ int i, rc = -ENOMEM;
pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order);
if ( d->arch.mm_perdomain_pt == NULL )
- goto fail_nomem;
+ goto fail;
memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE << pdpt_order);
/*
d->arch.mm_perdomain_l3 = alloc_xenheap_page();
if ( (d->arch.mm_perdomain_l2 == NULL) ||
(d->arch.mm_perdomain_l3 == NULL) )
- goto fail_nomem;
+ goto fail;
memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
for ( i = 0; i < (1 << pdpt_order); i++ )
d->arch.ioport_caps =
rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
if ( d->arch.ioport_caps == NULL )
- goto fail_nomem;
+ goto fail;
if ( (d->shared_info = alloc_xenheap_page()) == NULL )
- goto fail_nomem;
+ goto fail;
memset(d->shared_info, 0, PAGE_SIZE);
share_xen_page_with_guest(
virt_to_page(d->shared_info), d, XENSHARE_writable);
}
+ if ( is_hvm_domain(d) )
+ {
+ if ( !hvm_enabled )
+ {
+            gdprintk(XENLOG_WARNING, "Attempt to create an HVM guest "
+ "on a non-VT/AMDV platform.\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+
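+        /* HVM guests are shadowed from creation: refcounted shadows, a
+         * translated pseudophysical map, and externally-managed pagetables. */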
+ rc = shadow_enable(d, SHM2_refcounts|SHM2_translate|SHM2_external);
+ if ( rc != 0 )
+ goto fail;
+ }
+
return 0;
- fail_nomem:
+ fail:
free_xenheap_page(d->shared_info);
#ifdef __x86_64__
free_xenheap_page(d->arch.mm_perdomain_l2);
free_xenheap_page(d->arch.mm_perdomain_l3);
#endif
free_xenheap_pages(d->arch.mm_perdomain_pt, pdpt_order);
- return -ENOMEM;
+ return rc;
}
void arch_domain_destroy(struct domain *d)
spin_lock(&d->page_alloc_lock);
- if ( is_hvm_domain(d) && shadow_mode_translate(d) )
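+    /* Return the domain's pages in the order of its ownership list,
+     * irrespective of guest type. */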
+ list_ent = d->page_list.next;
+ for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
{
- /* HVM domain: scan P2M to get guaranteed physmap order. */
- for ( i = 0, gmfn = 0;
- (i < max_pfns) && (i < d->tot_pages);
- i++, gmfn++ )
+ mfn = page_to_mfn(list_entry(
+ list_ent, struct page_info, list));
+ if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
+ i, &mfn, 1) )
{
- if ( unlikely(i == (HVM_BELOW_4G_MMIO_START>>PAGE_SHIFT)) )
- {
- /* skip MMIO range */
- gmfn += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
- }
- mfn = gmfn_to_mfn(d, gmfn);
- if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
- i, &mfn, 1) )
- {
- ret = -EFAULT;
- break;
- }
- }
- }
- else
- {
- /* Other guests: return in order of ownership list. */
- list_ent = d->page_list.next;
- for ( i = 0;
- (i < max_pfns) && (list_ent != &d->page_list);
- i++ )
- {
- mfn = page_to_mfn(list_entry(
- list_ent, struct page_info, list));
- if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
- i, &mfn, 1) )
- {
- ret = -EFAULT;
- break;
- }
- list_ent = mfn_to_page(mfn)->list.next;
+ ret = -EFAULT;
+ break;
}
+ list_ent = mfn_to_page(mfn)->list.next;
}
spin_unlock(&d->page_alloc_lock);
sh_update_paging_modes(v);
}
-static int shadow_enable(struct domain *d, u32 mode)
+int shadow_enable(struct domain *d, u32 mode)
/* Turn on "permanent" shadow features: external, translate, refcount.
* Can only be called once on a domain, and these features cannot be
* disabled.
if ( shadow_mode_log_dirty(d) )
if ( (rc = shadow_log_dirty_disable(d)) != 0 )
return rc;
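+    /* Shadow mode itself cannot be disabled for an HVM domain. */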
+ if ( is_hvm_domain(d) )
+ return -EINVAL;
if ( d->arch.shadow.mode & SHM2_enable )
if ( (rc = shadow_test_disable(d)) != 0 )
return rc;
/**************************************************************************/
/* Entry points into the shadow code */
+/* Enable arbitrary shadow mode. */
+int shadow_enable(struct domain *d, u32 mode);
+
/* Turning on shadow test mode */
int shadow_test_enable(struct domain *d);