return rc;
}
- if ( dom->superpages )
+ /* Try to claim the pages, for an early warning of insufficient memory. */
+ if ( dom->claim_enabled )
{
- int count = dom->total_pages >> SUPERPAGE_2MB_SHIFT;
- xen_pfn_t extents[count];
-
- dom->p2m_size = dom->total_pages;
- dom->p2m_host = xc_dom_malloc(dom, sizeof(xen_pfn_t) *
- dom->p2m_size);
- if ( dom->p2m_host == NULL )
- return -EINVAL;
-
- DOMPRINTF("Populating memory with %d superpages", count);
- for ( pfn = 0; pfn < count; pfn++ )
- extents[pfn] = pfn << SUPERPAGE_2MB_SHIFT;
- rc = xc_domain_populate_physmap_exact(dom->xch, dom->guest_domid,
- count, SUPERPAGE_2MB_SHIFT, 0,
- extents);
+ rc = xc_domain_claim_pages(dom->xch, dom->guest_domid,
+ dom->total_pages);
if ( rc )
return rc;
+ }
- /* Expand the returned mfn into the p2m array */
- pfn = 0;
- for ( i = 0; i < count; i++ )
- {
- mfn = extents[i];
- for ( j = 0; j < SUPERPAGE_2MB_NR_PFNS; j++, pfn++ )
- dom->p2m_host[pfn] = mfn + j;
- }
+ /* Set up dummy vNUMA information if it's not provided. Note
+ * that this is a valid state if libxl doesn't provide any
+ * vNUMA information.
+ *
+ * The dummy values make libxc allocate all pages from
+ * arbitrary physical nodes. This is the expected behaviour if
+ * no vNUMA configuration is provided to libxc.
+ *
+ * Note that the following block exists just for the convenience of
+ * the allocation code. No defaulting happens in libxc.
+ */
+ if ( dom->nr_vmemranges == 0 )
+ {
+ nr_vmemranges = 1;
+ vmemranges = dummy_vmemrange;
+ vmemranges[0].start = 0;
+ vmemranges[0].end = (uint64_t)dom->total_pages << PAGE_SHIFT;
+ vmemranges[0].flags = 0;
+ vmemranges[0].nid = 0;
+
+ nr_vnodes = 1;
+ vnode_to_pnode = dummy_vnode_to_pnode;
+ vnode_to_pnode[0] = XC_NUMA_NO_NODE;
}
else
{
- /* try to claim pages for early warning of insufficient memory avail */
- if ( dom->claim_enabled ) {
- rc = xc_domain_claim_pages(dom->xch, dom->guest_domid,
- dom->total_pages);
- if ( rc )
- return rc;
- }
+ nr_vmemranges = dom->nr_vmemranges;
+ nr_vnodes = dom->nr_vnodes;
+ vmemranges = dom->vmemranges;
+ vnode_to_pnode = dom->vnode_to_pnode;
+ }
- /* Setup dummy vNUMA information if it's not provided. Note
- * that this is a valid state if libxl doesn't provide any
- * vNUMA information.
- *
- * The dummy values make libxc allocate all pages from
- * arbitrary physical nodes. This is the expected behaviour if
- * no vNUMA configuration is provided to libxc.
- *
- * Note that the following hunk is just for the convenience of
- * allocation code. No defaulting happens in libxc.
- */
- if ( dom->nr_vmemranges == 0 )
- {
- nr_vmemranges = 1;
- vmemranges = dummy_vmemrange;
- vmemranges[0].start = 0;
- vmemranges[0].end = (uint64_t)dom->total_pages << PAGE_SHIFT;
- vmemranges[0].flags = 0;
- vmemranges[0].nid = 0;
-
- nr_vnodes = 1;
- vnode_to_pnode = dummy_vnode_to_pnode;
- vnode_to_pnode[0] = XC_NUMA_NO_NODE;
- }
- else
- {
- nr_vmemranges = dom->nr_vmemranges;
- nr_vnodes = dom->nr_vnodes;
- vmemranges = dom->vmemranges;
- vnode_to_pnode = dom->vnode_to_pnode;
- }
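+ /* p2m_size is the highest guest pfn + 1 across all vmemranges; it can
+ * exceed total_pages if the ranges leave holes in the layout. */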
+ total = dom->p2m_size = 0;
+ for ( i = 0; i < nr_vmemranges; i++ )
+ {
+ total += ((vmemranges[i].end - vmemranges[i].start) >> PAGE_SHIFT);
+ dom->p2m_size = max(dom->p2m_size,
+ (xen_pfn_t)(vmemranges[i].end >> PAGE_SHIFT));
+ }
+ if ( total != dom->total_pages )
+ {
+ xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+ "%s: vNUMA page count mismatch (0x%"PRIpfn" != 0x%"PRIpfn")",
+ __func__, total, dom->total_pages);
+ return -EINVAL;
+ }
- total = dom->p2m_size = 0;
- for ( i = 0; i < nr_vmemranges; i++ )
- {
- total += ((vmemranges[i].end - vmemranges[i].start)
- >> PAGE_SHIFT);
- dom->p2m_size =
- dom->p2m_size > (vmemranges[i].end >> PAGE_SHIFT) ?
- dom->p2m_size : (vmemranges[i].end >> PAGE_SHIFT);
- }
- if ( total != dom->total_pages )
- {
- xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
- "%s: vNUMA page count mismatch (0x%"PRIpfn" != 0x%"PRIpfn")",
- __func__, total, dom->total_pages);
- return -EINVAL;
- }
+ dom->p2m_host = xc_dom_malloc(dom, sizeof(xen_pfn_t) * dom->p2m_size);
+ if ( dom->p2m_host == NULL )
+ return -EINVAL;
+ for ( pfn = 0; pfn < dom->p2m_size; pfn++ )
+ dom->p2m_host[pfn] = INVALID_P2M_ENTRY;
- dom->p2m_host = xc_dom_malloc(dom, sizeof(xen_pfn_t) *
- dom->p2m_size);
- if ( dom->p2m_host == NULL )
- return -EINVAL;
- for ( pfn = 0; pfn < dom->p2m_size; pfn++ )
- dom->p2m_host[pfn] = INVALID_P2M_ENTRY;
+ /* allocate guest memory */
+ for ( i = 0; i < nr_vmemranges; i++ )
+ {
+ unsigned int memflags;
+ uint64_t pages, super_pages;
+ unsigned int pnode = vnode_to_pnode[vmemranges[i].nid];
+ xen_pfn_t extents[SUPERPAGE_BATCH_SIZE];
+ xen_pfn_t pfn_base_idx;
- /* allocate guest memory */
- for ( i = 0; i < nr_vmemranges; i++ )
- {
- unsigned int memflags;
- uint64_t pages, super_pages;
- unsigned int pnode = vnode_to_pnode[vmemranges[i].nid];
- xen_pfn_t extents[SUPERPAGE_BATCH_SIZE];
- xen_pfn_t pfn_base_idx;
-
- memflags = 0;
- if ( pnode != XC_NUMA_NO_NODE )
- memflags |= XENMEMF_exact_node(pnode);
-
- pages = (vmemranges[i].end - vmemranges[i].start)
- >> PAGE_SHIFT;
- super_pages = pages >> SUPERPAGE_2MB_SHIFT;
- pfn_base = vmemranges[i].start >> PAGE_SHIFT;
-
- for ( pfn = pfn_base; pfn < pfn_base+pages; pfn++ )
- dom->p2m_host[pfn] = pfn;
-
- pfn_base_idx = pfn_base;
- while (super_pages) {
- uint64_t count =
- min_t(uint64_t, super_pages,SUPERPAGE_BATCH_SIZE);
- super_pages -= count;
-
- for ( pfn = pfn_base_idx, j = 0;
- pfn < pfn_base_idx + (count << SUPERPAGE_2MB_SHIFT);
- pfn += SUPERPAGE_2MB_NR_PFNS, j++ )
- extents[j] = dom->p2m_host[pfn];
- rc = xc_domain_populate_physmap(dom->xch, dom->guest_domid, count,
- SUPERPAGE_2MB_SHIFT, memflags,
- extents);
- if ( rc < 0 )
- return rc;
-
- /* Expand the returned mfns into the p2m array. */
- pfn = pfn_base_idx;
- for ( j = 0; j < rc; j++ )
- {
- mfn = extents[j];
- for ( k = 0; k < SUPERPAGE_2MB_NR_PFNS; k++, pfn++ )
- dom->p2m_host[pfn] = mfn + k;
- }
- pfn_base_idx = pfn;
- }
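+ /* Pin allocations to the physical node backing this virtual node,
+ * if one was specified. */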
+ memflags = 0;
+ if ( pnode != XC_NUMA_NO_NODE )
+ memflags |= XENMEMF_exact_node(pnode);
- for ( j = pfn_base_idx - pfn_base; j < pages; j += allocsz )
- {
- allocsz = pages - j;
- if ( allocsz > 1024*1024 )
- allocsz = 1024*1024;
+ pages = (vmemranges[i].end - vmemranges[i].start) >> PAGE_SHIFT;
+ super_pages = pages >> SUPERPAGE_2MB_SHIFT;
+ pfn_base = vmemranges[i].start >> PAGE_SHIFT;
- rc = xc_domain_populate_physmap_exact(dom->xch,
- dom->guest_domid, allocsz, 0, memflags,
- &dom->p2m_host[pfn_base+j]);
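+ /* Seed the p2m for this range with pfn == gfn; the populate calls
+ * below overwrite each entry with the machine frame actually used. */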
+ for ( pfn = pfn_base; pfn < pfn_base + pages; pfn++ )
+ dom->p2m_host[pfn] = pfn;
- if ( rc )
- {
- if ( pnode != XC_NUMA_NO_NODE )
- xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
- "%s: failed to allocate 0x%"PRIx64" pages (v=%d, p=%d)",
- __func__, pages, i, pnode);
- else
- xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
- "%s: failed to allocate 0x%"PRIx64" pages",
- __func__, pages);
- return rc;
- }
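+ /* Try to allocate the range as 2MB superpages first, in batches of
+ * at most SUPERPAGE_BATCH_SIZE extents per hypercall. */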
+ pfn_base_idx = pfn_base;
+ while ( super_pages )
+ {
+ uint64_t count = min_t(uint64_t, super_pages, SUPERPAGE_BATCH_SIZE);
+ super_pages -= count;
+
+ for ( pfn = pfn_base_idx, j = 0;
+ pfn < pfn_base_idx + (count << SUPERPAGE_2MB_SHIFT);
+ pfn += SUPERPAGE_2MB_NR_PFNS, j++ )
+ extents[j] = dom->p2m_host[pfn];
+ rc = xc_domain_populate_physmap(dom->xch, dom->guest_domid, count,
+ SUPERPAGE_2MB_SHIFT, memflags,
+ extents);
+ if ( rc < 0 )
+ return rc;
+
+ /* Expand the returned mfns into the p2m array. */
+ pfn = pfn_base_idx;
+ for ( j = 0; j < rc; j++ )
+ {
+ mfn = extents[j];
+ for ( k = 0; k < SUPERPAGE_2MB_NR_PFNS; k++, pfn++ )
+ dom->p2m_host[pfn] = mfn + k;
}
- rc = 0;
+ pfn_base_idx = pfn;
}
- /* Ensure no unclaimed pages are left unused.
- * OK to call if hadn't done the earlier claim call. */
- (void)xc_domain_claim_pages(dom->xch, dom->guest_domid,
- 0 /* cancels the claim */);
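+ /* Fall back to 4k pages for whatever was not allocated as superpages
+ * (including any sub-2MB tail), at most 1024*1024 pages per call. */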
+ for ( j = pfn_base_idx - pfn_base; j < pages; j += allocsz )
+ {
+ allocsz = min_t(uint64_t, 1024 * 1024, pages - j);
+ rc = xc_domain_populate_physmap_exact(dom->xch, dom->guest_domid,
+ allocsz, 0, memflags, &dom->p2m_host[pfn_base + j]);
+
+ if ( rc )
+ {
+ if ( pnode != XC_NUMA_NO_NODE )
+ xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+ "%s: failed to allocate 0x%"PRIx64" pages (v=%d, p=%d)",
+ __func__, pages, i, pnode);
+ else
+ xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+ "%s: failed to allocate 0x%"PRIx64" pages",
+ __func__, pages);
+ return rc;
+ }
+ }
+ rc = 0;
}
+ /* Ensure no unclaimed pages are left unused.
+ * OK to call even if the earlier claim call was not made. */
+ (void)xc_domain_claim_pages(dom->xch, dom->guest_domid,
+ 0 /* cancels the claim */);
+
return rc;
}