copy_edd();
- /* Make the PFNs in the Xen hole reserved. */
if (!MOUNT_ROOT_RDONLY)
root_mountflags &= ~MS_RDONLY;
init_mm.start_code = (unsigned long) _text;
console_use_vt = 0;
#endif
}
-
-
}
static int
int tmp;
int bad_ppro;
unsigned long pfn;
+ unsigned long hole_start, hole_size;
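+ /* One bit per pfn up to max_low_pfn (plus two longs of slack); ">> 3" converts bits to bytes. */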
contiguous_bitmap = alloc_bootmem_low_pages(
(max_low_pfn + 2*BITS_PER_LONG) >> 3);
}
/* Make the Xen hole reserved. */
- unsigned long hole_start, hole_size;
hole_size = xen_pfn_hole_size();
hole_start = xen_pfn_hole_start();
- for (pfn = hole_start; pfn < hole_start + hole_size; pfn++) {
- printk("<0>Reserve %lx for hole.\n",
- pfn);
+ for (pfn = hole_start; pfn < hole_start + hole_size; pfn++)
SetPageReserved(pfn_to_page(pfn));
- BUG_ON(!PageReserved(pfn_to_page(pfn)));
- }
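+ /* Recount the reserved RAM pages below max_low_pfn. */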
reservedpages = 0;
for (tmp = 0; tmp < max_low_pfn; tmp++)
pgd_test_and_unpin(pgd);
- memset(pgd, 0, 10);
if (PTRS_PER_PMD == 1 || !pgd)
return pgd;
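+ /* hole_len == -1 means the hole boundaries have not yet been queried from Xen. */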
if (hole_len == -1) {
hole_start = xen_pfn_hole_start();
hole_len = xen_pfn_hole_size();
- printk("<0>Xen hole at [%lx,%lx).\n", hole_start,
- hole_start + hole_len);
}
return pfn >= hole_start && pfn < hole_start + hole_len;
}
/* balloon_append: add the given page to the balloon. */
static void balloon_append(struct page *page)
{
- BUG_ON(PageReserved(page));
- if (page_is_xen_hole(page_to_pfn(page))) {
- printk("<0>Attempt to add reserved pfn %lx to balloon.\n",
+ if (page_is_xen_hole(page_to_pfn(page)))
+ panic("Attempt to add Xen-reserved pfn %lx to balloon.\n",
page_to_pfn(page));
- BUG();
- }
/* Lowmem is re-populated first, so highmem pages go at list tail. */
if (PageHighMem(page)) {
list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
xen_machphys_update(frame_list[i], pfn);
#endif
- printk("<0>Balloon allocated %lx.\n", pfn);
/* Link back into the page tables if not highmem. */
if (pfn < max_low_pfn) {
int ret;
- pgd_t *pgd = pgd_offset_k((unsigned long)__va(pfn << PAGE_SHIFT));
- printk("pgd is %lx.\n", *(unsigned long *)pgd);
- (void)copy_from_user(&ret,
- (unsigned long *)__va(pfn << PAGE_SHIFT),
- 4);
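+ /* Point the pfn's kernel direct-mapped virtual address at the newly allocated machine frame. */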
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
pfn_pte_ma(frame_list[i], PAGE_KERNEL),
0);
BUG_ON(ret);
- printk("<0>Rehooked va; pte now %lx.\n",
- *(unsigned long *)virt_to_ptep(__va(pfn << PAGE_SHIFT)));
- *(unsigned long *)__va(pfn << PAGE_SHIFT) =
- 0xf001;
- printk("<0>Touched va.\n");
}
/* Relinquish the page back to the allocator. */
reservation.domid = DOMID_SELF;
/* Tell the balloon driver what is going on. */
-//SOS22 balloon_update_driver_allowance(i);
+ balloon_update_driver_allowance(i);
/* Zap PTEs and give away pages in one big multicall. */
(void)HYPERVISOR_multicall(rx_mcl, i+1);
}
/* Some pages are no longer absent... */
-//SOS22 balloon_update_driver_allowance(-work_done);
+ balloon_update_driver_allowance(-work_done);
/* Do all the remapping work, and M2P updates, in one big hypercall. */
if (likely((mcl - rx_mcl) != 0)) {
spin_lock(&mm->page_table_lock);
if (!new)
return NULL;
-
/*
* Because we dropped the lock, we should re-check the
* entry, as somebody else could have populated it.
#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#endif
-#define NR_GRANT_FRAMES 4
-
#ifdef __ia64__
#define get_tot_pages xc_get_max_pages
#else
if ( unlikely(cmpxchg_user(pl1e, o, n) != 0) ||
unlikely(o != l1e_get_intpte(ol1e)) )
{
- printf("Failed to update %" PRIpte " -> %" PRIpte
+ MEM_LOG("Failed to update %" PRIpte " -> %" PRIpte
": saw %" PRIpte,
l1e_get_intpte(ol1e),
l1e_get_intpte(nl1e),
l1_pgentry_t ol1e;
struct domain *d = current->domain;
- shadow_sync_all(d);
- if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) ) {
- printf("copy_from_user1 failed %p, l2 %lx.\n", pl1e,
- *(unsigned long *)&__linear_l2_table[l2_table_offset((unsigned long)pl1e)]);
+ if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
return 0;
- }
- if ( unlikely(shadow_mode_refcounts(d)) ) {
+ if ( unlikely(shadow_mode_refcounts(d)) )
return update_l1e(pl1e, ol1e, nl1e);
- }
if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
{
perfc_incrc(calls_to_update_va);
- if ( unlikely(!__addr_ok(va) && !shadow_mode_external(d)) ) {
- printf("Bad update_va_mapping.\n");
+ if ( unlikely(!__addr_ok(va) && !shadow_mode_external(d)) )
return -EINVAL;
- }
LOCK_BIGLOCK(d);
if ( unlikely(shadow_mode_enabled(d)) )
check_pagetable(v, "pre-va"); /* debug */
- shadow_sync_all(d);
-
if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
- val)) ) {
- printf("mod_l1_entry failed.\n");
+ val)) )
rc = -EINVAL;
- }
if ( likely(rc == 0) && unlikely(shadow_mode_enabled(d)) )
{
}
rc = shadow_do_update_va_mapping(va, val, v);
- if (rc)
- printf("shadow_do_update_va_mapping says %d.\n", rc);
+
check_pagetable(v, "post-va"); /* debug */
}
l2_pgentry_t *snapshot2 = snapshot;
l1_pgentry_t *shadow2 = shadow;
- printf("Update hl2 shadow.\n");
-
ASSERT(shadow_mode_write_all(d) || shadow_mode_wr_pt_pte(d));
BUG_ON(!shadow_mode_refcounts(d)); // not yet implemented
ASSERT(d->grant_table != NULL);
(void)put_user(GNTST_okay, &uop->status);
for ( i = 0; i < op.nr_frames; i++ ) {
- mfn = gnttab_shared_mfn(d, d->grant_table, i);
- if (shadow_mode_translate(d))
- mfn = __mfn_to_gpfn(d, mfn);
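+ /* __mfn_to_gpfn already checks shadow_mode_translate(d) and is a no-op otherwise. */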
+ mfn = __mfn_to_gpfn(d, gnttab_shared_mfn(d, d->grant_table, i));
(void)put_user(mfn, &op.frame_list[i]);
}
}
goto out;
for ( j = 0; j < (1 << extent_order); j++ ) {
- printf("Populating %lx with %lx.\n",
- pfn + j, mfn + j);
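+ /* Keep the p2m (translate mode only) and m2p tables in sync for each new frame. */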
if (shadow_mode_translate(d))
set_p2m_entry(d, pfn + j, mfn + j, &cache1, &cache2);
set_pfn_from_mfn(mfn + j, pfn + j);
#define SHADOW_SNAPSHOT_ELSEWHERE (-1L)
/************************************************************************/
-#define SHADOW_DEBUG 1
+#define SHADOW_DEBUG 0
#define SHADOW_VERBOSE_DEBUG 0
#define SHADOW_VVERBOSE_DEBUG 0
#define SHADOW_VVVERBOSE_DEBUG 0
domid_t domain; /* domain to be affected */
unsigned long mfn; /* machine frame to be initialised */
} dom0_hypercall_init_t;
-
+
typedef struct {
uint32_t cmd;
uint32_t interface_version; /* DOM0_INTERFACE_VERSION */