if (mm->context.size) {
if (mm == current->active_mm)
clear_LDT();
- make_pages_writable(mm->context.ldt,
- make_mmu_pages_writable(mm->context.ldt,
- (mm->context.size * LDT_ENTRY_SIZE) /
- PAGE_SIZE);
++ make_pages_writable(mm->context.ldt,
+ (mm->context.size * LDT_ENTRY_SIZE) /
+ PAGE_SIZE);
if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(mm->context.ldt);
else
copy_edd();
++ /* Make the PFNs in the Xen hole reserved. */
if (!MOUNT_ROOT_RDONLY)
root_mountflags &= ~MS_RDONLY;
init_mm.start_code = (unsigned long) _text;
console_use_vt = 0;
#endif
}
++
++
}
static int
BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
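++/*
++ * Query the hypervisor for the first PFN of its reserved PFN hole
++ * (MMUEXT_PFN_HOLE_BASE); the hypercall's return value is the base PFN.
++ */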
++unsigned long xen_pfn_hole_start(void)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_PFN_HOLE_BASE;
++ return HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF);
++}
++
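++/* As above, but return the number of PFNs in the hole (MMUEXT_PFN_HOLE_SIZE). */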
++unsigned long xen_pfn_hole_size(void)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_PFN_HOLE_SIZE;
++ return HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF);
++}
++
void xen_new_user_pt(unsigned long ptr)
{
struct mmuext_op op;
*/
unsigned long *contiguous_bitmap;
++#ifndef CONFIG_XEN_SHADOW_MODE
static void contiguous_bitmap_set(
unsigned long first_page, unsigned long nr_pages)
{
balloon_unlock(flags);
}
++#else
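++/*
++ * Shadow-mode stubs: an order-0 "region" is already a single frame and needs
++ * no remapping; larger orders are not supported here.
++ */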
++int xen_create_contiguous_region(
++	unsigned long vstart, unsigned int order, unsigned int address_bits)
++{
++ if (order >= 1)
++ BUG();
++ return 0;
++}
++
++void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
++{
++}
++#endif
#ifdef __i386__
int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
totalram_pages++;
}
++ /* Make the Xen hole reserved. */
++ unsigned long hole_start, hole_size;
++ hole_size = xen_pfn_hole_size();
++ hole_start = xen_pfn_hole_start();
++ for (pfn = hole_start; pfn < hole_start + hole_size; pfn++) {
++		printk("<0>Reserve %lx for hole.\n", pfn);
++ SetPageReserved(pfn_to_page(pfn));
++ BUG_ON(!PageReserved(pfn_to_page(pfn)));
++ }
++
reservedpages = 0;
for (tmp = 0; tmp < max_low_pfn; tmp++)
/*
pgd_test_and_unpin(pgd);
++ memset(pgd, 0, 10);
if (PTRS_PER_PMD == 1 || !pgd)
return pgd;
kmem_cache_free(pgd_cache, pgd);
}
--#ifndef CONFIG_XEN_SHADOW_MODE
-void make_lowmem_mmu_page_readonly(void *va)
++
+void make_lowmem_page_readonly(void *va)
{
- pte_t *pte = virt_to_ptep(va);
- int rc = HYPERVISOR_update_va_mapping(
+ pte_t *pte;
+ int rc;
+
- if (xen_feature(writable_mmu_structures))
- return;
-
+ pte = virt_to_ptep(va);
+ rc = HYPERVISOR_update_va_mapping(
(unsigned long)va, pte_wrprotect(*pte), 0);
BUG_ON(rc);
}
-void make_lowmem_mmu_page_writable(void *va)
+void make_lowmem_page_writable(void *va)
{
- pte_t *pte = virt_to_ptep(va);
- int rc = HYPERVISOR_update_va_mapping(
+ pte_t *pte;
+ int rc;
+
- if (xen_feature(writable_mmu_structures))
- return;
-
+ pte = virt_to_ptep(va);
+ rc = HYPERVISOR_update_va_mapping(
(unsigned long)va, pte_mkwrite(*pte), 0);
BUG_ON(rc);
}
-void make_mmu_page_readonly(void *va)
++
+void make_page_readonly(void *va)
{
- pte_t *pte = virt_to_ptep(va);
- int rc = HYPERVISOR_update_va_mapping(
+ pte_t *pte;
+ int rc;
+
- if (xen_feature(writable_mmu_structures))
- return;
-
+ pte = virt_to_ptep(va);
+ rc = HYPERVISOR_update_va_mapping(
(unsigned long)va, pte_wrprotect(*pte), 0);
if (rc) /* fallback? */
xen_l1_entry_update(pte, pte_wrprotect(*pte));
}
}
-void make_mmu_page_writable(void *va)
+void make_page_writable(void *va)
{
- pte_t *pte = virt_to_ptep(va);
- int rc = HYPERVISOR_update_va_mapping(
+ pte_t *pte;
+ int rc;
+
- if (xen_feature(writable_mmu_structures))
- return;
-
+ pte = virt_to_ptep(va);
+ rc = HYPERVISOR_update_va_mapping(
(unsigned long)va, pte_mkwrite(*pte), 0);
if (rc) /* fallback? */
xen_l1_entry_update(pte, pte_mkwrite(*pte));
}
}
- void make_pages_readonly(void *va, unsigned int nr)
-void make_mmu_pages_readonly(void *va, unsigned int nr)
++void make_pages_readonly(void *va, unsigned int nr)
{
- if (xen_feature(writable_mmu_structures))
- return;
+ while (nr-- != 0) {
+ make_page_readonly(va);
+ va = (void *)((unsigned long)va + PAGE_SIZE);
+ }
+}
- void make_pages_writable(void *va, unsigned int nr)
++void make_pages_writable(void *va, unsigned int nr)
+{
while (nr-- != 0) {
- make_mmu_page_readonly(va);
+ make_page_writable(va);
va = (void *)((unsigned long)va + PAGE_SIZE);
}
}
- #endif /* CONFIG_XEN_SHADOW_MODE */
+
++#ifndef CONFIG_XEN_SHADOW_MODE
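++/*
++ * MMU-structure variants: these become no-ops when Xen reports writable MMU
++ * structures, and otherwise fall through to the plain helpers above.
++ */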
++void make_lowmem_mmu_page_readonly(void *va)
++{
++ if (xen_feature(writable_mmu_structures))
++ return;
++ make_lowmem_page_readonly(va);
++}
++
++void make_lowmem_mmu_page_writable(void *va)
++{
++ if (xen_feature(writable_mmu_structures))
++ return;
++ make_lowmem_page_writable(va);
++}
++
++void make_mmu_page_readonly(void *va)
++{
++ if (xen_feature(writable_mmu_structures))
++ return;
++ make_page_readonly(va);
++}
++
++void make_mmu_page_writable(void *va)
++{
++ if (xen_feature(writable_mmu_structures))
++ return;
++ make_page_writable(va);
++}
++
++void make_mmu_pages_readonly(void *va, unsigned int nr)
++{
++ if (xen_feature(writable_mmu_structures))
++ return;
++ make_pages_readonly(va, nr);
++}
++
+ void make_mmu_pages_writable(void *va, unsigned int nr)
+ {
+ if (xen_feature(writable_mmu_structures))
+ return;
- while (nr-- != 0) {
- make_mmu_page_writable(va);
- va = (void *)((unsigned long)va + PAGE_SIZE);
- }
++ make_pages_writable(va, nr);
+ }
-#endif /* CONFIG_XEN_SHADOW_MODE */
++#endif /* CONFIG_XEN_SHADOW_MODE */
static inline void pgd_walk_set_prot(void *pt, pgprot_t flags)
{
static void __pgd_pin(pgd_t *pgd)
{
-- pgd_walk(pgd, PAGE_KERNEL_RO);
++ if (!xen_feature(writable_mmu_structures))
++ pgd_walk(pgd, PAGE_KERNEL_RO);
xen_pgd_pin(__pa(pgd));
set_bit(PG_pinned, &virt_to_page(pgd)->flags);
}
static void __pgd_unpin(pgd_t *pgd)
{
xen_pgd_unpin(__pa(pgd));
-- pgd_walk(pgd, PAGE_KERNEL);
++ if (!xen_feature(writable_mmu_structures))
++ pgd_walk(pgd, PAGE_KERNEL);
clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
}
extra-y += vmlinux.lds
--obj-y := evtchn.o reboot.o gnttab.o
++obj-y := evtchn.o reboot.o gnttab.o features.o
obj-$(CONFIG_PROC_FS) += xen_proc.o
obj-$(CONFIG_NET) += skbuff.o
}
int
--gnttab_grant_foreign_transfer(domid_t domid)
++gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
int ref;
if (unlikely((ref = get_free_entry()) == -1))
return -ENOSPC;
--
-- shared[ref].frame = 0;
-- shared[ref].domid = domid;
-- wmb();
-- shared[ref].flags = GTF_accept_transfer;
++ gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
return ref;
}
void
--gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid)
++gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
++ unsigned long pfn)
{
-- shared[ref].frame = 0;
++ shared[ref].frame = pfn;
shared[ref].domid = domid;
wmb();
shared[ref].flags = GTF_accept_transfer;
#endif
++#ifndef CONFIG_XEN_SHADOW_MODE
static int __do_suspend(void *ignore)
{
int i, j, k, fpp;
return err;
}
++#else
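++/* Shadow-mode stub: save/restore is not supported, so refuse the request. */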
++static int __do_suspend(void *ignore)
++{
++	printk(KERN_WARNING "Suspend is not supported in shadow mode\n");
++ return -EOPNOTSUPP;
++}
++#endif
static int shutdown_process(void *__unused)
{
#define WPRINTK(fmt, args...) \
printk(KERN_WARNING "xen_mem: " fmt, ##args)
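++/*
++ * Lazily query and cache the hypervisor's PFN hole; pages inside it must
++ * never be handed to the balloon.
++ */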
++static int page_is_xen_hole(unsigned long pfn)
++{
++ static unsigned long hole_start, hole_len = -1;
++ if (hole_len == -1) {
++ hole_start = xen_pfn_hole_start();
++ hole_len = xen_pfn_hole_size();
++ printk("<0>Xen hole at [%lx,%lx).\n", hole_start,
++ hole_start + hole_len);
++ }
++ return pfn >= hole_start && pfn < hole_start + hole_len;
++}
++
/* balloon_append: add the given page to the balloon. */
static void balloon_append(struct page *page)
{
++ BUG_ON(PageReserved(page));
++ if (page_is_xen_hole(page_to_pfn(page))) {
++ printk("<0>Attempt to add reserved pfn %lx to balloon.\n",
++ page_to_pfn(page));
++ BUG();
++ }
/* Lowmem is re-populated first, so highmem pages go at list tail. */
if (PageHighMem(page)) {
list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
balloon_lock(flags);
- reservation.extent_start = mfn_list;
+ page = balloon_first_page();
+ for (i = 0; i < nr_pages; i++) {
+ BUG_ON(page == NULL);
+		frame_list[i] = page_to_pfn(page);
++ BUG_ON(page_is_xen_hole(frame_list[i]));
+ page = balloon_next_page(page);
+ }
+
+ reservation.extent_start = frame_list;
reservation.nr_extents = nr_pages;
rc = HYPERVISOR_memory_op(
- XENMEM_increase_reservation, &reservation);
+ XENMEM_populate_physmap, &reservation);
if (rc < nr_pages) {
int ret;
/* We hit the Xen hard limit: reprobe. */
BUG_ON(page == NULL);
pfn = page_to_pfn(page);
++		/* In shadow mode, Xen maintains the P->M and M->P tables for us. */
++#ifndef CONFIG_XEN_SHADOW_MODE
BUG_ON(phys_to_machine_mapping_valid(pfn));
/* Update P->M and M->P tables. */
- set_phys_to_machine(pfn, mfn_list[i]);
- xen_machphys_update(mfn_list[i], pfn);
-
+ set_phys_to_machine(pfn, frame_list[i]);
+ xen_machphys_update(frame_list[i], pfn);
-
++#endif
++
++ printk("<0>Balloon allocated %lx.\n", pfn);
/* Link back into the page tables if not highmem. */
if (pfn < max_low_pfn) {
int ret;
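++			/* Debug: dump the current pgd entry and probe the old
++			 * mapping before rehooking the page. */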
++ pgd_t *pgd = pgd_offset_k((unsigned long)__va(pfn << PAGE_SHIFT));
++ printk("pgd is %lx.\n", *(unsigned long *)pgd);
++ (void)copy_from_user(&ret,
++ (unsigned long *)__va(pfn << PAGE_SHIFT),
++ 4);
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
- pfn_pte_ma(mfn_list[i], PAGE_KERNEL),
+ pfn_pte_ma(frame_list[i], PAGE_KERNEL),
0);
BUG_ON(ret);
++ printk("<0>Rehooked va; pte now %lx.\n",
++ *(unsigned long *)virt_to_ptep(__va(pfn << PAGE_SHIFT)));
++ *(unsigned long *)__va(pfn << PAGE_SHIFT) =
++ 0xf001;
++ printk("<0>Touched va.\n");
}
/* Relinquish the page back to the allocator. */
BUG_ON((signed short)ref < 0);
np->grant_rx_ref[id] = ref;
gnttab_grant_foreign_transfer_ref(ref,
-- np->xbdev->otherend_id);
++ np->xbdev->otherend_id,
++ __pa(skb->head) >> PAGE_SHIFT);
RING_GET_REQUEST(&np->rx, req_prod + i)->gref = ref;
rx_pfn_array[i] = virt_to_mfn(skb->head);
reservation.domid = DOMID_SELF;
/* Tell the ballon driver what is going on. */
-- balloon_update_driver_allowance(i);
++//SOS22 balloon_update_driver_allowance(i);
/* Zap PTEs and give away pages in one big multicall. */
(void)HYPERVISOR_multicall(rx_mcl, i+1);
np->stats.rx_bytes += rx->status;
/* Remap the page. */
++#ifndef CONFIG_XEN_SHADOW_MODE
mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
mmu->val = __pa(skb->head) >> PAGE_SHIFT;
mmu++;
++#endif
MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
pfn_pte_ma(mfn, PAGE_KERNEL), 0);
mcl++;
}
/* Some pages are no longer absent... */
-- balloon_update_driver_allowance(-work_done);
++//SOS22 balloon_update_driver_allowance(-work_done);
/* Do all the remapping work, and M2P updates, in one big hypercall. */
if (likely((mcl - rx_mcl) != 0)) {
if ((unsigned long)np->rx_skbs[i] < __PAGE_OFFSET)
continue;
gnttab_grant_foreign_transfer_ref(
-- np->grant_rx_ref[i], np->xbdev->otherend_id);
++ np->grant_rx_ref[i], np->xbdev->otherend_id,
++ __pa(np->rx_skbs[i]->data) >> PAGE_SHIFT);
RING_GET_REQUEST(&np->rx, requeue_idx)->gref =
np->grant_rx_ref[i];
RING_GET_REQUEST(&np->rx, requeue_idx)->id = i;
void xen_set_ldt(unsigned long ptr, unsigned long bytes);
void xen_machphys_update(unsigned long mfn, unsigned long pfn);
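++/* Query helpers for the hypervisor-reserved PFN hole. */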
++unsigned long xen_pfn_hole_start(void);
++unsigned long xen_pfn_hole_size(void);
++
#ifdef CONFIG_SMP
#include <linux/cpumask.h>
void xen_tlb_flush_all(void);
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
++/* Definitions for machine and pseudophysical addresses. */
++#ifdef CONFIG_X86_PAE
++typedef unsigned long long paddr_t;
++typedef unsigned long long maddr_t;
++#else
++typedef unsigned long paddr_t;
++typedef unsigned long maddr_t;
++#endif
++
/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY (~0UL)
++#ifndef CONFIG_XEN_SHADOW_MODE
#define FOREIGN_FRAME(m) ((m) | (1UL<<31))
extern unsigned long *phys_to_machine_mapping;
#define pfn_to_mfn(pfn) \
phys_to_machine_mapping[pfn] = mfn;
}
--/* Definitions for machine and pseudophysical addresses. */
--#ifdef CONFIG_X86_PAE
--typedef unsigned long long paddr_t;
--typedef unsigned long long maddr_t;
--#else
--typedef unsigned long paddr_t;
--typedef unsigned long maddr_t;
--#endif
--
static inline maddr_t phys_to_machine(paddr_t phys)
{
maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
return phys;
}
--
++#else
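++/*
++ * Shadow (translated) mode: the kernel only ever sees pseudophysical frame
++ * numbers, so the conversions collapse to identities.
++ */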
++#define pfn_to_mfn(p) (p)
++#define mfn_to_pfn(m) (m)
++#define phys_to_machine(p) (p)
++#define machine_to_phys(m) (m)
++static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
++{
++ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
++}
++#endif
/*
* These are used to make use of C type-checking..
*/
({ \
unsigned long mfn = pte_mfn(_pte); \
unsigned long pfn = mfn_to_pfn(mfn); \
-- if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\
++ if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \
pfn = max_mapnr; /* special: force !pfn_valid() */ \
pfn; \
})
({ \
unsigned long mfn = pte_mfn(_pte); \
unsigned long pfn = mfn_to_pfn(mfn); \
-- if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\
++ if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \
pfn = max_mapnr; /* special: force !pfn_valid() */ \
pfn; \
})
ptep_set_access_flags(__vma, __address, __ptep, __entry, 1); \
} while (0)
- #ifndef CONFIG_XEN_SHADOW_MODE
+void make_lowmem_page_readonly(void *va);
+void make_lowmem_page_writable(void *va);
+void make_page_readonly(void *va);
+void make_page_writable(void *va);
+void make_pages_readonly(void *va, unsigned int nr);
+void make_pages_writable(void *va, unsigned int nr);
+ #ifndef CONFIG_XEN_SHADOW_MODE
+ void make_lowmem_mmu_page_readonly(void *va);
+ void make_lowmem_mmu_page_writable(void *va);
+ void make_mmu_page_readonly(void *va);
+ void make_mmu_page_writable(void *va);
+ void make_mmu_pages_readonly(void *va, unsigned int nr);
+ void make_mmu_pages_writable(void *va, unsigned int nr);
#else
- #define make_lowmem_page_readonly(_va) ((void)0)
- #define make_lowmem_page_writable(_va) ((void)0)
- #define make_page_readonly(_va) ((void)0)
- #define make_page_writable(_va) ((void)0)
- #define make_pages_readonly(_va, _nr) ((void)0)
- #define make_pages_writable(_va, _nr) ((void)0)
+ #define make_lowmem_mmu_page_readonly(_va) ((void)0)
+ #define make_lowmem_mmu_page_writable(_va) ((void)0)
+ #define make_mmu_page_readonly(_va) ((void)0)
+ #define make_mmu_page_writable(_va) ((void)0)
+ #define make_mmu_pages_readonly(_va, _nr) ((void)0)
+ #define make_mmu_pages_writable(_va, _nr) ((void)0)
#endif
#define virt_to_ptep(__va) \
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
unsigned long page);
--int gnttab_grant_foreign_transfer(domid_t domid);
++int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
unsigned long frame, int readonly);
--void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid);
++void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
++ unsigned long pfn);
#ifdef __ia64__
#define gnttab_map_vaddr(map) __va(map.dev_bus_addr)
spin_lock(&mm->page_table_lock);
if (!new)
return NULL;
++
/*
* Because we dropped the lock, we should re-check the
* entry, as somebody else could have populated it..
if ( unlikely(cmpxchg_user(pl1e, o, n) != 0) ||
unlikely(o != l1e_get_intpte(ol1e)) )
{
-- MEM_LOG("Failed to update %" PRIpte " -> %" PRIpte
++ printf("Failed to update %" PRIpte " -> %" PRIpte
": saw %" PRIpte,
l1e_get_intpte(ol1e),
l1e_get_intpte(nl1e),
l1_pgentry_t ol1e;
struct domain *d = current->domain;
-- if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
++ shadow_sync_all(d);
++ if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) ) {
++ printf("copy_from_user1 failed %p, l2 %lx.\n", pl1e,
++ *(unsigned long *)&__linear_l2_table[l2_table_offset((unsigned long)pl1e)]);
return 0;
++ }
-- if ( unlikely(shadow_mode_refcounts(d)) )
++ if ( unlikely(shadow_mode_refcounts(d)) ) {
return update_l1e(pl1e, ol1e, nl1e);
++ }
if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
{
perfc_incrc(calls_to_update_va);
-- if ( unlikely(!__addr_ok(va) && !shadow_mode_external(d)) )
++ if ( unlikely(!__addr_ok(va) && !shadow_mode_external(d)) ) {
++ printf("Bad update_va_mapping.\n");
return -EINVAL;
++ }
LOCK_BIGLOCK(d);
if ( unlikely(shadow_mode_enabled(d)) )
check_pagetable(v, "pre-va"); /* debug */
++ shadow_sync_all(d);
++
if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
-- val)) )
++ val)) ) {
++ printf("mod_l1_entry failed.\n");
rc = -EINVAL;
++ }
if ( likely(rc == 0) && unlikely(shadow_mode_enabled(d)) )
{
}
rc = shadow_do_update_va_mapping(va, val, v);
--
++ if (rc)
++ printf("shadow_do_update_va_mapping says %d.\n", rc);
check_pagetable(v, "post-va"); /* debug */
}
l2_pgentry_t *guest2 = guest;
l2_pgentry_t *snapshot2 = snapshot;
l1_pgentry_t *shadow2 = shadow;
--
++
++ printf("Update hl2 shadow.\n");
++
ASSERT(shadow_mode_write_all(d) || shadow_mode_wr_pt_pte(d));
BUG_ON(!shadow_mode_refcounts(d)); // not yet implemented
#include <asm/current.h>
#include <public/nmi.h>
#include <public/version.h>
++#include <asm/shadow.h>
void cmdline_parse(char *cmdline)
{
return -EFAULT;
return 0;
}
- fi.submap = 0;
+
+ case XENVER_get_features:
+ {
+ xen_feature_info_t fi;
+
+ if ( copy_from_user(&fi, arg, sizeof(fi)) )
+ return -EFAULT;
+
+ switch ( fi.submap_idx )
+ {
+ case 0:
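++            /* Advertise writable MMU structures only to domains running in
++             * wr_pt_pte shadow mode. */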
++ if (shadow_mode_wr_pt_pte(current->domain))
++ fi.submap = XENFEAT_writable_mmu_structures;
++ else
++ fi.submap = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ( copy_to_user(arg, &fi, sizeof(fi)) )
+ return -EFAULT;
+ return 0;
+ }
+
}
return -ENOSYS;
return nr_extents;
}
-
++
+ static long
+ populate_physmap(
+ struct domain *d,
+ unsigned long *extent_list,
+ unsigned int nr_extents,
+ unsigned int extent_order,
+ unsigned int flags,
+ int *preempted)
+ {
- struct pfn_info *page;
- unsigned long i, j, pfn, mfn;
++ struct pfn_info *page;
++ unsigned long i, j, pfn, mfn;
++ struct domain_mmap_cache cache1, cache2;
+
+ if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
+ return 0;
+
+ if ( (extent_order != 0) &&
+ !multipage_allocation_permitted(current->domain) )
+ return 0;
+
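++    /* Translated guests also need their P2M entries set, done under the
++     * shadow lock with map caches for the table pages. */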
++ if (shadow_mode_translate(d)) {
++ domain_mmap_cache_init(&cache1);
++ domain_mmap_cache_init(&cache2);
++ shadow_lock(d);
++ }
++
+ for ( i = 0; i < nr_extents; i++ )
+ {
+ if ( hypercall_preempt_check() )
+ {
+ *preempted = 1;
- return i;
++ goto out;
+ }
+
+ if ( unlikely((page = alloc_domheap_pages(
+ d, extent_order, flags)) == NULL) )
+ {
+ DPRINTK("Could not allocate order=%d extent: "
+ "id=%d flags=%x (%ld of %d)\n",
+ extent_order, d->domain_id, flags, i, nr_extents);
- return i;
++ goto out;
+ }
+
+ mfn = page_to_pfn(page);
+
+ if ( unlikely(__get_user(pfn, &extent_list[i]) != 0) )
- return i;
++ goto out;
+
- for ( j = 0; j < (1 << extent_order); j++ )
++ for ( j = 0; j < (1 << extent_order); j++ ) {
++ printf("Populating %lx with %lx.\n",
++ pfn + j, mfn + j);
++ if (shadow_mode_translate(d))
++ set_p2m_entry(d, pfn + j, mfn + j, &cache1, &cache2);
+ set_pfn_from_mfn(mfn + j, pfn + j);
++ }
+
- /* Inform the domain of the new page's machine address. */
- if ( __put_user(mfn, &extent_list[i]) != 0 )
- return i;
++ if (!shadow_mode_translate(d)) {
++ /* Inform the domain of the new page's machine address. */
++ if ( __put_user(mfn, &extent_list[i]) != 0 )
++ goto out;
++ }
+ }
+
- return nr_extents;
++ out:
++ if (shadow_mode_translate(d)) {
++ shadow_unlock(d);
++ domain_mmap_cache_destroy(&cache1);
++ domain_mmap_cache_destroy(&cache2);
++ }
++
++ return i;
+ }
static long
decrease_reservation(
int *preempted)
{
struct pfn_info *page;
- unsigned long i, j, mpfn, mfn;
- unsigned long i, j, mfn;
++ unsigned long i, j, gpfn, mfn;
if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
return 0;
return i;
}
- if ( unlikely(__get_user(mpfn, &extent_list[i]) != 0) )
- if ( unlikely(__get_user(mfn, &extent_list[i]) != 0) )
++ if ( unlikely(__get_user(gpfn, &extent_list[i]) != 0) )
return i;
for ( j = 0; j < (1 << extent_order); j++ )
{
- mfn = __gpfn_to_mfn(d, mpfn + j);
- if ( unlikely((mfn + j) >= max_page) )
++ mfn = __gpfn_to_mfn(d, gpfn + j);
+ if ( unlikely(mfn >= max_page) )
{
-- DPRINTK("Domain %u page number out of range (%lx >= %lx)\n",
- d->domain_id, mfn, max_page);
- d->domain_id, mfn + j, max_page);
++                DPRINTK("Domain %u page number out of range (mfn %lx, gpfn %lx >= %lx)\n",
++                        d->domain_id, mfn, gpfn, max_page);
return i;
}
if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
put_page(page);
-- shadow_sync_and_drop_references(d, page);
--
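++        /* For translated guests, drop shadow references and clear both the
++         * P2M and M2P entries for the released frames. */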
++ if (shadow_mode_translate(d)) {
++ struct domain_mmap_cache c1, c2;
++ domain_mmap_cache_init(&c1);
++ domain_mmap_cache_init(&c2);
++ shadow_lock(d);
++ shadow_sync_and_drop_references(d, page);
++ set_p2m_entry(d, gpfn + j, -1, &c1, &c2);
++ set_pfn_from_mfn(mfn + j, INVALID_M2P_ENTRY);
++ shadow_unlock(d);
++ domain_mmap_cache_destroy(&c1);
++ domain_mmap_cache_destroy(&c2);
++ }
put_page(page);
}
}
#define SHADOW_SNAPSHOT_ELSEWHERE (-1L)
/************************************************************************/
--#define SHADOW_DEBUG 0
++#define SHADOW_DEBUG 1
#define SHADOW_VERBOSE_DEBUG 0
#define SHADOW_VVERBOSE_DEBUG 0
#define SHADOW_VVVERBOSE_DEBUG 0