int construct_dom0(struct domain *d)
{
struct kernel_info kinfo = {};
+ struct vcpu *saved_current;
int rc, i, cpu;
struct vcpu *v = d->vcpu[0];
if ( rc < 0 )
return rc;
- /* The following loads use the domain's p2m */
+ /*
+ * The following loads use the domain's p2m and require current to
+ * be a vcpu of the domain, temporarily switch
+ */
+ saved_current = current;
p2m_restore_state(v);
+ set_current(v);
/*
* kernel_load will determine the placement of the kernel as well
initrd_load(&kinfo);
dtb_load(&kinfo);
+ /* Now that we are done restore the original p2m and current. */
+ set_current(saved_current);
+ p2m_restore_state(saved_current);
+
discard_initial_modules();
v->is_initialised = 1;
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/domain_page.h>
+#include <xen/sched.h>
+#include <asm/current.h>
#include <asm/mm.h>
#include <asm/guest_access.h>
while ( len )
{
- paddr_t g;
void *p;
unsigned size = min(len, (unsigned)PAGE_SIZE - offset);
+ struct page_info *page;
- if ( gvirt_to_maddr((vaddr_t) to, &g, GV2M_WRITE) )
+ page = get_page_from_gva(current->domain, (vaddr_t) to, GV2M_WRITE);
+ if ( page == NULL )
return len;
- p = map_domain_page(g>>PAGE_SHIFT);
+ p = __map_domain_page(page);
p += offset;
memcpy(p, from, size);
if ( flush_dcache )
clean_xen_dcache_va_range(p, size);
unmap_domain_page(p - offset);
+ put_page(page);
len -= size;
from += size;
to += size;
while ( len )
{
- paddr_t g;
void *p;
unsigned size = min(len, (unsigned)PAGE_SIZE - offset);
+ struct page_info *page;
- if ( gvirt_to_maddr((vaddr_t) to, &g, GV2M_WRITE) )
+ page = get_page_from_gva(current->domain, (vaddr_t) to, GV2M_WRITE);
+ if ( page == NULL )
return len;
- p = map_domain_page(g>>PAGE_SHIFT);
+ p = __map_domain_page(page);
p += offset;
memset(p, 0x00, size);
unmap_domain_page(p - offset);
+ put_page(page);
len -= size;
to += size;
/*
while ( len )
{
- paddr_t g;
void *p;
unsigned size = min(len, (unsigned)(PAGE_SIZE - offset));
+ struct page_info *page;
- if ( gvirt_to_maddr((vaddr_t) from & PAGE_MASK, &g, GV2M_READ) )
+ page = get_page_from_gva(current->domain, (vaddr_t) from, GV2M_READ);
+ if ( page == NULL )
return len;
- p = map_domain_page(g>>PAGE_SHIFT);
+ p = __map_domain_page(page);
p += ((vaddr_t)from & (~PAGE_MASK));
memcpy(to, p, size);
unmap_domain_page(p);
+ put_page(page);
len -= size;
from += size;
to += size;
return p >> PAGE_SHIFT;
}
+/*
+ * Translate a guest virtual address into a reference-counted page.
+ *
+ * @d:     domain owning the address space; must be current->domain
+ *         (enforced by the ASSERT below) because gvirt_to_maddr walks
+ *         the currently-active stage-1 translation regime — presumably
+ *         via the hardware AT instruction, so it only makes sense for
+ *         the running vcpu's domain.
+ * @va:    guest virtual address to look up.
+ * @flags: GV2M_READ / GV2M_WRITE access intent, forwarded to
+ *         gvirt_to_maddr.
+ *
+ * Returns the page_info for the backing machine frame with an extra
+ * reference taken via get_page(), or NULL if the translation fails,
+ * the resulting maddr is not a valid MFN, or the page does not belong
+ * to @d.  On success the caller is responsible for dropping the
+ * reference with put_page() once it has finished with the mapping.
+ *
+ * The p2m lock is held across the walk so the translation cannot be
+ * torn down between the lookup and the get_page().
+ */
+struct page_info *get_page_from_gva(struct domain *d, vaddr_t va,
+                                    unsigned long flags)
+{
+    struct p2m_domain *p2m = &d->arch.p2m;
+    struct page_info *page = NULL;
+    paddr_t maddr;
+
+    /* gvirt_to_maddr resolves against the running vcpu's tables. */
+    ASSERT(d == current->domain);
+
+    spin_lock(&p2m->lock);
+
+    /* Stage-1 + stage-2 walk of va with the requested access rights. */
+    if ( gvirt_to_maddr(va, &maddr, flags) )
+        goto err;
+
+    /* Reject MMIO / non-RAM addresses: no page_info to refcount. */
+    if ( !mfn_valid(maddr >> PAGE_SHIFT) )
+        goto err;
+
+    page = mfn_to_page(maddr >> PAGE_SHIFT);
+    ASSERT(page);
+
+    /* Fails if the page is not owned by d or is being freed. */
+    if ( unlikely(!get_page(page, d)) )
+        page = NULL;
+
+err:
+    spin_unlock(&p2m->lock);
+    return page;
+}
+
/*
* Local variables:
* mode: C
{
int i;
vaddr_t sp;
- paddr_t stack_phys;
+ struct page_info *page;
void *mapped;
unsigned long *stack, addr;
printk("Guest stack trace from sp=%"PRIvaddr":\n ", sp);
- if ( gvirt_to_maddr(sp, &stack_phys, GV2M_READ) )
+ if ( sp & ( sizeof(long) - 1 ) )
+ {
+ printk("Stack is misaligned\n");
+ return;
+ }
+
+ page = get_page_from_gva(current->domain, sp, GV2M_READ);
+ if ( page == NULL )
{
printk("Failed to convert stack to physical address\n");
return;
}
- mapped = map_domain_page(stack_phys >> PAGE_SHIFT);
+ mapped = __map_domain_page(page);
stack = mapped + (sp & ~PAGE_MASK);
printk("Stack empty.");
printk("\n");
unmap_domain_page(mapped);
-
+ put_page(page);
}
#define STACK_BEFORE_EXCEPTION(regs) ((register_t*)(regs)->sp)
void put_page(struct page_info *page);
int get_page(struct page_info *page, struct domain *domain);
+struct page_info *get_page_from_gva(struct domain *d, vaddr_t va,
+ unsigned long flags);
+
/*
* The MPT (machine->physical mapping table) is an array of word-sized
* values, indexed on machine frame number. It is expected that guest OSes
#define MATTR_DEV 0x1
#define MATTR_MEM 0xf
-/* Flags for gvirt_to_maddr */
+/* Flags for get_page_from_gva, gvirt_to_maddr etc */
#define GV2M_READ (0u<<0)
#define GV2M_WRITE (1u<<0)