*
* physical-to-machine mappings for automatically-translated domains.
*
+ * Parts of this code are Copyright (c) 2009 by Citrix (R&D) Ltd. (Patrick Colp)
* Parts of this code are Copyright (c) 2007 by Advanced Micro Devices.
* Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
* Parts of this code are Copyright (c) 2006 by Michael A Fetterman
#include <asm/p2m.h>
#include <asm/hvm/vmx/vmx.h> /* ept_p2m_init() */
#include <xen/iommu.h>
+#include <asm/mem_event.h>
+#include <public/mem_event.h>
+#include <xen/event.h>
/* Debugging and auditing of the P2M code? */
#define P2M_AUDIT 0
return rc;
}
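+
+/*
+ * Check that the page at gfn is eligible for paging out and mark its p2m
+ * entry p2m_ram_paging_out.  Only plain RAM pages to which nothing but
+ * the guest's own allocation holds a reference can be nominated.
+ */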
+int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn)
+{
+    struct page_info *page;
+    p2m_type_t p2mt;
+    mfn_t mfn;
+    int ret;
+
+    mfn = gfn_to_mfn(d, gfn, &p2mt);
+
+    /* Check if mfn is valid */
+    ret = -EINVAL;
+    if ( !mfn_valid(mfn) )
+        goto out;
+
+    /* Check p2m type */
+    ret = -EAGAIN;
+    if ( !p2m_is_pageable(p2mt) )
+        goto out;
+
+    /* Check for io memory page */
+    if ( is_iomem_page(mfn_x(mfn)) )
+        goto out;
+
+    /* Check page count: the guest's PGC_allocated reference must be the
+     * only reference held, so no foreign or grant mapping still has the
+     * page */
+    page = mfn_to_page(mfn);
+    if ( (page->count_info & (PGC_count_mask | PGC_allocated)) !=
+         (1 | PGC_allocated) )
+        goto out;
+
+    /* Check page type: the page must not be in use as a page table or
+     * be writably mapped */
+    if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_none )
+        goto out;
+
+    /* Fix p2m entry */
+    p2m_lock(d->arch.p2m);
+    set_p2m_entry(d, gfn, mfn, 0, p2m_ram_paging_out);
+    p2m_unlock(d->arch.p2m);
+
+    ret = 0;
+
+ out:
+    return ret;
+}
+
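+/*
+ * Drop the frame backing a nominated gfn: release the guest's allocation
+ * reference and point the p2m entry at PAGING_MFN with type p2m_ram_paged.
+ * The pager is expected to have saved the page contents first.
+ */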
+int p2m_mem_paging_evict(struct domain *d, unsigned long gfn)
+{
+    struct page_info *page;
+    p2m_type_t p2mt;
+    mfn_t mfn;
+
+    /* Get mfn */
+    mfn = gfn_to_mfn(d, gfn, &p2mt);
+    if ( unlikely(!mfn_valid(mfn)) )
+        return -EINVAL;
+
+    /* Don't evict a frame that has already been paged out or is in the
+     * middle of being paged back in */
+    if ( (p2mt == p2m_ram_paged) || (p2mt == p2m_ram_paging_in) ||
+         (p2mt == p2m_ram_paging_in_start) )
+        return -EINVAL;
+
+    /* Get the page so it doesn't get modified under Xen's feet */
+    page = mfn_to_page(mfn);
+    if ( unlikely(!get_page(page, d)) )
+        return -EINVAL;
+
+    /* Decrement guest domain's ref count of the page */
+    if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+        put_page(page);
+
+    /* Remove mapping from p2m table */
+    p2m_lock(d->arch.p2m);
+    set_p2m_entry(d, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged);
+    p2m_unlock(d->arch.p2m);
+
+    /* Put the page back so it gets freed */
+    put_page(page);
+
+    return 0;
+}
+
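+/*
+ * Called when a paged gfn is accessed.  Marks the entry
+ * p2m_ram_paging_in_start (unless it is still only being paged out),
+ * pauses the faulting vcpu if it belongs to this domain, and queues a
+ * page-in request for the pager.
+ */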
+void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
+{
+    struct vcpu *v = current;
+    mem_event_request_t req;
+    p2m_type_t p2mt;
+
+    memset(&req, 0, sizeof(req));
+
+    /* Check that there's space on the ring for this request */
+    if ( mem_event_check_ring(d) )
+        return;
+
+    /* Fix p2m mapping */
+    /* XXX: It seems inefficient to have this here, as it's only needed
+     * in one case (an EPT guest accessing a page that is being paged out) */
+    gfn_to_mfn(d, gfn, &p2mt);
+    if ( p2mt != p2m_ram_paging_out )
+    {
+        p2m_lock(d->arch.p2m);
+        set_p2m_entry(d, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start);
+        p2m_unlock(d->arch.p2m);
+    }
+
+    /* Pause the faulting vcpu, but only if it belongs to the target domain */
+    if ( v->domain == d )
+    {
+        vcpu_pause_nosync(v);
+        req.flags |= MEM_EVENT_FLAG_PAUSED;
+    }
+
+    /* Send request to pager */
+    req.gfn = gfn;
+    req.p2mt = p2mt;
+    req.vcpu_id = v->vcpu_id;
+
+    mem_event_put_request(d, &req);
+}
+
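+/*
+ * Allocate a fresh frame for a gfn that is being paged in and mark its
+ * entry p2m_ram_paging_in.  The pager is expected to copy the saved
+ * contents into the new frame before responding.
+ */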
+int p2m_mem_paging_prep(struct domain *d, unsigned long gfn)
+{
+    struct page_info *page;
+
+    /* Get a free page to back the paged-in gfn */
+    page = alloc_domheap_page(d, 0);
+    if ( unlikely(page == NULL) )
+        return -ENOMEM;
+
+    /* Fix p2m mapping */
+    p2m_lock(d->arch.p2m);
+    set_p2m_entry(d, gfn, page_to_mfn(page), 0, p2m_ram_paging_in);
+    p2m_unlock(d->arch.p2m);
+
+    return 0;
+}
+
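+/*
+ * Consume the pager's response: restore the gfn to p2m_ram_rw, unpause
+ * the vcpu that faulted on it, and unpause any vcpus that were waiting
+ * for ring space.
+ */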
+void p2m_mem_paging_resume(struct domain *d)
+{
+    mem_event_response_t rsp;
+    p2m_type_t p2mt;
+    mfn_t mfn;
+
+    /* Pull the response off the ring */
+    mem_event_get_response(d, &rsp);
+
+    /* Fix p2m entry */
+    mfn = gfn_to_mfn(d, rsp.gfn, &p2mt);
+    p2m_lock(d->arch.p2m);
+    set_p2m_entry(d, rsp.gfn, mfn, 0, p2m_ram_rw);
+    p2m_unlock(d->arch.p2m);
+
+    /* Unpause the vcpu that was paused when the request was sent */
+    if ( rsp.flags & MEM_EVENT_FLAG_PAUSED )
+        vcpu_unpause(d->vcpu[rsp.vcpu_id]);
+
+    /* Unpause any vcpus that were paused because the ring was full */
+    mem_event_unpause_vcpus(d);
+}
+
/*
* Local variables:
* mode: C
#ifdef's everywhere else. */
p2m_grant_map_rw = 7, /* Read/write grant mapping */
p2m_grant_map_ro = 8, /* Read-only grant mapping */
+
+    p2m_ram_paging_out = 9,       /* Memory that is being paged out */
+    p2m_ram_paged = 10,           /* Memory that has been paged out */
+    p2m_ram_paging_in = 11,       /* Memory that is being paged in (new frame allocated) */
+    p2m_ram_paging_in_start = 12, /* Memory whose page-in has just been requested */
} p2m_type_t;
typedef enum {
#define p2m_to_mask(_t) (1UL << (_t))
/* RAM types, which map to real machine frames */
-#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw)          \
-                       | p2m_to_mask(p2m_ram_logdirty)  \
-                       | p2m_to_mask(p2m_ram_ro))
+#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw)                \
+                       | p2m_to_mask(p2m_ram_logdirty)        \
+                       | p2m_to_mask(p2m_ram_ro)              \
+                       | p2m_to_mask(p2m_ram_paging_out)      \
+                       | p2m_to_mask(p2m_ram_paged)           \
+                       | p2m_to_mask(p2m_ram_paging_in_start) \
+                       | p2m_to_mask(p2m_ram_paging_in))
/* Grant mapping types, which map to a real machine frame in another
* VM */
#define P2M_MAGIC_TYPES (p2m_to_mask(p2m_populate_on_demand))
+/* Pageable types */
+#define P2M_PAGEABLE_TYPES (p2m_to_mask(p2m_ram_rw))
+
+#define P2M_PAGING_TYPES (p2m_to_mask(p2m_ram_paging_out)        \
+                          | p2m_to_mask(p2m_ram_paged)           \
+                          | p2m_to_mask(p2m_ram_paging_in_start) \
+                          | p2m_to_mask(p2m_ram_paging_in))
+
+#define P2M_PAGED_TYPES (p2m_to_mask(p2m_ram_paged))
+
/* Useful predicates */
#define p2m_is_ram(_t) (p2m_to_mask(_t) & P2M_RAM_TYPES)
#define p2m_is_mmio(_t) (p2m_to_mask(_t) & P2M_MMIO_TYPES)
implementations, there's no way of synchronising against that. */
#define p2m_is_valid(_t) (p2m_to_mask(_t) & (P2M_RAM_TYPES | P2M_MMIO_TYPES))
#define p2m_has_emt(_t) (p2m_to_mask(_t) & (P2M_RAM_TYPES | p2m_to_mask(p2m_mmio_direct)))
+#define p2m_is_pageable(_t) (p2m_to_mask(_t) & P2M_PAGEABLE_TYPES)
+#define p2m_is_paging(_t) (p2m_to_mask(_t) & P2M_PAGING_TYPES)
+#define p2m_is_paged(_t) (p2m_to_mask(_t) & P2M_PAGED_TYPES)
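+
+/* Code that resolves guest frames is expected to test p2m_is_paging() /
+ * p2m_is_paged() and call p2m_mem_paging_populate() to start a page-in. */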
/* Populate-on-demand */
#define POPULATE_ON_DEMAND_MFN (1<<9)
#define POD_PAGE_ORDER 9
+/* MFN used to fill p2m entries whose frame has been paged out */
+#define PAGING_MFN INVALID_MFN
struct p2m_domain {
/* Lock that protects updates to the p2m */
int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
+/* Check if a nominated gfn is valid to be paged out */
+int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn);
+/* Evict a frame */
+int p2m_mem_paging_evict(struct domain *d, unsigned long gfn);
+/* Start populating a paged out frame */
+void p2m_mem_paging_populate(struct domain *d, unsigned long gfn);
+/* Prepare the p2m for paging a frame in */
+int p2m_mem_paging_prep(struct domain *d, unsigned long gfn);
+/* Resume normal operation (in case a vcpu was paused) */
+void p2m_mem_paging_resume(struct domain *d);
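+
+/*
+ * Illustrative flow (a sketch of how a pager is expected to drive these
+ * calls; the tools-side interface is defined elsewhere in this series):
+ *   1. Nominate a gfn: p2m_mem_paging_nominate() marks it
+ *      p2m_ram_paging_out.
+ *   2. Save the page contents, then p2m_mem_paging_evict() frees the
+ *      frame and marks the entry p2m_ram_paged.
+ *   3. A later access triggers p2m_mem_paging_populate(), which pauses
+ *      the faulting vcpu and queues a request on the mem_event ring.
+ *   4. p2m_mem_paging_prep() allocates a new frame; the pager restores
+ *      the contents and responds, and p2m_mem_paging_resume() sets the
+ *      entry back to p2m_ram_rw and unpauses the vcpu.
+ */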
+
#endif /* _XEN_P2M_H */
/*