 static int xc_mem_paging_memop(xc_interface *xch, uint32_t domain_id,
                                unsigned int op, uint64_t gfn, void *buffer)
 {
     xen_mem_paging_op_t mpo;
+    DECLARE_HYPERCALL_BOUNCE(buffer, XC_PAGE_SIZE,
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    int rc;
 
     memset(&mpo, 0, sizeof(mpo));
 
     mpo.op = op;
     mpo.domain = domain_id;
     mpo.gfn = gfn;
 
-    mpo.buffer = (unsigned long) buffer;
-    return do_memory_op(xch, XENMEM_paging_op, &mpo, sizeof(mpo));
+    if ( buffer )
+    {
+        if ( xc_hypercall_bounce_pre(xch, buffer) )
+        {
+            PERROR("Could not bounce memory for XENMEM_paging_op %u", op);
+            return -1;
+        }
+
+        set_xen_guest_handle(mpo.buffer, buffer);
+    }
+
+    rc = do_memory_op(xch, XENMEM_paging_op, &mpo, sizeof(mpo));
+
+    if ( buffer )
+        xc_hypercall_bounce_post(xch, buffer);
+
+    return rc;
 }
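The hunk above is the standard libxc bounce-buffer idiom: DECLARE_HYPERCALL_BOUNCE() associates an existing pointer with a size and direction, xc_hypercall_bounce_pre() copies it into hypercall-safe memory (failing cleanly if that allocation fails), set_xen_guest_handle() points the hypercall argument at the bounced copy, and xc_hypercall_bounce_post() releases it. As a rough sketch of the same lifecycle in the other direction (an OUT buffer the hypervisor fills), using libxc's internal macros from xc_private.h; xc_example_query() and the elided hypercall are illustrative, not real libxc code:

    /* Sketch only: hypothetical wrapper showing the bounce lifecycle for
     * an OUT buffer.  Compare XC_HYPERCALL_BUFFER_BOUNCE_OUT here with
     * the BOUNCE_IN direction used by xc_mem_paging_memop() above. */
    static int xc_example_query(xc_interface *xch, void *buffer)
    {
        DECLARE_HYPERCALL_BOUNCE(buffer, XC_PAGE_SIZE,
                                 XC_HYPERCALL_BUFFER_BOUNCE_OUT);
        int rc;

        if ( xc_hypercall_bounce_pre(xch, buffer) )
            return -1;

        rc = 0; /* ... issue the hypercall with a handle to 'buffer' ... */

        /* For OUT/BOTH directions, post copies the bounced data back
         * into 'buffer' before freeing the bounce area. */
        xc_hypercall_bounce_post(xch, buffer);

        return rc;
    }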
@@ ... @@ int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id,
 int xc_mem_paging_load(xc_interface *xch, uint32_t domain_id,
                        uint64_t gfn, void *buffer)
 {
-    int rc, old_errno;
-
     errno = EINVAL;
 
     if ( !buffer )
         return -1;
 
-    if ( ((unsigned long) buffer) & (XC_PAGE_SIZE - 1) )
-        return -1;
-
-    if ( mlock(buffer, XC_PAGE_SIZE) )
-        return -1;
-
-    rc = xc_mem_paging_memop(xch, domain_id,
-                             XENMEM_paging_op_prep,
-                             gfn, buffer);
-
-    old_errno = errno;
-    munlock(buffer, XC_PAGE_SIZE);
-    errno = old_errno;
-
-    return rc;
+    return xc_mem_paging_memop(xch, domain_id, XENMEM_paging_op_prep,
+                               gfn, buffer);
 }
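Caller-visible effect: xc_mem_paging_load() used to insist on a page-aligned buffer and mlock() it around the hypercall so the hypervisor's copy_from_user() could not fault on it; with the bounce buffer doing the copying, both precautions become redundant and any readable buffer of XC_PAGE_SIZE bytes is acceptable. A minimal usage sketch (load_saved_page() and its surroundings are illustrative, not part of libxc):

    #include <xenctrl.h>

    /* Sketch: a pager pushing saved page contents back into a guest.
     * 'contents' can be an ordinary heap buffer; no page alignment or
     * mlock() is required any more. */
    static int load_saved_page(xc_interface *xch, uint32_t domid,
                               uint64_t gfn, void *contents)
    {
        return xc_mem_paging_load(xch, domid, gfn, contents);
    }

On the hypervisor side, p2m_mem_paging_prep() (in the x86 p2m code) is converted to take a proper guest handle instead of a raw guest virtual address: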
  * mfn if populate was called for gfn which was nominated but not evicted. In
  * this case only the p2mt needs to be forwarded.
  */
-int p2m_mem_paging_prep(struct domain *d, unsigned long gfn_l, uint64_t buffer)
+int p2m_mem_paging_prep(struct domain *d, unsigned long gfn_l,
+                        XEN_GUEST_HANDLE_64(const_uint8) buffer)
 {
     struct page_info *page = NULL;
     p2m_type_t p2mt;
     mfn_t mfn;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     int ret, page_extant = 1;
-    const void *user_ptr = (const void *) buffer;
 
-    if ( user_ptr )
-        /* Sanity check the buffer and bail out early if trouble */
-        if ( (buffer & (PAGE_SIZE - 1)) ||
-             (!access_ok(user_ptr, PAGE_SIZE)) )
-            return -EINVAL;
+    /* Sanity check the buffer and bail out early if trouble */
+    if ( !guest_handle_okay(buffer, PAGE_SIZE) )
+        return -EINVAL;
 
     gfn_lock(p2m, gfn, 0);
@@ ... @@
         /* If the user did not provide a buffer, we disallow */
         ret = -EINVAL;
-        if ( unlikely(user_ptr == NULL) )
+        if ( unlikely(guest_handle_is_null(buffer)) )
             goto out;
         /* Get a free page */
         ret = -ENOMEM;
@@ ... @@
         page_extant = 0;
 
         guest_map = map_domain_page(mfn);
-        ret = copy_from_user(guest_map, user_ptr, PAGE_SIZE);
+        ret = copy_from_guest(guest_map, buffer, PAGE_SIZE);
         unmap_domain_page(guest_map);
         if ( ret )
         {
void p2m_mem_paging_populate(struct domain *d, unsigned long gfn);
/* Prepare the p2m for paging a frame in */
-int p2m_mem_paging_prep(struct domain *d, unsigned long gfn, uint64_t buffer);
+int p2m_mem_paging_prep(struct domain *d, unsigned long gfn,
+ XEN_GUEST_HANDLE_64(const_uint8) buffer);
/* Resume normal operation (in case a domain was paused) */
struct vm_event_st;
void p2m_mem_paging_resume(struct domain *d, struct vm_event_st *rsp);
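The declaration (and the public struct below) use XEN_GUEST_HANDLE_64() rather than a plain guest handle: the 64-bit handle has the same 8-byte representation for 32-bit and 64-bit guests, which keeps the field usable in a fixed-layout public interface without any compat translation. The final hunk adjusts that public interface: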
     uint8_t op;     /* XENMEM_paging_op_* */
     domid_t domain;
 
-    /* PAGING_PREP IN: buffer to immediately fill page in */
-    uint64_aligned_t buffer;
-    /* Other OPs */
-    uint64_aligned_t gfn;    /* IN: gfn of page being operated on */
+    /* IN: (XENMEM_paging_op_prep) buffer to immediately fill page from */
+    XEN_GUEST_HANDLE_64(const_uint8) buffer;
+    /* IN: gfn of page being operated on */
+    uint64_aligned_t gfn;
 };
 typedef struct xen_mem_paging_op xen_mem_paging_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
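Since XEN_GUEST_HANDLE_64() occupies exactly the 8 bytes of the uint64_aligned_t it replaces, and the field order is preserved, struct xen_mem_paging_op keeps its existing size and layout for both 32- and 64-bit callers: the wire format of the hypercall is unchanged, and only the C-level type becomes stricter (the field already carried a guest virtual address). If desired, that can be pinned down at build time; the assertion below is my own sketch, not part of the patch:

    /* Sketch: compile-time check (in hypervisor code) that the type
     * change cannot have altered the struct's layout. */
    static void __maybe_unused check_mem_paging_op_layout(void)
    {
        BUILD_BUG_ON(sizeof(XEN_GUEST_HANDLE_64(const_uint8)) !=
                     sizeof(uint64_t));
    }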