unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len)
{
    /* XXX needs to handle faults */
-   unsigned offset = ((unsigned long)to & ~PAGE_MASK);
+   unsigned offset = (vaddr_t)to & ~PAGE_MASK;

    while ( len )
    {
        void *p;
        unsigned size = min(len, (unsigned)PAGE_SIZE - offset);

-       rc = gvirt_to_maddr((uint32_t) to, &g);
+       rc = gvirt_to_maddr((vaddr_t) to, &g);
        if ( rc )
            return rc;
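
        /*
         * The excerpt elides the declarations of rc and g and the rest of
         * the loop.  Not part of the patch text above: a hedged sketch of
         * how the per-page copy plausibly continues (map the frame, copy
         * one chunk, unmap, advance), based only on the pattern visible
         * in these hunks.
         */
        p = map_domain_page(g>>PAGE_SHIFT);
        p += offset;
        memcpy(p, from, size);

        unmap_domain_page(p - offset);
        len -= size;
        from += size;
        to += size;
        offset = 0;   /* later iterations start at a page boundary */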
unsigned long raw_clear_guest(void *to, unsigned len)
{
    /* XXX needs to handle faults */
-   unsigned offset = ((unsigned long)to & ~PAGE_MASK);
+   unsigned offset = (vaddr_t)to & ~PAGE_MASK;

    while ( len )
    {
        void *p;
        unsigned size = min(len, (unsigned)PAGE_SIZE - offset);

-       rc = gvirt_to_maddr((uint32_t) to, &g);
+       rc = gvirt_to_maddr((vaddr_t) to, &g);
        if ( rc )
            return rc;
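
        /*
         * Not part of the patch text above: the remainder of this loop
         * plausibly mirrors raw_copy_to_guest() (map, unmap, advance),
         * except that the mapped chunk is zeroed rather than copied, e.g.:
         */
        memset(p, 0x00, size);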
unsigned long raw_copy_from_guest(void *to, const void __user *from, unsigned len)
{
+   unsigned offset = (vaddr_t)from & ~PAGE_MASK;
+
    while ( len )
    {
        int rc;
        paddr_t g;
        void *p;
-       unsigned size = min(len, (unsigned)(PAGE_SIZE - ((unsigned)from & (~PAGE_MASK))));
+       unsigned size = min(len, (unsigned)(PAGE_SIZE - offset));

-       rc = gvirt_to_maddr((uint32_t) from & PAGE_MASK, &g);
+       rc = gvirt_to_maddr((vaddr_t) from & PAGE_MASK, &g);
        if ( rc )
            return rc;

        p = map_domain_page(g>>PAGE_SHIFT);
-       p += ((unsigned long)from & (~PAGE_MASK));
+       p += ((vaddr_t)from & (~PAGE_MASK));
        memcpy(to, p, size);
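
/*
 * Standalone illustration (not Xen code, names are local to this example):
 * the page-chunking arithmetic used by the copy routines above.  It shows
 * why the offset computed before the loop only matters for the first
 * chunk, and why each chunk is capped at the end of the current page.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE 4096UL
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

int main(void)
{
    uintptr_t from = 0x40001f00;     /* hypothetical guest virtual address */
    unsigned long len = 6000;        /* a copy that crosses a page boundary */
    unsigned long offset = from & ~EX_PAGE_MASK;

    while ( len )
    {
        unsigned long size = EX_PAGE_SIZE - offset;

        if ( size > len )
            size = len;

        printf("page %#lx: %lu bytes at offset %lu\n",
               (unsigned long)(from & EX_PAGE_MASK), size, offset);

        from += size;
        len -= size;
        offset = 0;                  /* later chunks start at the page start */
    }

    return 0;
}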
static inline paddr_t virt_to_maddr(const void *va)
{
-   uint64_t par = va_to_par((uint32_t)va);
-   return (par & PADDR_MASK & PAGE_MASK) | ((unsigned long) va & ~PAGE_MASK);
+   uint64_t par = va_to_par((vaddr_t)va);
+   return (par & PADDR_MASK & PAGE_MASK) | ((vaddr_t) va & ~PAGE_MASK);
}
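
/*
 * Illustration (values are invented): va_to_par() reads back the ARM PAR
 * after an address translation; the physical frame sits in its address
 * bits while the low bits carry status flags.  For, say, va = 0x00281234
 * whose page translates to frame 0xbfe81000:
 *
 *   par & PADDR_MASK & PAGE_MASK  ->  0xbfe81000   (frame taken from the PAR)
 *   (vaddr_t)va & ~PAGE_MASK      ->  0x00000234   (offset within the page)
 *   or of the two                 ->  0xbfe81234   (machine address returned)
 */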
static inline void *maddr_to_virt(paddr_t ma)
{
    return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
}

-static inline int gvirt_to_maddr(uint32_t va, paddr_t *pa)
+static inline int gvirt_to_maddr(vaddr_t va, paddr_t *pa)
{
    uint64_t par = gva_to_ma_par(va);
    if ( par & PAR_F )
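        /*
         * The excerpt stops here.  Not part of the patch text above: a
         * hedged sketch of how the function plausibly finishes, failing on
         * a translation fault and otherwise composing the machine address
         * the same way virt_to_maddr() does above.
         */
        return -EFAULT;
    *pa = (par & PADDR_MASK & PAGE_MASK) | ((vaddr_t) va & ~PAGE_MASK);
    return 0;
}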