#ifdef CONFIG_COMPAT
-int setup_arg_xlat_area(struct vcpu *v, l4_pgentry_t *l4tab)
-{
- struct domain *d = v->domain;
- unsigned i;
- struct page_info *pg;
-
- if ( !d->arch.mm_arg_xlat_l3 )
- {
- pg = alloc_domheap_page(NULL, 0);
- if ( !pg )
- return -ENOMEM;
- d->arch.mm_arg_xlat_l3 = page_to_virt(pg);
- clear_page(d->arch.mm_arg_xlat_l3);
- }
-
- l4tab[l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
- l4e_from_paddr(__pa(d->arch.mm_arg_xlat_l3), __PAGE_HYPERVISOR);
-
- for ( i = 0; i < COMPAT_ARG_XLAT_PAGES; ++i )
- {
- unsigned long va = COMPAT_ARG_XLAT_VIRT_START(v->vcpu_id) + i * PAGE_SIZE;
- l2_pgentry_t *l2tab;
- l1_pgentry_t *l1tab;
-
- if ( !l3e_get_intpte(d->arch.mm_arg_xlat_l3[l3_table_offset(va)]) )
- {
- pg = alloc_domheap_page(NULL, 0);
- if ( !pg )
- return -ENOMEM;
- clear_page(page_to_virt(pg));
- d->arch.mm_arg_xlat_l3[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR);
- }
- l2tab = l3e_to_l2e(d->arch.mm_arg_xlat_l3[l3_table_offset(va)]);
- if ( !l2e_get_intpte(l2tab[l2_table_offset(va)]) )
- {
- pg = alloc_domheap_page(NULL, 0);
- if ( !pg )
- return -ENOMEM;
- clear_page(page_to_virt(pg));
- l2tab[l2_table_offset(va)] = l2e_from_page(pg, __PAGE_HYPERVISOR);
- }
- l1tab = l2e_to_l1e(l2tab[l2_table_offset(va)]);
- BUG_ON(l1e_get_intpte(l1tab[l1_table_offset(va)]));
- pg = alloc_domheap_page(NULL, 0);
- if ( !pg )
- return -ENOMEM;
- l1tab[l1_table_offset(va)] = l1e_from_page(pg, PAGE_HYPERVISOR);
- }
-
- return 0;
-}
-
-static void release_arg_xlat_area(struct domain *d)
-{
- if ( d->arch.mm_arg_xlat_l3 )
- {
- unsigned l3;
-
- for ( l3 = 0; l3 < L3_PAGETABLE_ENTRIES; ++l3 )
- {
- if ( l3e_get_intpte(d->arch.mm_arg_xlat_l3[l3]) )
- {
- l2_pgentry_t *l2tab = l3e_to_l2e(d->arch.mm_arg_xlat_l3[l3]);
- unsigned l2;
-
- for ( l2 = 0; l2 < L2_PAGETABLE_ENTRIES; ++l2 )
- {
- if ( l2e_get_intpte(l2tab[l2]) )
- {
- l1_pgentry_t *l1tab = l2e_to_l1e(l2tab[l2]);
- unsigned l1;
-
- for ( l1 = 0; l1 < L1_PAGETABLE_ENTRIES; ++l1 )
- {
- if ( l1e_get_intpte(l1tab[l1]) )
- free_domheap_page(l1e_get_page(l1tab[l1]));
- }
- free_domheap_page(l2e_get_page(l2tab[l2]));
- }
- }
- free_domheap_page(l3e_get_page(d->arch.mm_arg_xlat_l3[l3]));
- }
- }
- free_domheap_page(virt_to_page(d->arch.mm_arg_xlat_l3));
- }
-}
-
static int setup_compat_l4(struct vcpu *v)
{
struct page_info *pg = alloc_domheap_page(NULL, 0);
l4_pgentry_t *l4tab;
- int rc;
if ( pg == NULL )
return -ENOMEM;
l4e_from_paddr(__pa(v->domain->arch.mm_perdomain_l3),
__PAGE_HYPERVISOR);
- if ( (rc = setup_arg_xlat_area(v, l4tab)) < 0 )
- {
- free_domheap_page(pg);
- return rc;
- }
-
v->arch.guest_table = pagetable_from_page(pg);
v->arch.guest_table_user = v->arch.guest_table;
return 0;
d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
- release_arg_xlat_area(d);
/* switch gdt */
gdt_l1e = l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
undo_and_fail:
d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
- release_arg_xlat_area(d);
gdt_l1e = l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
while ( vcpuid-- != 0 )
{
}
#else
-#define release_arg_xlat_area(d) ((void)0)
#define setup_compat_l4(v) 0
#define release_compat_l4(v) ((void)0)
#endif
free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
#endif
- if ( is_pv_32on64_domain(d) )
- release_arg_xlat_area(d);
-
free_xenheap_page(d->shared_info);
}
l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
if ( is_pv_32on64_domain(d) )
- {
v->arch.guest_table_user = v->arch.guest_table;
- if ( setup_arg_xlat_area(v, l4start) < 0 )
- panic("Not enough RAM for domain 0 hypercall argument translation.\n");
- }
l4tab += l4_table_offset(v_start);
mfn = alloc_spfn;
PFEC_page_present | pfec);
}
-DEFINE_PER_CPU(int, guest_handles_in_xen_space);
+#ifdef __x86_64__
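+/* Set while a native 64-bit HVM hypercall is dispatched, so the copy helpers
+ * below do not apply the direct-memcpy shortcut for the translation area. */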
+DEFINE_PER_CPU(bool_t, hvm_64bit_hcall);
+#endif
-unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len)
+unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
{
int rc;
- if ( this_cpu(guest_handles_in_xen_space) )
+#ifdef __x86_64__
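+ /* Buffers in the per-CPU translation area are Xen-virtual; copy directly. */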
+ if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(to, len) )
{
memcpy(to, from, len);
return 0;
}
+#endif
rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from,
len, 0);
{
int rc;
- if ( this_cpu(guest_handles_in_xen_space) )
+#ifdef __x86_64__
+ if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(from, len) )
{
memcpy(to, from, len);
return 0;
}
+#endif
rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len, 0);
return rc ? len : 0; /* fake a copy_from_user() return code */
uint32_t idx;
uint32_t gpfn;
} u;
- struct xen_add_to_physmap h;
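+ /* Stage the native-layout struct in the per-CPU xlat area for the nested call. */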
+ struct xen_add_to_physmap *h = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
if ( copy_from_guest(&u, arg, 1) )
return -EFAULT;
- h.domid = u.domid;
- h.space = u.space;
- h.idx = u.idx;
- h.gpfn = u.gpfn;
-
- this_cpu(guest_handles_in_xen_space) = 1;
- rc = hvm_memory_op(cmd, guest_handle_from_ptr(&h, void));
- this_cpu(guest_handles_in_xen_space) = 0;
+ h->domid = u.domid;
+ h->space = u.space;
+ h->idx = u.idx;
+ h->gpfn = u.gpfn;
+ rc = hvm_memory_op(cmd, guest_handle_from_ptr(h, void));
break;
}
switch ( mode )
{
#ifdef __x86_64__
- case 8:
+ case 8:
#endif
case 4:
case 2:
HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u(%lx, %lx, %lx, %lx, %lx)", eax,
regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8);
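+ /* Flag the 64-bit call so copy_{to,from}_user_hvm skip the xlat-area shortcut. */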
+ this_cpu(hvm_64bit_hcall) = 1;
regs->rax = hvm_hypercall64_table[eax](regs->rdi,
regs->rsi,
regs->rdx,
regs->r10,
- regs->r8);
+ regs->r8);
+ this_cpu(hvm_64bit_hcall) = 0;
}
else
#endif
pl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
l4e_from_page(virt_to_page(d->arch.mm_perdomain_l3),
__PAGE_HYPERVISOR);
- if ( is_pv_32on64_domain(d) )
- pl4e[l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
- l4e_from_page(virt_to_page(d->arch.mm_arg_xlat_l3),
- __PAGE_HYPERVISOR);
return 1;
perfc_incr(calls_to_update_va);
- if ( unlikely(!__addr_ok(va) && !paging_mode_external(d)) )
+ if ( unlikely(!access_ok(va, 1) && !paging_mode_external(d)) )
return -EINVAL;
rc = xsm_update_va_mapping(current->domain, val);
__PAGE_HYPERVISOR);
}
- if ( is_pv_32on64_domain(v->domain) )
- {
- /* install compat arg xlat entry */
- sl4e[shadow_l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
- shadow_l4e_from_mfn(
- page_to_mfn(virt_to_page(d->arch.mm_arg_xlat_l3)),
- __PAGE_HYPERVISOR);
- }
-
sh_unmap_domain_page(sl4e);
}
#endif
case XENMEM_add_to_physmap:
{
struct compat_add_to_physmap cmp;
- struct xen_add_to_physmap *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
+ struct xen_add_to_physmap *nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
if ( copy_from_guest(&cmp, arg, 1) )
return -EFAULT;
case XENMEM_set_memory_map:
{
struct compat_foreign_memory_map cmp;
- struct xen_foreign_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
+ struct xen_foreign_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
if ( copy_from_guest(&cmp, arg, 1) )
return -EFAULT;
case XENMEM_machine_memory_map:
{
struct compat_memory_map cmp;
- struct xen_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
+ struct xen_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
if ( copy_from_guest(&cmp, arg, 1) )
return -EFAULT;
if ( unlikely(!guest_handle_okay(cmp_uops, count)) )
return -EFAULT;
- set_xen_guest_handle(nat_ops, (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id));
+ set_xen_guest_handle(nat_ops, (void *)COMPAT_ARG_XLAT_VIRT_BASE);
for ( ; count; count -= i )
{
DEFINE_XEN_GUEST_HANDLE(compat_processor_csd_t);
DEFINE_XEN_GUEST_HANDLE(compat_processor_cx_t);
-#define xlat_page_start COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id)
+#define xlat_page_start ((unsigned long)COMPAT_ARG_XLAT_VIRT_BASE)
#define xlat_page_size COMPAT_ARG_XLAT_SIZE
#define xlat_page_left_size(xlat_page_current) \
(xlat_page_start + xlat_page_size - xlat_page_current)
unsigned int m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
#endif
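+/* Per-CPU scratch page holding 32-bit guests' hypercall arguments after translation. */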
+DEFINE_PER_CPU(char, compat_arg_xlat[COMPAT_ARG_XLAT_SIZE]);
+
/* Top-level master (and idle-domain) page directory. */
l4_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
idle_pg_table[L4_PAGETABLE_ENTRIES];
if ( copy_from_guest(&cmp, arg, 1) )
return -EFAULT;
- nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
+ nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
XLAT_vcpu_set_singleshot_timer(nat, &cmp);
rc = do_vcpu_op(cmd, vcpuid, guest_handle_from_ptr(nat, void));
break;
struct compat_gnttab_copy copy;
} cmp;
- set_xen_guest_handle(nat.uop, (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id));
+ set_xen_guest_handle(nat.uop, (void *)COMPAT_ARG_XLAT_VIRT_BASE);
switch ( cmd )
{
case GNTTABOP_setup_table:
struct compat_translate_gpfn_list xlat;
} cmp;
- set_xen_guest_handle(nat.hnd, (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id));
+ set_xen_guest_handle(nat.hnd, (void *)COMPAT_ARG_XLAT_VIRT_BASE);
split = 0;
switch ( op )
{
#endif
-#define COMPAT_ARG_XLAT_VIRT_BASE (1UL << ROOT_PAGETABLE_SHIFT)
-#define COMPAT_ARG_XLAT_SHIFT 0
-#define COMPAT_ARG_XLAT_PAGES (1U << COMPAT_ARG_XLAT_SHIFT)
-#define COMPAT_ARG_XLAT_SIZE (COMPAT_ARG_XLAT_PAGES << PAGE_SHIFT)
-#define COMPAT_ARG_XLAT_VIRT_START(vcpu_id) \
- (COMPAT_ARG_XLAT_VIRT_BASE + ((unsigned long)(vcpu_id) << \
- (PAGE_SHIFT + COMPAT_ARG_XLAT_SHIFT + 1)))
-
#define PGT_base_page_table PGT_l4_page_table
#define __HYPERVISOR_CS64 0xe008
#ifdef CONFIG_COMPAT
unsigned int hv_compat_vstart;
- l3_pgentry_t *mm_arg_xlat_l3;
#endif
/* I/O-port admin-specified access capabilities. */
#define __ASM_X86_HVM_GUEST_ACCESS_H__
#include <xen/percpu.h>
-DECLARE_PER_CPU(int, guest_handles_in_xen_space);
+DECLARE_PER_CPU(bool_t, hvm_64bit_hcall);
unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len);
unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len);
int map_ldt_shadow_page(unsigned int);
#ifdef CONFIG_COMPAT
-int setup_arg_xlat_area(struct vcpu *, l4_pgentry_t *);
unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits);
#else
-# define setup_arg_xlat_area(vcpu, l4tab) 0
# define domain_clamp_alloc_bitsize(d, b) (b)
#endif
#ifndef __X86_PERCPU_H__
#define __X86_PERCPU_H__
-#define PERCPU_SHIFT 12
+#define PERCPU_SHIFT 13
#define PERCPU_SIZE (1UL << PERCPU_SHIFT)
/* Separate out the type, so (int[3], foo) works. */
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- if (__addr_ok(__pu_addr)) \
+ if (access_ok(__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \
__pu_err; \
})
long __gu_err; \
__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__get_user_size((x),__gu_addr,(size),__gu_err,-EFAULT); \
- if (!__addr_ok(__gu_addr)) __gu_err = -EFAULT; \
+ if (!access_ok(__gu_addr,size)) __gu_err = -EFAULT; \
__gu_err; \
})
#ifndef __i386_UACCESS_H
#define __i386_UACCESS_H
-#define __addr_ok(addr) ((unsigned long)(addr) < HYPERVISOR_VIRT_START)
-
/*
* Test whether a block of memory is a valid user space address.
* Returns 0 if the range is valid, nonzero otherwise.
#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H
+#define COMPAT_ARG_XLAT_VIRT_BASE this_cpu(compat_arg_xlat)
+#define COMPAT_ARG_XLAT_SIZE PAGE_SIZE
+DECLARE_PER_CPU(char, compat_arg_xlat[COMPAT_ARG_XLAT_SIZE]);
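+/* Does [addr, addr+size) lie within the current CPU's translation area? */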
+#define is_compat_arg_xlat_range(addr, size) ({ \
+ unsigned long __off; \
+ __off = (unsigned long)(addr) - (unsigned long)COMPAT_ARG_XLAT_VIRT_BASE; \
+ (__off | (__off + (unsigned long)(size))) <= COMPAT_ARG_XLAT_SIZE; \
+})
+
/*
* Valid if in +ve half of 48-bit address space, or above Xen-reserved area.
* This is also valid for range checks (addr, addr+size). As long as the
(((unsigned long)(addr) < (1UL<<48)) || \
((unsigned long)(addr) >= HYPERVISOR_VIRT_END))
-#define access_ok(addr, size) (__addr_ok(addr))
-
-#define array_access_ok(addr, count, size) (__addr_ok(addr))
+#define access_ok(addr, size) \
+ (__addr_ok(addr) || is_compat_arg_xlat_range(addr, size))
-#ifdef CONFIG_COMPAT
+#define array_access_ok(addr, count, size) \
+ (access_ok(addr, (count)*(size)))
#define __compat_addr_ok(addr) \
((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(current->domain))
(likely((count) < (~0U / (size))) && \
compat_access_ok(addr, (count) * (size)))
-#endif
-
#define __put_user_size(x,ptr,size,retval,errret) \
do { \
retval = 0; \