32-on-64: Clean up and unify compat_arg_xlat_area handling.
authorKeir Fraser <keir.fraser@citrix.com>
Thu, 12 Jun 2008 14:22:35 +0000 (15:22 +0100)
committerKeir Fraser <keir.fraser@citrix.com>
Thu, 12 Jun 2008 14:22:35 +0000 (15:22 +0100)
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
19 files changed:
xen/arch/x86/domain.c
xen/arch/x86/domain_build.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/mm.c
xen/arch/x86/mm/shadow/multi.c
xen/arch/x86/x86_64/compat/mm.c
xen/arch/x86/x86_64/cpu_idle.c
xen/arch/x86/x86_64/mm.c
xen/common/compat/domain.c
xen/common/compat/grant_table.c
xen/common/compat/memory.c
xen/include/asm-x86/config.h
xen/include/asm-x86/domain.h
xen/include/asm-x86/hvm/guest_access.h
xen/include/asm-x86/mm.h
xen/include/asm-x86/percpu.h
xen/include/asm-x86/uaccess.h
xen/include/asm-x86/x86_32/uaccess.h
xen/include/asm-x86/x86_64/uaccess.h

index 3483efa8670efb13da85c8dc9c9be209561b71e9..6bc9ccc48dc912de80b5b9e6ee9fcc91a1de3837 100644 (file)
@@ -165,98 +165,10 @@ void free_vcpu_struct(struct vcpu *v)
 
 #ifdef CONFIG_COMPAT
 
-int setup_arg_xlat_area(struct vcpu *v, l4_pgentry_t *l4tab)
-{
-    struct domain *d = v->domain;
-    unsigned i;
-    struct page_info *pg;
-
-    if ( !d->arch.mm_arg_xlat_l3 )
-    {
-        pg = alloc_domheap_page(NULL, 0);
-        if ( !pg )
-            return -ENOMEM;
-        d->arch.mm_arg_xlat_l3 = page_to_virt(pg);
-        clear_page(d->arch.mm_arg_xlat_l3);
-    }
-
-    l4tab[l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
-        l4e_from_paddr(__pa(d->arch.mm_arg_xlat_l3), __PAGE_HYPERVISOR);
-
-    for ( i = 0; i < COMPAT_ARG_XLAT_PAGES; ++i )
-    {
-        unsigned long va = COMPAT_ARG_XLAT_VIRT_START(v->vcpu_id) + i * PAGE_SIZE;
-        l2_pgentry_t *l2tab;
-        l1_pgentry_t *l1tab;
-
-        if ( !l3e_get_intpte(d->arch.mm_arg_xlat_l3[l3_table_offset(va)]) )
-        {
-            pg = alloc_domheap_page(NULL, 0);
-            if ( !pg )
-                return -ENOMEM;
-            clear_page(page_to_virt(pg));
-            d->arch.mm_arg_xlat_l3[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR);
-        }
-        l2tab = l3e_to_l2e(d->arch.mm_arg_xlat_l3[l3_table_offset(va)]);
-        if ( !l2e_get_intpte(l2tab[l2_table_offset(va)]) )
-        {
-            pg = alloc_domheap_page(NULL, 0);
-            if ( !pg )
-                return -ENOMEM;
-            clear_page(page_to_virt(pg));
-            l2tab[l2_table_offset(va)] = l2e_from_page(pg, __PAGE_HYPERVISOR);
-        }
-        l1tab = l2e_to_l1e(l2tab[l2_table_offset(va)]);
-        BUG_ON(l1e_get_intpte(l1tab[l1_table_offset(va)]));
-        pg = alloc_domheap_page(NULL, 0);
-        if ( !pg )
-            return -ENOMEM;
-        l1tab[l1_table_offset(va)] = l1e_from_page(pg, PAGE_HYPERVISOR);
-    }
-
-    return 0;
-}
-
-static void release_arg_xlat_area(struct domain *d)
-{
-    if ( d->arch.mm_arg_xlat_l3 )
-    {
-        unsigned l3;
-
-        for ( l3 = 0; l3 < L3_PAGETABLE_ENTRIES; ++l3 )
-        {
-            if ( l3e_get_intpte(d->arch.mm_arg_xlat_l3[l3]) )
-            {
-                l2_pgentry_t *l2tab = l3e_to_l2e(d->arch.mm_arg_xlat_l3[l3]);
-                unsigned l2;
-
-                for ( l2 = 0; l2 < L2_PAGETABLE_ENTRIES; ++l2 )
-                {
-                    if ( l2e_get_intpte(l2tab[l2]) )
-                    {
-                        l1_pgentry_t *l1tab = l2e_to_l1e(l2tab[l2]);
-                        unsigned l1;
-
-                        for ( l1 = 0; l1 < L1_PAGETABLE_ENTRIES; ++l1 )
-                        {
-                            if ( l1e_get_intpte(l1tab[l1]) )
-                                free_domheap_page(l1e_get_page(l1tab[l1]));
-                        }
-                        free_domheap_page(l2e_get_page(l2tab[l2]));
-                    }
-                }
-                free_domheap_page(l3e_get_page(d->arch.mm_arg_xlat_l3[l3]));
-            }
-        }
-        free_domheap_page(virt_to_page(d->arch.mm_arg_xlat_l3));
-    }
-}
-
 static int setup_compat_l4(struct vcpu *v)
 {
     struct page_info *pg = alloc_domheap_page(NULL, 0);
     l4_pgentry_t *l4tab;
-    int rc;
 
     if ( pg == NULL )
         return -ENOMEM;
@@ -272,12 +184,6 @@ static int setup_compat_l4(struct vcpu *v)
         l4e_from_paddr(__pa(v->domain->arch.mm_perdomain_l3),
                        __PAGE_HYPERVISOR);
 
-    if ( (rc = setup_arg_xlat_area(v, l4tab)) < 0 )
-    {
-        free_domheap_page(pg);
-        return rc;
-    }
-
     v->arch.guest_table = pagetable_from_page(pg);
     v->arch.guest_table_user = v->arch.guest_table;
 
@@ -309,7 +215,6 @@ int switch_native(struct domain *d)
         return 0;
 
     d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
-    release_arg_xlat_area(d);
 
     /* switch gdt */
     gdt_l1e = l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
@@ -359,7 +264,6 @@ int switch_compat(struct domain *d)
 
  undo_and_fail:
     d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
-    release_arg_xlat_area(d);
     gdt_l1e = l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
     while ( vcpuid-- != 0 )
     {
@@ -372,7 +276,6 @@ int switch_compat(struct domain *d)
 }
 
 #else
-#define release_arg_xlat_area(d) ((void)0)
 #define setup_compat_l4(v) 0
 #define release_compat_l4(v) ((void)0)
 #endif
@@ -585,9 +488,6 @@ void arch_domain_destroy(struct domain *d)
     free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
 #endif
 
-    if ( is_pv_32on64_domain(d) )
-        release_arg_xlat_area(d);
-
     free_xenheap_page(d->shared_info);
 }
 
index e602b273f39b6ee0c56f481132130faf5b308cba..bf990a5ec619b5774a93391ba048bf1866364dda 100644 (file)
@@ -592,11 +592,7 @@ int __init construct_dom0(
         l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
     v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
     if ( is_pv_32on64_domain(d) )
-    {
         v->arch.guest_table_user = v->arch.guest_table;
-        if ( setup_arg_xlat_area(v, l4start) < 0 )
-            panic("Not enough RAM for domain 0 hypercall argument translation.\n");
-    }
 
     l4tab += l4_table_offset(v_start);
     mfn = alloc_spfn;
index d6c56d8efeef01e68b49fb8cd8b0a8e1d4e2ccb6..9f0f8f42f95c0cb9cfe50f3493dbfc3b6c1e3ff2 100644 (file)
@@ -1571,17 +1571,21 @@ enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
                       PFEC_page_present | pfec);
 }
 
-DEFINE_PER_CPU(int, guest_handles_in_xen_space);
+#ifdef __x86_64__
+DEFINE_PER_CPU(bool_t, hvm_64bit_hcall);
+#endif
 
-unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len)
+unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
 {
     int rc;
 
-    if ( this_cpu(guest_handles_in_xen_space) )
+#ifdef __x86_64__
+    if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(to, len) )
     {
         memcpy(to, from, len);
         return 0;
     }
+#endif
 
     rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from,
                                         len, 0);
@@ -1592,11 +1596,13 @@ unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
 {
     int rc;
 
-    if ( this_cpu(guest_handles_in_xen_space) )
+#ifdef __x86_64__
+    if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(from, len) )
     {
         memcpy(to, from, len);
         return 0;
     }
+#endif
 
     rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len, 0);
     return rc ? len : 0; /* fake a copy_from_user() return code */
@@ -1878,20 +1884,17 @@ static long hvm_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
             uint32_t idx;
             uint32_t gpfn;
         } u;
-        struct xen_add_to_physmap h;
+        struct xen_add_to_physmap *h = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
 
         if ( copy_from_guest(&u, arg, 1) )
             return -EFAULT;
 
-        h.domid = u.domid;
-        h.space = u.space;
-        h.idx = u.idx;
-        h.gpfn = u.gpfn;
-
-        this_cpu(guest_handles_in_xen_space) = 1;
-        rc = hvm_memory_op(cmd, guest_handle_from_ptr(&h, void));
-        this_cpu(guest_handles_in_xen_space) = 0;
+        h->domid = u.domid;
+        h->space = u.space;
+        h->idx = u.idx;
+        h->gpfn = u.gpfn;
 
+        rc = hvm_memory_op(cmd, guest_handle_from_ptr(h, void));
         break;
     }
 
@@ -1934,7 +1937,7 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
     switch ( mode )
     {
 #ifdef __x86_64__
-    case 8:
+    case 8:
 #endif
     case 4:
     case 2:
@@ -1963,11 +1966,13 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
         HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u(%lx, %lx, %lx, %lx, %lx)", eax,
                     regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8);
 
+        this_cpu(hvm_64bit_hcall) = 1;
         regs->rax = hvm_hypercall64_table[eax](regs->rdi,
                                                regs->rsi,
                                                regs->rdx,
                                                regs->r10,
-                                               regs->r8);
+                                               regs->r8);
+        this_cpu(hvm_64bit_hcall) = 0;
     }
     else
 #endif
index 1ee784230c3c3711f2c869aeab051cd7278ad5e1..5216f263f1049c9a244c827fc6507a298fb34e4b 100644 (file)
@@ -1253,10 +1253,6 @@ static int alloc_l4_table(struct page_info *page)
     pl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
         l4e_from_page(virt_to_page(d->arch.mm_perdomain_l3),
                       __PAGE_HYPERVISOR);
-    if ( is_pv_32on64_domain(d) )
-        pl4e[l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
-            l4e_from_page(virt_to_page(d->arch.mm_arg_xlat_l3),
-                          __PAGE_HYPERVISOR);
 
     return 1;
 
@@ -3008,7 +3004,7 @@ int do_update_va_mapping(unsigned long va, u64 val64,
 
     perfc_incr(calls_to_update_va);
 
-    if ( unlikely(!__addr_ok(va) && !paging_mode_external(d)) )
+    if ( unlikely(!access_ok(va, 1) && !paging_mode_external(d)) )
         return -EINVAL;
 
     rc = xsm_update_va_mapping(current->domain, val);
index deb12887f7d40acf6ec41b37c0625be7c7174e4c..a7e7dc1b22713710b533847d4a96e3a331053c3a 100644 (file)
@@ -1631,15 +1631,6 @@ void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn)
                                 __PAGE_HYPERVISOR);
     }
 
-    if ( is_pv_32on64_domain(v->domain) )
-    {
-        /* install compat arg xlat entry */
-        sl4e[shadow_l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
-            shadow_l4e_from_mfn(
-                    page_to_mfn(virt_to_page(d->arch.mm_arg_xlat_l3)),
-                    __PAGE_HYPERVISOR);
-    }
-
     sh_unmap_domain_page(sl4e);    
 }
 #endif
index a1de1bab27fdaf2056228b3e3638d631f02e9426..96030f924de7eafc5a77503e3d90c36f26a32dfa 100644 (file)
@@ -58,7 +58,7 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
     case XENMEM_add_to_physmap:
     {
         struct compat_add_to_physmap cmp;
-        struct xen_add_to_physmap *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
+        struct xen_add_to_physmap *nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
 
         if ( copy_from_guest(&cmp, arg, 1) )
             return -EFAULT;
@@ -72,7 +72,7 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
     case XENMEM_set_memory_map:
     {
         struct compat_foreign_memory_map cmp;
-        struct xen_foreign_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
+        struct xen_foreign_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
 
         if ( copy_from_guest(&cmp, arg, 1) )
             return -EFAULT;
@@ -91,7 +91,7 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
     case XENMEM_machine_memory_map:
     {
         struct compat_memory_map cmp;
-        struct xen_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
+        struct xen_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
 
         if ( copy_from_guest(&cmp, arg, 1) )
             return -EFAULT;
@@ -189,7 +189,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mmuext_op_compat_t) cmp_uops,
     if ( unlikely(!guest_handle_okay(cmp_uops, count)) )
         return -EFAULT;
 
-    set_xen_guest_handle(nat_ops, (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id));
+    set_xen_guest_handle(nat_ops, (void *)COMPAT_ARG_XLAT_VIRT_BASE);
 
     for ( ; count; count -= i )
     {
index 7f92f84a013fb16411d0a56d104bba9479b6c638..6718f30a54ddd82fe1cdb6fd67c933ddb1eb8453 100644 (file)
@@ -35,7 +35,7 @@ CHECK_processor_csd;
 DEFINE_XEN_GUEST_HANDLE(compat_processor_csd_t);
 DEFINE_XEN_GUEST_HANDLE(compat_processor_cx_t);
 
-#define xlat_page_start COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id)
+#define xlat_page_start ((unsigned long)COMPAT_ARG_XLAT_VIRT_BASE)
 #define xlat_page_size  COMPAT_ARG_XLAT_SIZE
 #define xlat_page_left_size(xlat_page_current) \
     (xlat_page_start + xlat_page_size - xlat_page_current)
index 624f3bbb12345cd3f6a35d2e5c0144925b84a350..666d2dde7849212ea4106bcea78ccb582c0d7e9f 100644 (file)
@@ -36,6 +36,8 @@
 unsigned int m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
 #endif
 
+DEFINE_PER_CPU(char, compat_arg_xlat[COMPAT_ARG_XLAT_SIZE]);
+
 /* Top-level master (and idle-domain) page directory. */
 l4_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
     idle_pg_table[L4_PAGETABLE_ENTRIES];
index fa8e8d99076b6f7c6550c6bf2deb2ac7748b9c4b..cf2a13b4e9871de109d6ffd6b41df0f72d2c7857 100644 (file)
@@ -87,7 +87,7 @@ int compat_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
 
         if ( copy_from_guest(&cmp, arg, 1) )
             return -EFAULT;
-        nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
+        nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
         XLAT_vcpu_set_singleshot_timer(nat, &cmp);
         rc = do_vcpu_op(cmd, vcpuid, guest_handle_from_ptr(nat, void));
         break;
index 8781a331cf4e3575806b34dc46a9f97052a104b5..fa4322e77d89e850e9dde5ff6c48a8869ee0350c 100644 (file)
@@ -97,7 +97,7 @@ int compat_grant_table_op(unsigned int cmd,
             struct compat_gnttab_copy copy;
         } cmp;
 
-        set_xen_guest_handle(nat.uop, (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id));
+        set_xen_guest_handle(nat.uop, (void *)COMPAT_ARG_XLAT_VIRT_BASE);
         switch ( cmd )
         {
         case GNTTABOP_setup_table:
index 5596c5d73a39845b193d70667372fe72e59c412a..491a9ce56831ac0dd34b9c6c4aa17ecfa0b63da6 100644 (file)
@@ -27,7 +27,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE(void) compat)
             struct compat_translate_gpfn_list xlat;
         } cmp;
 
-        set_xen_guest_handle(nat.hnd, (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id));
+        set_xen_guest_handle(nat.hnd, (void *)COMPAT_ARG_XLAT_VIRT_BASE);
         split = 0;
         switch ( op )
         {
index 87058ee3cd61dcd1a3f92544db4ffbc2cecfb9d4..c2d8ad1d6a9dfe4a03ec64723dd6b5812873bafb 100644 (file)
@@ -249,14 +249,6 @@ extern unsigned int video_mode, video_flags;
 
 #endif
 
-#define COMPAT_ARG_XLAT_VIRT_BASE      (1UL << ROOT_PAGETABLE_SHIFT)
-#define COMPAT_ARG_XLAT_SHIFT          0
-#define COMPAT_ARG_XLAT_PAGES          (1U << COMPAT_ARG_XLAT_SHIFT)
-#define COMPAT_ARG_XLAT_SIZE           (COMPAT_ARG_XLAT_PAGES << PAGE_SHIFT)
-#define COMPAT_ARG_XLAT_VIRT_START(vcpu_id) \
-    (COMPAT_ARG_XLAT_VIRT_BASE + ((unsigned long)(vcpu_id) << \
-                                  (PAGE_SHIFT + COMPAT_ARG_XLAT_SHIFT + 1)))
-
 #define PGT_base_page_table     PGT_l4_page_table
 
 #define __HYPERVISOR_CS64 0xe008
index cf225ced72085902581ad0df301612aca646c798..a47b628c1b3324501a43d4a22d44ab3c1c2cb16a 100644 (file)
@@ -208,7 +208,6 @@ struct arch_domain
 
 #ifdef CONFIG_COMPAT
     unsigned int hv_compat_vstart;
-    l3_pgentry_t *mm_arg_xlat_l3;
 #endif
 
     /* I/O-port admin-specified access capabilities. */
index 25ea341e5b1233dae624873d35df863a68c75eb3..f401ac9d8d02c279b7b4ab19b84a7b7eff81e992 100644 (file)
@@ -2,7 +2,7 @@
 #define __ASM_X86_HVM_GUEST_ACCESS_H__
 
 #include <xen/percpu.h>
-DECLARE_PER_CPU(int, guest_handles_in_xen_space);
+DECLARE_PER_CPU(bool_t, hvm_64bit_hcall);
 
 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len);
 unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len);
index 2145b2394d9de2d7f79cd0eb62995e0042feb308..e6b63e6772b415346414e1a6f8cbe8dc95900d7c 100644 (file)
@@ -342,10 +342,8 @@ int steal_page(
 int map_ldt_shadow_page(unsigned int);
 
 #ifdef CONFIG_COMPAT
-int setup_arg_xlat_area(struct vcpu *, l4_pgentry_t *);
 unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits);
 #else
-# define setup_arg_xlat_area(vcpu, l4tab) 0
 # define domain_clamp_alloc_bitsize(d, b) (b)
 #endif
 
index d76206587f6c68797c8a415c32e174eb5aed54f1..aca6d2b490a406ec36f61b9a17b631aaa3cf3265 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __X86_PERCPU_H__
 #define __X86_PERCPU_H__
 
-#define PERCPU_SHIFT 12
+#define PERCPU_SHIFT 13
 #define PERCPU_SIZE  (1UL << PERCPU_SHIFT)
 
 /* Separate out the type, so (int[3], foo) works. */
index e4b0cffcabe91e6ee23e56f01bd6b35db18a2191..cd43529cf8dcf46544ecf4dd101e683e44aa880d 100644 (file)
@@ -118,7 +118,7 @@ extern void __put_user_bad(void);
 ({                                                                     \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
-       if (__addr_ok(__pu_addr))                                       \
+       if (access_ok(__pu_addr,size))                                  \
                __put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \
        __pu_err;                                                       \
 })                                                     
@@ -135,7 +135,7 @@ extern void __put_user_bad(void);
        long __gu_err;                                          \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);           \
        __get_user_size((x),__gu_addr,(size),__gu_err,-EFAULT); \
-       if (!__addr_ok(__gu_addr)) __gu_err = -EFAULT;          \
+       if (!access_ok(__gu_addr,size)) __gu_err = -EFAULT;     \
        __gu_err;                                               \
 })                                                     
 
index 947593fd086114bbc301537902816b121f949c75..b2a92d2632b73423c6c651d7d519d741f5844897 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef __i386_UACCESS_H
 #define __i386_UACCESS_H
 
-#define __addr_ok(addr) ((unsigned long)(addr) < HYPERVISOR_VIRT_START)
-
 /*
  * Test whether a block of memory is a valid user space address.
  * Returns 0 if the range is valid, nonzero otherwise.
index 5c55b5beb919c0d1d1d1cbf88cfeb821a5bef47b..2450af21e3363a89af8ebc0653ba97b134dd6eb3 100644 (file)
@@ -1,6 +1,15 @@
 #ifndef __X86_64_UACCESS_H
 #define __X86_64_UACCESS_H
 
+#define COMPAT_ARG_XLAT_VIRT_BASE this_cpu(compat_arg_xlat)
+#define COMPAT_ARG_XLAT_SIZE      PAGE_SIZE
+DECLARE_PER_CPU(char, compat_arg_xlat[COMPAT_ARG_XLAT_SIZE]);
+#define is_compat_arg_xlat_range(addr, size) ({                               \
+    unsigned long __off;                                                      \
+    __off = (unsigned long)(addr) - (unsigned long)COMPAT_ARG_XLAT_VIRT_BASE; \
+    (__off | (__off + (unsigned long)(size))) <= PAGE_SIZE;                   \
+})
+
 /*
  * Valid if in +ve half of 48-bit address space, or above Xen-reserved area.
  * This is also valid for range checks (addr, addr+size). As long as the
     (((unsigned long)(addr) < (1UL<<48)) || \
      ((unsigned long)(addr) >= HYPERVISOR_VIRT_END))
 
-#define access_ok(addr, size) (__addr_ok(addr))
-
-#define array_access_ok(addr, count, size) (__addr_ok(addr))
+#define access_ok(addr, size) \
+    (__addr_ok(addr) || is_compat_arg_xlat_range(addr, size))
 
-#ifdef CONFIG_COMPAT
+#define array_access_ok(addr, count, size) \
+    (access_ok(addr, (count)*(size)))
 
 #define __compat_addr_ok(addr) \
     ((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(current->domain))
@@ -27,8 +36,6 @@
     (likely((count) < (~0U / (size))) && \
      compat_access_ok(addr, (count) * (size)))
 
-#endif
-
 #define __put_user_size(x,ptr,size,retval,errret)                      \
 do {                                                                   \
        retval = 0;                                                     \