/* Make sure we have a correctly sized P->M table. */
if (max_pfn != xen_start_info.nr_pages) {
phys_to_machine_mapping = alloc_bootmem_low_pages(
- max_pfn * sizeof(unsigned long));
+ max_pfn * sizeof(unsigned int));
if (max_pfn > xen_start_info.nr_pages) {
/* set to INVALID_P2M_ENTRY */
memset(phys_to_machine_mapping, ~0,
- max_pfn * sizeof(unsigned long));
+ max_pfn * sizeof(unsigned int));
memcpy(phys_to_machine_mapping,
- (unsigned long *)xen_start_info.mfn_list,
- xen_start_info.nr_pages * sizeof(unsigned long));
+ (unsigned int *)xen_start_info.mfn_list,
+ xen_start_info.nr_pages * sizeof(unsigned int));
} else {
memcpy(phys_to_machine_mapping,
- (unsigned long *)xen_start_info.mfn_list,
- max_pfn * sizeof(unsigned long));
+ (unsigned int *)xen_start_info.mfn_list,
+ max_pfn * sizeof(unsigned int));
+ /* N.B. below relies on sizeof(int) == sizeof(long). */
if (HYPERVISOR_dom_mem_op(
MEMOP_decrease_reservation,
(unsigned long *)xen_start_info.mfn_list + max_pfn,
free_bootmem(
__pa(xen_start_info.mfn_list),
PFN_PHYS(PFN_UP(xen_start_info.nr_pages *
- sizeof(unsigned long))));
+ sizeof(unsigned int))));
}
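/*
 * Sketch (not part of the patch): the N.B. above flags that this code
 * treats xen_start_info.mfn_list interchangeably as an array of
 * unsigned long and of unsigned int entries, which is only safe when
 * the two types have the same width (true on i386).  A compile-time
 * check of that assumption could look like the following; the
 * negative-array-size trick is used here rather than any particular
 * kernel macro, and it deliberately fails to compile where the
 * assumption does not hold.
 */
typedef char p2m_entry_width_check[
        sizeof(unsigned int) == sizeof(unsigned long) ? 1 : -1];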
pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
- for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
+ for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned int)), j++ )
{
pfn_to_mfn_frame_list[j] =
virt_to_mfn(&phys_to_machine_mapping[i]);
extern unsigned long max_low_pfn;
unsigned long mfn = address >> PAGE_SHIFT;
unsigned long pfn = mfn_to_pfn(mfn);
- return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn));
+ return ((pfn < max_low_pfn) && (phys_to_machine_mapping[pfn] == mfn));
}
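/*
 * Illustrative sketch (not part of the patch): with 4-byte entries a
 * 4 KiB page of the P->M table holds PAGE_SIZE / sizeof(unsigned int)
 * = 1024 entries, which is why the loop above advances i by that much
 * per frame.  The single page used for pfn_to_mfn_frame_list holds
 * 1024 frame numbers on i386, enough to describe 1024 * 1024 PFNs,
 * i.e. 4 GiB of pseudo-physical memory at 4 KiB pages.  Stand-alone
 * demo; PAGE_SIZE == 4096 is assumed.
 */
#include <stdio.h>

int main(void)
{
        const unsigned long page_size = 4096;   /* assumed PAGE_SIZE */
        const unsigned long entries_per_frame = page_size / sizeof(unsigned int);
        const unsigned long frames_in_list = page_size / sizeof(unsigned long);

        printf("P2M entries per frame: %lu\n", entries_per_frame); /* 1024 */
        printf("PFNs coverable by one frame list page: %lu\n",
               entries_per_frame * frames_in_list); /* 1048576 (4 GiB) on i386 */
        return 0;
}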
/*
/* Make sure we have a large enough P->M table. */
if (end_pfn > xen_start_info.nr_pages) {
phys_to_machine_mapping = alloc_bootmem(
- max_pfn * sizeof(unsigned long));
+ max_pfn * sizeof(u32));
memset(phys_to_machine_mapping, ~0,
- max_pfn * sizeof(unsigned long));
+ max_pfn * sizeof(u32));
memcpy(phys_to_machine_mapping,
- (unsigned long *)xen_start_info.mfn_list,
- xen_start_info.nr_pages * sizeof(unsigned long));
+ (u32 *)xen_start_info.mfn_list,
+ xen_start_info.nr_pages * sizeof(u32));
free_bootmem(
__pa(xen_start_info.mfn_list),
PFN_PHYS(PFN_UP(xen_start_info.nr_pages *
- sizeof(unsigned long))));
+ sizeof(u32))));
}
pfn_to_mfn_frame_list = alloc_bootmem(PAGE_SIZE);
- for ( i=0, j=0; i < end_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
+ for ( i=0, j=0; i < end_pfn; i+=(PAGE_SIZE/sizeof(u32)), j++ )
{
pfn_to_mfn_frame_list[j] =
virt_to_mfn(&phys_to_machine_mapping[i]);
extern unsigned long max_low_pfn;
unsigned long mfn = address >> PAGE_SHIFT;
unsigned long pfn = mfn_to_pfn(mfn);
- return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn));
+ return ((pfn < max_low_pfn) && (phys_to_machine_mapping[pfn] == mfn));
}
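/*
 * Illustrative sketch (not part of the patch): on x86_64 an unsigned
 * long is 8 bytes, so switching the P->M table to u32 entries halves
 * its footprint and doubles the number of entries per 4 KiB frame
 * (512 -> 1024), which is why the frame-list loop above now steps by
 * PAGE_SIZE / sizeof(u32).  Stand-alone demo; PAGE_SIZE == 4096 and an
 * LP64 target are assumed, and the example end_pfn is made up.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const unsigned long page_size = 4096;      /* assumed PAGE_SIZE */
        const unsigned long end_pfn   = 1UL << 20; /* example: 4 GiB of pages */

        printf("table size with unsigned long entries: %lu bytes\n",
               end_pfn * sizeof(unsigned long));   /* 8 MiB on LP64 */
        printf("table size with u32 entries:           %lu bytes\n",
               end_pfn * sizeof(uint32_t));        /* 4 MiB */
        printf("entries per frame: %lu -> %lu\n",
               page_size / sizeof(unsigned long),  /* 512 on LP64 */
               page_size / sizeof(uint32_t));      /* 1024 */
        return 0;
}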
#elif defined(__x86_64__)
/*
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+#define INVALID_P2M_ENTRY (~0U)
+#define FOREIGN_FRAME(m) ((m) | 0x80000000U)
extern unsigned int *phys_to_machine_mapping;
-#define pfn_to_mfn(_pfn) ((unsigned long)(phys_to_machine_mapping[(_pfn)]))
-#define mfn_to_pfn(_mfn) ((unsigned long)(machine_to_phys_mapping[(_mfn)]))
+#define pfn_to_mfn(pfn) \
+((unsigned long)phys_to_machine_mapping[(unsigned int)(pfn)] & 0x7FFFFFFFUL)
+#define mfn_to_pfn(mfn) \
+((unsigned long)machine_to_phys_mapping[(unsigned int)(mfn)])
/* Definitions for machine and pseudophysical addresses. */
#ifdef CONFIG_X86_PAE
*
* NB2. When deliberately mapping foreign pages into the p2m table, you *must*
* use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
- * require. In all the cases we care about, the high bit gets shifted out
- * (e.g., phys_to_machine()) so behaviour there is correct.
+ * require. In all the cases we care about, the FOREIGN_FRAME bit is
+ * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
*/
-#define INVALID_P2M_ENTRY (~0U)
-#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
#define pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
#define pte_pfn(_pte) \
({ \
unsigned long mfn = pte_mfn(_pte); \
unsigned long pfn = mfn_to_pfn(mfn); \
- if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \
+ if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\
pfn = max_mapnr; /* special: force !pfn_valid() */ \
pfn; \
})
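/*
 * Illustrative sketch (not part of the patch): how a FOREIGN_FRAME()
 * entry behaves under the new 32-bit encoding.  Stand-alone user-space
 * demo; FOREIGN_FRAME and the 0x7FFFFFFF mask are taken from the
 * definitions above, while the stand-in table and the example pfn/mfn
 * values are made up.
 */
#include <assert.h>

#define FOREIGN_FRAME(m) ((m) | 0x80000000U)

static unsigned int p2m[16];   /* stand-in for phys_to_machine_mapping */

int main(void)
{
        unsigned long pfn = 5, mfn = 0x1234;

        /* Ordinary entry: the masked pfn_to_mfn() lookup returns the MFN
         * and the raw comparison in pte_pfn() sees a match, so the page
         * stays pfn_valid(). */
        p2m[pfn] = (unsigned int)mfn;
        assert(((unsigned long)p2m[pfn] & 0x7FFFFFFFUL) == mfn);
        assert(p2m[pfn] == mfn);

        /* Foreign entry: the masked lookup still yields the real MFN, but
         * the raw entry no longer equals the MFN, which is what makes
         * pte_pfn() above fall back to max_mapnr and force !pfn_valid(). */
        p2m[pfn] = FOREIGN_FRAME((unsigned int)mfn);
        assert(((unsigned long)p2m[pfn] & 0x7FFFFFFFUL) == mfn);
        assert(p2m[pfn] != mfn);

        return 0;
}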
return !pte.pte_low && !pte.pte_high;
}
-#define INVALID_P2M_ENTRY (~0U)
-#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
#define pte_mfn(_pte) ( ((_pte).pte_low >> PAGE_SHIFT) |\
(((_pte).pte_high & 0xfff) << (32-PAGE_SHIFT)) )
#define pte_pfn(_pte) \
({ \
unsigned long mfn = pte_mfn(_pte); \
unsigned long pfn = mfn_to_pfn(mfn); \
- if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \
+ if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\
pfn = max_mapnr; /* special: force !pfn_valid() */ \
pfn; \
})
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+#define INVALID_P2M_ENTRY (~0U)
+#define FOREIGN_FRAME(m) ((m) | 0x80000000U)
extern u32 *phys_to_machine_mapping;
-#define pfn_to_mfn(_pfn) ((unsigned long) phys_to_machine_mapping[(unsigned int)(_pfn)])
-#define mfn_to_pfn(_mfn) ((unsigned long) machine_to_phys_mapping[(unsigned int)(_mfn)])
+#define pfn_to_mfn(pfn) \
+((unsigned long)phys_to_machine_mapping[(unsigned int)(pfn)] & 0x7FFFFFFFUL)
+#define mfn_to_pfn(mfn) \
+((unsigned long)machine_to_phys_mapping[(unsigned int)(mfn)])
/* Definitions for machine and pseudophysical addresses. */
typedef unsigned long paddr_t;
*
* NB2. When deliberately mapping foreign pages into the p2m table, you *must*
* use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
- * require. In all the cases we care about, the high bit gets shifted out
- * (e.g., phys_to_machine()) so behaviour there is correct.
+ * require. In all the cases we care about, the FOREIGN_FRAME bit is
+ * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
*/
-#define INVALID_P2M_ENTRY (~0U)
-#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
#define pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
#define pte_pfn(_pte) \
({ \
unsigned long mfn = pte_mfn(_pte); \
unsigned pfn = mfn_to_pfn(mfn); \
- if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \
+ if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\
pfn = max_mapnr; /* special: force !pfn_valid() */ \
pfn; \
})