* Only a limited amount of RAM, called xenheap, is always mapped on ARM32.
* For convenience always return false.
*/
-static inline bool arch_mfn_in_directmap(unsigned long mfn)
+static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
{
+    /* 'mfn' and 'nr' are deliberately ignored: no MFN range is ever in the ARM32 directmap. */
return false;
}
* On ARM64, all the RAM is currently direct mapped in Xen.
* Hence always return true.
*/
-static inline bool arch_mfn_in_directmap(unsigned long mfn)
+static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
{
+    /* All RAM is direct mapped on ARM64, so every MFN range qualifies. */
return true;
}
/*
* x86 maps part of physical memory via the directmap region.
- * Return whether the input MFN falls in that range.
+ * Return whether the MFN range falls within the directmap region.
*/
-static inline bool arch_mfn_in_directmap(unsigned long mfn)
+static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
{
unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);
- return mfn <= (virt_to_mfn(eva - 1) + 1);
+ /* NOTE(review): checks that the last MFN of the range (mfn + nr - 1) is below
+  * the directmap limit; assumes mfn + nr does not wrap and nr > 0 — confirm
+  * callers always pass bounded, non-empty ranges. */
+ return (mfn + nr) <= (virt_to_mfn(eva - 1) + 1);
}
#endif /* __ASM_X86_MM_H__ */
needed = 0;
}
else if ( *use_tail && nr >= needed &&
- arch_mfn_in_directmap(mfn + nr) &&
+ arch_mfns_in_directmap(mfn + nr - needed, needed) &&
(!xenheap_bits ||
!((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
{
PAGE_SIZE - sizeof(**avail) * NR_ZONES;
}
else if ( nr >= needed &&
- arch_mfn_in_directmap(mfn + needed) &&
+ arch_mfns_in_directmap(mfn, needed) &&
(!xenheap_bits ||
!((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
{