#endif
}
-static int kexec_segments_add_segment(
- unsigned int *nr_segments, xen_kexec_segment_t *segments,
- unsigned long mfn)
+static int kexec_segments_add_segment(unsigned int *nr_segments,
+ xen_kexec_segment_t *segments,
+ mfn_t mfn)
{
- paddr_t maddr = (paddr_t)mfn << PAGE_SHIFT;
+ paddr_t maddr = mfn_to_maddr(mfn);
unsigned int n = *nr_segments;
/* Need a new segment? */
return 0;
}
-static int kexec_segments_from_ind_page(unsigned long mfn,
+static int kexec_segments_from_ind_page(mfn_t mfn,
unsigned int *nr_segments,
xen_kexec_segment_t *segments,
bool_t compat)
kimage_entry_t *entry;
int ret = 0;
- page = map_domain_page(_mfn(mfn));
+ page = map_domain_page(mfn);
/*
* Walk the indirection page list, adding destination pages to the
break;
case IND_INDIRECTION:
unmap_domain_page(page);
- entry = page = map_domain_page(_mfn(mfn));
+ entry = page = map_domain_page(mfn);
continue;
case IND_DONE:
goto done;
xen_kexec_segment_t *segments;
uint16_t arch;
unsigned int nr_segments = 0;
- unsigned long ind_mfn = load->image.indirection_page >> PAGE_SHIFT;
+ mfn_t ind_mfn = maddr_to_mfn(load->image.indirection_page);
int ret;
arch = kexec_load_v1_arch();
#include <asm/page.h>
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
+#undef page_to_mfn
+#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
+
/*
* When kexec transitions to the new kernel there is a one-to-one
* mapping between physical and virtual addresses. On processors
if ( !page )
return NULL;
- clear_domain_page(_mfn(page_to_mfn(page)));
+ clear_domain_page(page_to_mfn(page));
return page;
}
if ( page )
{
image->next_crash_page = hole_end;
- clear_domain_page(_mfn(page_to_mfn(page)));
+ clear_domain_page(page_to_mfn(page));
}
return page;
*old = (addr & ~PAGE_MASK) | IND_SOURCE;
unmap_domain_page(old);
- page = mfn_to_page(mfn_x(old_mfn));
+ page = mfn_to_page(old_mfn);
break;
}
else
return entry + 1;
}
-unsigned long kimage_entry_mfn(kimage_entry_t *entry, bool_t compat)
+mfn_t kimage_entry_mfn(kimage_entry_t *entry, bool_t compat)
{
if ( compat )
- return *(uint32_t *)entry >> PAGE_SHIFT;
- return *entry >> PAGE_SHIFT;
+ return maddr_to_mfn(*(uint32_t *)entry);
+ return maddr_to_mfn(*entry);
}
unsigned long kimage_entry_ind(kimage_entry_t *entry, bool_t compat)
return *entry & 0xf;
}
-int kimage_build_ind(struct kexec_image *image, unsigned long ind_mfn,
+int kimage_build_ind(struct kexec_image *image, mfn_t ind_mfn,
bool_t compat)
{
void *page;
int ret = 0;
paddr_t dest = KIMAGE_NO_DEST;
- page = map_domain_page(_mfn(ind_mfn));
+ page = map_domain_page(ind_mfn);
if ( !page )
return -ENOMEM;
for ( entry = page; ; )
{
unsigned long ind;
- unsigned long mfn;
+ mfn_t mfn;
ind = kimage_entry_ind(entry, compat);
mfn = kimage_entry_mfn(entry, compat);
switch ( ind )
{
case IND_DESTINATION:
- dest = (paddr_t)mfn << PAGE_SHIFT;
+ dest = mfn_to_maddr(mfn);
ret = kimage_set_destination(image, dest);
if ( ret < 0 )
goto done;
break;
case IND_INDIRECTION:
unmap_domain_page(page);
- page = map_domain_page(_mfn(mfn));
+ page = map_domain_page(mfn);
entry = page;
continue;
case IND_DONE:
goto done;
}
- copy_domain_page(_mfn(page_to_mfn(xen_page)), _mfn(mfn));
+ copy_domain_page(page_to_mfn(xen_page), mfn);
put_page(guest_page);
ret = kimage_add_page(image, page_to_maddr(xen_page));
unsigned memflags);
kimage_entry_t *kimage_entry_next(kimage_entry_t *entry, bool_t compat);
-unsigned long kimage_entry_mfn(kimage_entry_t *entry, bool_t compat);
+mfn_t kimage_entry_mfn(kimage_entry_t *entry, bool_t compat);
unsigned long kimage_entry_ind(kimage_entry_t *entry, bool_t compat);
-int kimage_build_ind(struct kexec_image *image, unsigned long ind_mfn,
+int kimage_build_ind(struct kexec_image *image, mfn_t ind_mfn,
bool_t compat);
#endif /* __ASSEMBLY__ */
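
For reference, a minimal standalone sketch of the typesafe-MFN pattern this conversion relies on. The wrapper struct and helpers below mirror the names used in the patch (mfn_t, _mfn(), mfn_x(), mfn_to_maddr(), maddr_to_mfn()), but they are simplified stand-ins for illustration, not the real definitions from Xen's headers:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Simplified stand-in for Xen's typesafe MFN: wrapping the raw value in a
 * struct turns accidental mixing of MFNs with plain integers (or with
 * machine addresses) into a compile-time error. */
typedef struct { unsigned long m; } mfn_t;
typedef uint64_t paddr_t;

static inline mfn_t _mfn(unsigned long m)      { return (mfn_t){ m }; }
static inline unsigned long mfn_x(mfn_t mfn)   { return mfn.m; }

/* The conversions the patch substitutes for the open-coded shifts. */
static inline paddr_t mfn_to_maddr(mfn_t mfn)
{
    return (paddr_t)mfn_x(mfn) << PAGE_SHIFT;
}

static inline mfn_t maddr_to_mfn(paddr_t ma)
{
    return _mfn(ma >> PAGE_SHIFT);
}

int main(void)
{
    /* The shift discards the sub-page bits, which is what kimage_entry_mfn()
     * depends on: the low bits of an entry hold the IND_* flags. */
    mfn_t mfn = maddr_to_mfn(0x12345678);
    printf("mfn %#lx -> maddr %#llx\n",
           mfn_x(mfn), (unsigned long long)mfn_to_maddr(mfn));
    return 0;
}
```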