__func__, dom->kernel_size, dom->kernel_blob, dst);
memcpy(dst, dom->kernel_blob, dom->kernel_size);
+ xc_cache_flush(dom->xch, dst, dom->kernel_size);
return 0;
}
memcpy(dest, image + skip, text_size);
memset(dest + text_size, 0, bss_size);
+ xc_cache_flush(dom->xch, dest, text_size + bss_size);
return 0;
}
}
else
memcpy(ramdiskmap, dom->ramdisk_blob, dom->ramdisk_size);
+ xc_cache_flush(dom->xch, ramdiskmap, ramdisklen);
}
/* load devicetree */
goto err;
}
memcpy(devicetreemap, dom->devicetree_blob, dom->devicetree_size);
+ xc_cache_flush(dom->xch, devicetreemap, dom->devicetree_size);
}
/* allocate other pages */
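The hunks above all follow one pattern: copy a blob into a freshly mapped guest page, then flush the data cache over exactly the bytes written, so that a guest which starts with caches disabled sees the data. A minimal sketch of that pattern (the helper name, domid and gmfn are illustrative and not from this patch; xc_map_foreign_range is the existing libxc mapper, and xc_cache_flush is internal to libxc, declared in xc_private.h below, so such a helper belongs inside the library):

    #include <string.h>
    #include <sys/mman.h>
    #include <xenctrl.h>

    /* Illustrative helper: copy a blob into one guest page and
     * flush the data cache, as the loader hunks above do. */
    static int copy_blob_to_guest(xc_interface *xch, uint32_t domid,
                                  unsigned long gmfn,
                                  const void *blob, size_t size)
    {
        void *dst = xc_map_foreign_range(xch, domid, XC_PAGE_SIZE,
                                         PROT_READ | PROT_WRITE, gmfn);

        if ( dst == NULL )
            return -1;

        memcpy(dst, blob, size);
        xc_cache_flush(xch, dst, size); /* no-op on x86 */

        munmap(dst, XC_PAGE_SIZE);
        return 0;
    }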
#include <sys/mman.h>
#include <sys/ioctl.h>
+#include <sys/syscall.h>
#include <xen/memory.h>
#include <xen/sys/evtchn.h>
return ret;
}
+static void linux_privcmd_cache_flush(xc_interface *xch,
+ const void *ptr, size_t nr)
+{
+#if defined(__arm__)
+ unsigned long start = (unsigned long)ptr;
+ unsigned long end = start + nr;
+ /* cacheflush(unsigned long start, unsigned long end, int flags) */
+ int rc = syscall(__ARM_NR_cacheflush, start, end, 0);
+ if ( rc < 0 )
+ PERROR("cache flush operation failed: %d\n", errno);
+#elif defined(__aarch64__)
+ unsigned long start = (unsigned long)ptr;
+ unsigned long end = start + nr;
+ unsigned long p, ctr;
+ int stride;
+
+ /* Flush cache using direct DC CVAC instructions. This is
+ * available to EL0 when SCTLR_EL1.UCI is set, which Linux does.
+ *
+ * Bits 19:16 of CTR_EL0 (DminLine) are log2 of the minimum dcache
+ * line size in words, from which we derive our stride in bytes.
+ * This register is readable from EL0 when SCTLR_EL1.UCT is set,
+ * which Linux also does.
+ */
+ asm volatile ("mrs %0, ctr_el0" : "=r" (ctr));
+
+ stride = 4 * (1 << ((ctr & 0xf0000UL) >> 16));
+
+ for ( p = start ; p < end ; p += stride )
+ asm volatile ("dc cvac, %0" : : "r" (p));
+ asm volatile ("dsb sy");
+#elif defined(__i386__) || defined(__x86_64__)
+ /* No need for cache maintenance on x86 */
+#else
+ PERROR("No cache flush operation defined for architecture");
+ abort();
+#endif
+}
+
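For reference, a standalone sketch of the CTR_EL0 decode used above (assumes an AArch64 Linux process, where SCTLR_EL1.UCT permits the EL0 read, as the comment notes). For example, a DminLine value of 4 means 1 << 4 = 16 words, i.e. a 64-byte stride:

    #include <stdio.h>

    int main(void)
    {
        unsigned long ctr;
        int words, stride;

        /* CTR_EL0 is readable from EL0 when SCTLR_EL1.UCT is set. */
        asm volatile ("mrs %0, ctr_el0" : "=r" (ctr));

        /* DminLine, bits 19:16: log2 of the smallest D-cache line
         * size, expressed in 4-byte words. */
        words = 1 << ((ctr >> 16) & 0xf);
        stride = 4 * words;

        printf("DminLine: %d words => stride %d bytes\n", words, stride);
        return 0;
    }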
static struct xc_osdep_ops linux_privcmd_ops = {
.open = &linux_privcmd_open,
.close = &linux_privcmd_close,
.map_foreign_bulk = &linux_privcmd_map_foreign_bulk,
.map_foreign_range = &linux_privcmd_map_foreign_range,
.map_foreign_ranges = &linux_privcmd_map_foreign_ranges,
+
+ .cache_flush = &linux_privcmd_cache_flush,
},
};
return ret;
}
+static void minios_privcmd_cache_flush(xc_interface *xch,
+ const void *ptr, size_t nr)
+{
+#if defined(__i386__) || defined(__x86_64__)
+ /* No need for cache maintenance on x86 */
+#else
+ printf("No cache flush operation defined for architecture");
+ BUG();
+#endif
+}
static struct xc_osdep_ops minios_privcmd_ops = {
.open = &minios_privcmd_open,
.map_foreign_bulk = &minios_privcmd_map_foreign_bulk,
.map_foreign_range = &minios_privcmd_map_foreign_range,
.map_foreign_ranges = &minios_privcmd_map_foreign_ranges,
+
+ .cache_flush = &minios_privcmd_cache_flush,
},
};
#include <xen/sys/evtchn.h>
#include <unistd.h>
+#include <stdlib.h>
#include <fcntl.h>
#include <malloc.h>
#include <sys/mman.h>
return NULL;
}
+static void netbsd_privcmd_cache_flush(xc_interface *xch,
+ const void *ptr, size_t nr)
+{
+#if defined(__i386__) || defined(__x86_64__)
+ /* No need for cache maintenance on x86 */
+#else
+ PERROR("No cache flush operation defined for architecture");
+ abort();
+#endif
+}
+
static struct xc_osdep_ops netbsd_privcmd_ops = {
.open = &netbsd_privcmd_open,
.close = &netbsd_privcmd_close,
.map_foreign_bulk = &xc_map_foreign_bulk_compat,
.map_foreign_range = &netbsd_privcmd_map_foreign_range,
.map_foreign_ranges = &netbsd_privcmd_map_foreign_ranges,
+
+ .cache_flush = &netbsd_privcmd_cache_flush,
},
};
return xch->ops->u.privcmd.hypercall(xch, xch->ops_handle, hypercall);
}
+void xc_cache_flush(xc_interface *xch, const void *p, size_t n)
+{
+ xch->ops->u.privcmd.cache_flush(xch, p, n);
+}
+
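This wrapper assumes every osdep backend supplies .cache_flush, as all the ops tables in this patch do. If an out-of-tree osdep might leave the member NULL, a defensive variant would look like this (a sketch, not part of this patch):

    void xc_cache_flush(xc_interface *xch, const void *p, size_t n)
    {
        if ( xch->ops->u.privcmd.cache_flush )
            xch->ops->u.privcmd.cache_flush(xch, p, n);
    }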
xc_evtchn *xc_evtchn_open(xentoollog_logger *logger,
unsigned open_flags)
{
/* Optionally flush file to disk and discard page cache */
void discard_file_cache(xc_interface *xch, int fd, int flush);
+/* Flush data cache */
+void xc_cache_flush(xc_interface *xch, const void *p, size_t n);
+
#define MAX_MMU_UPDATES 1024
struct xc_mmu {
mmu_update_t updates[MAX_MMU_UPDATES];
#include <xen/memory.h>
#include <xen/sys/evtchn.h>
#include <unistd.h>
+#include <stdlib.h>
#include <fcntl.h>
#include <malloc.h>
return NULL;
}
+static void solaris_privcmd_cache_flush(xc_interface *xch,
+ const void *ptr, size_t nr)
+{
+#if defined(__i386__) || defined(__x86_64__)
+ /* No need for cache maintenance on x86 */
+#else
+ PERROR("No cache flush operation defined for architecture");
+ abort();
+#endif
+}
+
static struct xc_osdep_ops solaris_privcmd_ops = {
.open = &solaris_privcmd_open,
.close = &solaris_privcmd_close,
.map_foreign_bulk = &xc_map_foreign_bulk_compat,
.map_foreign_range = &solaris_privcmd_map_foreign_range,
.map_foreign_ranges = &solaris_privcmd_map_foreign_ranges,
+
+ .cache_flush = &solaris_privcmd_cache_flush,
},
};
return MAP_FAILED;
}
+static void ENOSYS_privcmd_cache_flush(xc_interface *xch, const void *p, size_t n)
+{
+ unsigned long start = (unsigned long)p;
+ unsigned long end = start + n;
+ IPRINTF(xch, "ENOSYS_privcmd: cache_flush: %#lx-%#lx\n", start, end);
+}
+
static struct xc_osdep_ops ENOSYS_privcmd_ops =
{
.open = &ENOSYS_privcmd_open,
.map_foreign_bulk = &ENOSYS_privcmd_map_foreign_bulk,
.map_foreign_range = &ENOSYS_privcmd_map_foreign_range,
.map_foreign_ranges = &ENOSYS_privcmd_map_foreign_ranges,
+
+ .cache_flush = &ENOSYS_privcmd_cache_flush,
}
};
void *(*map_foreign_ranges)(xc_interface *xch, xc_osdep_handle h, uint32_t dom, size_t size, int prot,
size_t chunksize, privcmd_mmap_entry_t entries[],
int nentries);
+ void (*cache_flush)(xc_interface *xch, const void *p, size_t n);
} privcmd;
struct {
int (*fd)(xc_evtchn *xce, xc_osdep_handle h);