The basic idea is similar to the Persistent Kernel Map (PKMAP) in
Linux: we pre-populate all the relevant page tables before the system
is fully set up.
We will need it on Arm in order to rework the arm64 version of
xenheap_setup_mappings() as we may need to use pages allocated from
the boot allocator before they are effectively mapped.
This infrastructure is not lock-protected, therefore it can only be used
before smpboot. After smpboot, map_domain_page() has to be used instead.
This is based on the x86 version [1] that was originally implemented
by Wei Liu.
The PMAP infrastructure is implemented in common code, with some
arch helpers to set/clear the page-table entries and to convert
between a fixmap slot and a virtual address.
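Concretely, the common code only relies on the small per-arch interface
sketched below (informal prototypes; on Arm, fix_to_virt() is a macro
and virt_to_fix() a static inline, as introduced in the hunks further
down):

    /* Install a R/W mapping of mfn in the given fixmap slot. */
    void arch_pmap_map(unsigned int slot, mfn_t mfn);
    /* Tear the mapping down and flush the local TLB for that slot. */
    void arch_pmap_unmap(unsigned int slot);
    /* Convert between a fixmap slot and its virtual address. */
    void *fix_to_virt(unsigned int slot);
    unsigned int virt_to_fix(vaddr_t vaddr);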
As mfn_to_xen_entry() now needs to be exported, take the opportunity
to switch the parameter attr from unsigned to unsigned int.
[1] <e92da4ad6015b6089737fcccba3ec1d6424649a5.1588278317.git.hongyxia@amazon.com>
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: Hongyan Xia <hongyxia@amazon.com>
[julien: Adapted for Arm]
Signed-off-by: Julien Grall <jgrall@amazon.com>
Reviewed-by: Luca Fancellu <luca.fancellu@arm.com>
Tested-by: Luca Fancellu <luca.fancellu@arm.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
select HAS_DEVICE_TREE
select HAS_PASSTHROUGH
select HAS_PDX
+ select HAS_PMAP
select IOMMU_FORCE_PT_SHARE
config ARCH_DEFCONFIG
#define __ASM_FIXMAP_H
#include <xen/acpi.h>
+#include <xen/pmap.h>
/* Fixmap slots */
#define FIXMAP_CONSOLE 0 /* The primary UART */
#define FIXMAP_MISC 1 /* Ephemeral mappings of hardware */
#define FIXMAP_ACPI_BEGIN 2 /* Start mappings of ACPI tables */
#define FIXMAP_ACPI_END (FIXMAP_ACPI_BEGIN + NUM_FIXMAP_ACPI_PAGES - 1) /* End mappings of ACPI tables */
+#define FIXMAP_PMAP_BEGIN (FIXMAP_ACPI_END + 1) /* Start of PMAP */
+#define FIXMAP_PMAP_END (FIXMAP_PMAP_BEGIN + NUM_FIX_PMAP - 1) /* End of PMAP */
+
+#define FIXMAP_LAST FIXMAP_PMAP_END
+
+#define FIXADDR_START FIXMAP_ADDR(0)
+#define FIXADDR_TOP FIXMAP_ADDR(FIXMAP_LAST)
#ifndef __ASSEMBLY__
+/*
+ * Direct access to xen_fixmap[] should only happen when {set,
+ * clear}_fixmap() is unusable (e.g. where we would end up
+ * recursively calling the helpers).
+ */
+extern lpae_t xen_fixmap[XEN_PT_LPAE_ENTRIES];
+
/* Map a page in a fixmap entry */
extern void set_fixmap(unsigned map, mfn_t mfn, unsigned attributes);
/* Remove a mapping from a fixmap entry */
extern void clear_fixmap(unsigned map);
+#define fix_to_virt(slot) ((void *)FIXMAP_ADDR(slot))
+
+static inline unsigned int virt_to_fix(vaddr_t vaddr)
+{
+ BUG_ON(vaddr >= FIXADDR_TOP + PAGE_SIZE || vaddr < FIXADDR_START);
+
+ return ((vaddr - FIXADDR_START) >> PAGE_SHIFT);
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_FIXMAP_H */
#ifndef __ASSEMBLY__
#include <xen/page-defs.h>
+#include <xen/mm-frame.h>
/*
* WARNING! Unlike the x86 pagetable code, where l1 is the lowest level and
third_table_offset(addr) \
}
+/*
+ * Standard entry type that we'll use to build Xen's own pagetables.
+ * We put the same permissions at every level, because they're ignored
+ * by the walker in non-leaf entries.
+ */
+lpae_t mfn_to_xen_entry(mfn_t mfn, unsigned int attr);
+
#endif /* __ASSEMBLY__ */
/*
--- /dev/null
+#ifndef __ASM_PMAP_H__
+#define __ASM_PMAP_H__
+
+#include <xen/mm.h>
+
+#include <asm/fixmap.h>
+
+static inline void arch_pmap_map(unsigned int slot, mfn_t mfn)
+{
+ lpae_t *entry = &xen_fixmap[slot];
+ lpae_t pte;
+
+ ASSERT(!lpae_is_valid(*entry));
+
+ pte = mfn_to_xen_entry(mfn, PAGE_HYPERVISOR_RW);
+ pte.pt.table = 1;
+ write_pte(entry, pte);
+}
+
+static inline void arch_pmap_unmap(unsigned int slot)
+{
+ lpae_t pte = {};
+
+ write_pte(&xen_fixmap[slot], pte);
+
+ flush_xen_tlb_range_va_local(FIXMAP_ADDR(slot), PAGE_SIZE);
+}
+
+#endif /* __ASM_PMAP_H__ */
dump_pt_walk(ttbr, addr, HYP_PT_ROOT_LEVEL, 1);
}
-/*
- * Standard entry type that we'll use to build Xen's own pagetables.
- * We put the same permissions at every level, because they're ignored
- * by the walker in non-leaf entries.
- */
-static inline lpae_t mfn_to_xen_entry(mfn_t mfn, unsigned attr)
+lpae_t mfn_to_xen_entry(mfn_t mfn, unsigned int attr)
{
lpae_t e = (lpae_t) {
.pt = {
config HAS_PDX
bool
+config HAS_PMAP
+ bool
+
config HAS_SCHED_GRANULARITY
bool
obj-y += page_alloc.o
obj-$(CONFIG_HAS_PDX) += pdx.o
obj-$(CONFIG_PERF_COUNTERS) += perfc.o
+obj-bin-$(CONFIG_HAS_PMAP) += pmap.init.o
obj-y += preempt.o
obj-y += random.o
obj-y += rangeset.o
--- /dev/null
+#include <xen/bitops.h>
+#include <xen/init.h>
+#include <xen/irq.h>
+#include <xen/pmap.h>
+
+#include <asm/pmap.h>
+#include <asm/fixmap.h>
+
+/*
+ * Simple mapping infrastructure to map / unmap pages in the fixmap.
+ * This is used to set up the page tables before the map domain page
+ * infrastructure is initialized.
+ *
+ * This structure is not protected by any locks, so it must not be used after
+ * smp bring-up.
+ */
+
+/* Bitmap to track which slot is used */
+static __initdata DECLARE_BITMAP(inuse, NUM_FIX_PMAP);
+
+void *__init pmap_map(mfn_t mfn)
+{
+ unsigned int idx;
+ unsigned int slot;
+
+ ASSERT(system_state < SYS_STATE_smp_boot);
+ ASSERT(!in_irq());
+
+ idx = find_first_zero_bit(inuse, NUM_FIX_PMAP);
+ if ( idx == NUM_FIX_PMAP )
+ panic("Out of PMAP slots\n");
+
+ __set_bit(idx, inuse);
+
+ slot = idx + FIXMAP_PMAP_BEGIN;
+ ASSERT(slot >= FIXMAP_PMAP_BEGIN && slot <= FIXMAP_PMAP_END);
+
+ /*
+ * We cannot use set_fixmap() here. We use PMAP when the domain map
+ * page infrastructure is not yet initialized, so map_pages_to_xen() called
+ * by set_fixmap() needs to map pages on demand, which then calls pmap_map()
+ * again, resulting in a loop. Modify the PTEs directly instead. The same
+ * is true for pmap_unmap().
+ */
+ arch_pmap_map(slot, mfn);
+
+ return fix_to_virt(slot);
+}
+
+void __init pmap_unmap(const void *p)
+{
+ unsigned int idx;
+ unsigned int slot = virt_to_fix((unsigned long)p);
+
+ ASSERT(system_state < SYS_STATE_smp_boot);
+ ASSERT(slot >= FIXMAP_PMAP_BEGIN && slot <= FIXMAP_PMAP_END);
+ ASSERT(!in_irq());
+
+ idx = slot - FIXMAP_PMAP_BEGIN;
+
+ __clear_bit(idx, inuse);
+ arch_pmap_unmap(slot);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#ifndef __XEN_PMAP_H__
+#define __XEN_PMAP_H__
+
+/* Large enough for mapping 5 levels of page tables with some headroom */
+#define NUM_FIX_PMAP 8
+
+#ifndef __ASSEMBLY__
+
+#include <xen/mm-frame.h>
+
+void *pmap_map(mfn_t mfn);
+void pmap_unmap(const void *p);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __XEN_PMAP_H__ */