#define INITIAL_NR_GRANT_FRAMES 1U
#define GNTTAB_MAX_VERSION 1
-struct grant_table_arch {
- gfn_t *shared_gfn;
- gfn_t *status_gfn;
-};
-
static inline void gnttab_clear_flags(struct domain *d,
unsigned int mask, uint16_t *addr)
{
#define gnttab_dom0_frames() \
min_t(unsigned int, opt_max_grant_frames, PFN_DOWN(_etext - _stext))
-#define gnttab_init_arch(gt) \
-({ \
- unsigned int ngf_ = (gt)->max_grant_frames; \
- unsigned int nsf_ = grant_to_status_frames(ngf_); \
- \
- (gt)->arch.shared_gfn = xmalloc_array(gfn_t, ngf_); \
- (gt)->arch.status_gfn = xmalloc_array(gfn_t, nsf_); \
- if ( (gt)->arch.shared_gfn && (gt)->arch.status_gfn ) \
- { \
- while ( ngf_-- ) \
- (gt)->arch.shared_gfn[ngf_] = INVALID_GFN; \
- while ( nsf_-- ) \
- (gt)->arch.status_gfn[nsf_] = INVALID_GFN; \
- } \
- else \
- gnttab_destroy_arch(gt); \
- (gt)->arch.shared_gfn ? 0 : -ENOMEM; \
-})
-
-#define gnttab_destroy_arch(gt) \
- do { \
- XFREE((gt)->arch.shared_gfn); \
- XFREE((gt)->arch.status_gfn); \
- } while ( 0 )
-
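+/*
+ * The GFN of a xenheap grant frame is now tracked in its struct page_info
+ * (updated from the P2M code via page_set_xenheap_gfn()), so this macro
+ * only needs to remove the existing mapping when the frame is being
+ * unmapped (i.e. gfn is INVALID_GFN).
+ */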
#define gnttab_set_frame_gfn(gt, st, idx, gfn, mfn) \
- ({ \
- int rc_ = 0; \
- gfn_t ogfn = gnttab_get_frame_gfn(gt, st, idx); \
- if ( gfn_eq(ogfn, INVALID_GFN) || gfn_eq(ogfn, gfn) || \
- (rc_ = guest_physmap_remove_page((gt)->domain, ogfn, mfn, \
- 0)) == 0 ) \
- ((st) ? (gt)->arch.status_gfn \
- : (gt)->arch.shared_gfn)[idx] = (gfn); \
- rc_; \
- })
+ (gfn_eq(gfn, INVALID_GFN) \
+ ? guest_physmap_remove_page((gt)->domain, \
+ gnttab_get_frame_gfn(gt, st, idx), \
+ mfn, 0) \
+ : 0)
#define gnttab_get_frame_gfn(gt, st, idx) ({ \
(st) ? gnttab_status_gfn(NULL, gt, idx) \
: gnttab_shared_gfn(NULL, gt, idx); \
})
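+
+/*
+ * Grant table frames are xenheap pages, so their struct page_info can be
+ * obtained from the stored virtual addresses and used to look up the GFN
+ * recorded by page_set_xenheap_gfn().
+ */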
+#define gnttab_shared_page(t, i) virt_to_page((t)->shared_raw[i])
+
+#define gnttab_status_page(t, i) virt_to_page((t)->status[i])
+
#define gnttab_shared_gfn(d, t, i) \
- (((i) >= nr_grant_frames(t)) ? INVALID_GFN : (t)->arch.shared_gfn[i])
+ page_get_xenheap_gfn(gnttab_shared_page(t, i))
#define gnttab_status_gfn(d, t, i) \
- (((i) >= nr_status_frames(t)) ? INVALID_GFN : (t)->arch.status_gfn[i])
+ page_get_xenheap_gfn(gnttab_status_page(t, i))
#define gnttab_need_iommu_mapping(d) \
(is_domain_direct_mapped(d) && is_iommu_enabled(d))
#define PGT_writable_page PG_mask(1, 1) /* has writable mappings? */
#define PGT_type_mask PG_mask(1, 1) /* Bits 31 or 63. */
- /* Count of uses of this frame as its current type. */
-#define PGT_count_width PG_shift(2)
-#define PGT_count_mask ((1UL<<PGT_count_width)-1)
+ /* 2-bit count of uses of this frame as its current type. */
+#define PGT_count_mask PG_mask(3, 3)
+
+/*
+ * The GFN is stored in bits [28:0] (arm32) or [60:0] (arm64) if the page
+ * is a xenheap page.
+ */
+#define PGT_gfn_width PG_shift(3)
+#define PGT_gfn_mask ((1UL<<PGT_gfn_width)-1)
+
+#define PGT_INVALID_XENHEAP_GFN _gfn(PGT_gfn_mask)
+
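+/*
+ * Resulting type_info layout for a xenheap page (arm32 / arm64):
+ *  bit  31 / 63           : page type (e.g. PGT_writable_page)
+ *  bits [30:29] / [62:61] : type count
+ *  bits [28:0] / [60:0]   : GFN the page is mapped at, or
+ *                           PGT_INVALID_XENHEAP_GFN
+ */
+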
+/*
+ * An arch-specific initialization pattern is needed for the type_info field,
+ * as its GFN portion may hold a valid GFN if the page is a xenheap page.
+ */
+#define PGT_TYPE_INFO_INITIALIZER gfn_x(PGT_INVALID_XENHEAP_GFN)
/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated PG_shift(1)
unsigned int arch_get_dma_bitsize(void);
+/*
+ * All accesses to the GFN portion of the type_info field should be
+ * protected by the P2M lock. Where that is not feasible (risk of deadlock,
+ * lock inversion, etc.), all unprotected updates to this field must be
+ * atomic.
+ */
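+
+/*
+ * Typical usage (see xenmem_add_to_physmap_one()): with the P2M write lock
+ * held, check that page_get_xenheap_gfn() returns INVALID_GFN, insert the
+ * P2M entry and, on success, record the new GFN with page_set_xenheap_gfn().
+ * The stored GFN is invalidated again by p2m_put_l3_page() when the mapping
+ * is removed.
+ */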
+static inline gfn_t page_get_xenheap_gfn(const struct page_info *p)
+{
+ gfn_t gfn_ = _gfn(ACCESS_ONCE(p->u.inuse.type_info) & PGT_gfn_mask);
+
+ ASSERT(is_xen_heap_page(p));
+
+ return gfn_eq(gfn_, PGT_INVALID_XENHEAP_GFN) ? INVALID_GFN : gfn_;
+}
+
+static inline void page_set_xenheap_gfn(struct page_info *p, gfn_t gfn)
+{
+ gfn_t gfn_ = gfn_eq(gfn, INVALID_GFN) ? PGT_INVALID_XENHEAP_GFN : gfn;
+ unsigned long x, nx, y = p->u.inuse.type_info;
+
+ ASSERT(is_xen_heap_page(p));
+
+ do {
+ x = y;
+ nx = (x & ~PGT_gfn_mask) | gfn_x(gfn_);
+ } while ( (y = cmpxchg(&p->u.inuse.type_info, x, nx)) != x );
+}
+
#endif /* __ARCH_ARM_MM__ */
/*
* Local variables:
spin_lock(&d->page_alloc_lock);
- /* The incremented type count pins as writable or read-only. */
- page->u.inuse.type_info =
- (flags == SHARE_ro ? PGT_none : PGT_writable_page) | 1;
+ /*
+ * The incremented type count pins as writable or read-only.
+ *
+     * Note that the update of the type_info field here is not atomic, as we
+     * use a Read-Modify-Write operation on it. This is currently fine because
+     * the caller of page_set_xenheap_gfn() (the other place where type_info
+     * is updated) would first need to acquire a reference on the page, which
+     * is only possible after the count_info is updated *and* there is a
+     * barrier between the type_info and count_info updates. So there is no
+     * immediate need to use cmpxchg() here.
+ */
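+    /* Only the type and count bits change; the GFN bits are preserved. */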
+ page->u.inuse.type_info &= ~(PGT_type_mask | PGT_count_mask);
+ page->u.inuse.type_info |= (flags == SHARE_ro ? PGT_none
+ : PGT_writable_page) |
+ MASK_INSR(1, PGT_count_mask);
page_set_owner(page, d);
smp_wmb(); /* install valid domain ptr before updating refcnt. */
return -ENOSYS;
}
- /* Map at new location. */
- rc = guest_physmap_add_entry(d, gfn, mfn, 0, t);
+ /*
+     * Map at new location. A xenheap RAM page needs to be handled differently
+     * here: we have to store the valid GFN and make sure that nothing was
+     * mapped there before (i.e. the stored GFN is still invalid), and both
+     * actions need to be performed with the P2M lock held.
+     * guest_physmap_add_entry() is just a wrapper on top of p2m_set_entry().
+     */
+ if ( !p2m_is_ram(t) || !is_xen_heap_mfn(mfn) )
+ rc = guest_physmap_add_entry(d, gfn, mfn, 0, t);
+ else
+ {
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+ p2m_write_lock(p2m);
+ if ( gfn_eq(page_get_xenheap_gfn(mfn_to_page(mfn)), INVALID_GFN) )
+ {
+ rc = p2m_set_entry(p2m, gfn, 1, mfn, t, p2m->default_access);
+ if ( !rc )
+ page_set_xenheap_gfn(mfn_to_page(mfn), gfn);
+ }
+ else
+ /*
+ * Mandate the caller to first unmap the page before mapping it
+ * again. This is to prevent Xen creating an unwanted hole in
+             * the P2M. For instance, this could happen if the firmware stole
+             * a RAM address to map the shared_info page into, but forgot to
+             * unmap it afterwards.
+ */
+ rc = -EBUSY;
+ p2m_write_unlock(p2m);
+ }
/*
* For XENMAPSPACE_gmfn_foreign if we failed to add the mapping, we need
*/
static void p2m_put_l3_page(const lpae_t pte)
{
+ mfn_t mfn = lpae_get_mfn(pte);
+
ASSERT(p2m_is_valid(pte));
/*
*/
if ( p2m_is_foreign(pte.p2m.type) )
{
- mfn_t mfn = lpae_get_mfn(pte);
-
ASSERT(mfn_valid(mfn));
put_page(mfn_to_page(mfn));
}
+    /* Detect a xenheap page and mark its stored GFN as invalid. */
+ else if ( p2m_is_ram(pte.p2m.type) && is_xen_heap_mfn(mfn) )
+ page_set_xenheap_gfn(mfn_to_page(mfn), INVALID_GFN);
}
/* Free lpae sub-tree behind an entry */
#define INITIAL_NR_GRANT_FRAMES 1U
-struct grant_table_arch {
-};
-
static inline int create_grant_host_mapping(uint64_t addr, mfn_t frame,
unsigned int flags,
unsigned int cache_flags)
return replace_grant_pv_mapping(addr, frame, new_addr, flags);
}
-#define gnttab_init_arch(gt) 0
-#define gnttab_destroy_arch(gt) do {} while ( 0 )
#define gnttab_set_frame_gfn(gt, st, idx, gfn, mfn) \
(gfn_eq(gfn, INVALID_GFN) \
? guest_physmap_remove_page((gt)->domain, \
/* Domain to which this struct grant_table belongs. */
struct domain *domain;
-
- struct grant_table_arch arch;
};
unsigned int __read_mostly opt_max_grant_frames = 64;
grant_write_lock(gt);
- ret = gnttab_init_arch(gt);
- if ( ret )
- goto unlock;
-
/* gnttab_grow_table() allocates a min number of frames, so 0 is okay. */
ret = gnttab_grow_table(d, 0);
- unlock:
grant_write_unlock(gt);
out:
if ( t == NULL )
return;
- gnttab_destroy_arch(t);
-
for ( i = 0; i < nr_grant_frames(t); i++ )
free_xenheap_page(t->shared_raw[i]);
xfree(t->shared_raw);
#define PGC_reserved 0
#endif
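+
+/*
+ * An architecture may want free pages to start with a non-zero type_info
+ * (e.g. Arm keeps an "invalid GFN" pattern there); default to 0 otherwise.
+ */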
+#ifndef PGT_TYPE_INFO_INITIALIZER
+#define PGT_TYPE_INFO_INITIALIZER 0
+#endif
+
/*
* Comma-separated list of hexadecimal page numbers containing bad bytes.
* e.g. 'badpage=0x3f45,0x8a321'.
&tlbflush_timestamp);
/* Initialise fields which have other uses for free pages. */
- pg[i].u.inuse.type_info = 0;
+ pg[i].u.inuse.type_info = PGT_TYPE_INFO_INITIALIZER;
page_set_owner(&pg[i], NULL);
}
*/
pg[i].count_info = PGC_reserved | PGC_state_inuse;
/* Initialise fields which have other uses for free pages. */
- pg[i].u.inuse.type_info = 0;
+ pg[i].u.inuse.type_info = PGT_TYPE_INFO_INITIALIZER;
page_set_owner(&pg[i], NULL);
}