#include <asm/page.h>
#include <asm/string.h>
#include <asm/p2m.h>
+#include <asm/altp2m.h>
#include <asm/atomic.h>
#include <asm/event.h>
#include <xsm/xsm.h>
int expected_refcnt,
shr_handle_t *phandle)
{
+ struct p2m_domain *hp2m = p2m_get_hostp2m(d);
p2m_type_t p2mt;
+ p2m_access_t p2ma;
mfn_t mfn;
struct page_info *page = NULL; /* gcc... */
int ret;
*phandle = 0UL;
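+    /* Query the host p2m for both type and access, so the altp2m views can
+     * be checked against the host values below. */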
- mfn = get_gfn(d, gfn, &p2mt);
+ mfn = get_gfn_type_access(hp2m, gfn, &p2mt, &p2ma, 0, NULL);
/* Check if mfn is valid */
ret = -EINVAL;
if ( !p2m_is_sharable(p2mt) )
goto out;
+ /* Check if there are mem_access/remapped altp2m entries for this page */
+ if ( altp2m_active(d) )
+ {
+ unsigned int i;
+ struct p2m_domain *ap2m;
+ mfn_t amfn;
+        p2m_type_t ap2mt;
+        p2m_access_t ap2ma;
+
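+        /* The list lock keeps the set of altp2m views stable while we walk
+         * d->arch.altp2m_p2m[]. */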
+ altp2m_list_lock(d);
+
+ for ( i = 0; i < MAX_ALTP2M; i++ )
+ {
+ ap2m = d->arch.altp2m_p2m[i];
+ if ( !ap2m )
+ continue;
+
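+            /* A differing mfn means the gfn was remapped in this view; a
+             * differing access means a mem_access restriction is in place.
+             * In either case the page cannot be shared safely. */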
+            amfn = get_gfn_type_access(ap2m, gfn, &ap2mt, &ap2ma, 0, NULL);
+            __put_gfn(ap2m, gfn);
+
+            if ( mfn_valid(amfn) && (!mfn_eq(amfn, mfn) || ap2ma != p2ma) )
+            {
+                altp2m_list_unlock(d);
+                goto out;
+            }
+ }
+
+ altp2m_list_unlock(d);
+ }
+
/* Try to convert the mfn to the sharable type */
page = mfn_to_page(mfn);
ret = page_make_sharable(d, page, expected_refcnt);
declare_mm_rwlock(p2m);
+/* Sharing per page lock
+ *
+ * This is an external lock, not represented by an mm_lock_t. The memory
+ * sharing lock uses it to protect addition and removal of (gfn,domain)
+ * tuples to a shared page. We enforce order here against the p2m lock,
+ * which is taken after the page_lock to change the gfn's p2m entry.
+ *
+ * The lock is recursive because during share we lock two pages. */
+
+declare_mm_order_constraint(per_page_sharing)
+#define page_sharing_mm_pre_lock() mm_enforce_order_lock_pre_per_page_sharing()
+#define page_sharing_mm_post_lock(l, r) \
+ mm_enforce_order_lock_post_per_page_sharing((l), (r))
+#define page_sharing_mm_unlock(l, r) mm_enforce_order_unlock((l), (r))
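+/* Expected call pattern, a sketch after mem_sharing_page_lock() /
+ * mem_sharing_page_unlock() in mem_sharing.c (pld is the per-cpu
+ * pg_lock_data_t):
+ *
+ *     page_sharing_mm_pre_lock();
+ *     if ( page_lock(pg) )
+ *         page_sharing_mm_post_lock(&pld->mm_unlock_level,
+ *                                   &pld->recurse_count);
+ *     ...
+ *     page_sharing_mm_unlock(pld->mm_unlock_level, &pld->recurse_count);
+ */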
+
/* Alternate P2M list lock (per-domain)
*
* A per-domain lock that protects the list of alternate p2m's.
#define p2m_locked_by_me(p) mm_write_locked_by_me(&(p)->lock)
#define gfn_locked_by_me(p,g) p2m_locked_by_me(p)
-/* Sharing per page lock
- *
- * This is an external lock, not represented by an mm_lock_t. The memory
- * sharing lock uses it to protect addition and removal of (gfn,domain)
- * tuples to a shared page. We enforce order here against the p2m lock,
- * which is taken after the page_lock to change the gfn's p2m entry.
- *
- * The lock is recursive because during share we lock two pages. */
-
-declare_mm_order_constraint(per_page_sharing)
-#define page_sharing_mm_pre_lock() mm_enforce_order_lock_pre_per_page_sharing()
-#define page_sharing_mm_post_lock(l, r) \
- mm_enforce_order_lock_post_per_page_sharing((l), (r))
-#define page_sharing_mm_unlock(l, r) mm_enforce_order_unlock((l), (r))
-
/* PoD lock (per-p2m-table)
*
* Protects private PoD data structs: entry and cache
/* Check host p2m if no valid entry in alternate */
if ( !mfn_valid(mfn) )
{
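+        /* P2M_ALLOC | P2M_UNSHARE: populate the host entry if necessary and
+         * break any sharing, since only a plain r/w page passes the check
+         * below. */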
-        mfn = hp2m->get_entry(hp2m, gfn_l, &t, &old_a,
-                              P2M_ALLOC | P2M_UNSHARE, &page_order, NULL);
+        mfn = get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
+                                  P2M_ALLOC | P2M_UNSHARE, &page_order);
+        __put_gfn(hp2m, gfn_l);
rc = -ESRCH;
if ( !mfn_valid(mfn) || t != p2m_ram_rw )
            return rc;
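+    /* Note: P2M_UNSHARE is dropped below, presumably so that a shared host
+     * entry can back the view as-is; nominate now refuses to share any page
+     * whose altp2m views disagree with the host p2m. */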
mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma,
- P2M_ALLOC | P2M_UNSHARE, &page_order);
+ P2M_ALLOC, &page_order);
__put_gfn(hp2m, gfn_x(gfn));
if ( mfn_eq(mfn, INVALID_MFN) )
/* Check host p2m if no valid entry in alternate */
if ( !mfn_valid(mfn) )
{
-        mfn = hp2m->get_entry(hp2m, gfn_x(old_gfn), &t, &a,
-                              P2M_ALLOC | P2M_UNSHARE, &page_order, NULL);
+        mfn = get_gfn_type_access(hp2m, gfn_x(old_gfn), &t, &a,
+                                  P2M_ALLOC | P2M_UNSHARE, &page_order);
+        __put_gfn(hp2m, gfn_x(old_gfn));
if ( !mfn_valid(mfn) || t != p2m_ram_rw )
goto out;
if ( !mfn_valid(mfn) )
mfn = hp2m->get_entry(hp2m, gfn_x(new_gfn), &t, &a, 0, NULL, NULL);
+ /* Note: currently it is not safe to remap to a shared entry */
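+    /* (Remapping to a shared entry would let the view diverge from the host
+     *  p2m behind the sharing code's back, defeating the nominate-time check
+     *  added above.) */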
if ( !mfn_valid(mfn) || (t != p2m_ram_rw) )
goto out;