xen_pfn_t new_gfn);
int xc_altp2m_get_vcpu_p2m_idx(xc_interface *handle, uint32_t domid,
uint32_t vcpuid, uint16_t *p2midx);
+/*
+ * Set view visibility for xc_altp2m_switch_to_view and vmfunc.
+ * Note: If altp2m mode is set to mixed, the guest is able to change the
+ * view visibility and then call vmfunc.
+ */
+int xc_altp2m_set_visibility(xc_interface *handle, uint32_t domid,
+ uint16_t view_id, bool visible);
/**
* Mem paging operations.
xc_hypercall_buffer_free(handle, arg);
return rc;
}
+
+/*
+ * Issue HVMOP_altp2m_set_visibility for view_id of the given domain.
+ * Returns -1 if the hypercall buffer cannot be allocated, otherwise the
+ * return value of xencall2() (0 on success).
+ */
+int xc_altp2m_set_visibility(xc_interface *handle, uint32_t domid,
+ uint16_t view_id, bool visible)
+{
+ int rc;
+
+ DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+ arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+ if ( arg == NULL )
+ return -1;
+
+ arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+ arg->cmd = HVMOP_altp2m_set_visibility;
+ arg->domain = domid;
+ arg->u.set_visibility.altp2m_idx = view_id;
+ arg->u.set_visibility.visible = visible;
+
+ rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+ HYPERCALL_BUFFER_AS_ARG(arg));
+
+ /* The buffer is only needed for the duration of the hypercall. */
+ xc_hypercall_buffer_free(handle, arg);
+ return rc;
+}
case HVMOP_altp2m_get_mem_access:
case HVMOP_altp2m_change_gfn:
case HVMOP_altp2m_get_p2m_idx:
+ case HVMOP_altp2m_set_visibility:
break;
default:
break;
}
+ case HVMOP_altp2m_set_visibility:
+ {
+ unsigned int idx = a.u.set_visibility.altp2m_idx;
+
+ /* The interface requires the pad field to be zero. */
+ if ( a.u.set_visibility.pad )
+ rc = -EINVAL;
+ else if ( !altp2m_active(d) )
+ rc = -EOPNOTSUPP;
+ else
+ rc = p2m_set_altp2m_view_visibility(d, idx,
+ a.u.set_visibility.visible);
+
+ /*
+ * Without this break the case falls through into
+ * "default: ASSERT_UNREACHABLE();" below, crashing debug builds on
+ * every (valid) HVMOP_altp2m_set_visibility invocation.
+ */
+ break;
+ }
+
default:
ASSERT_UNREACHABLE();
}
{
v->arch.hvm.vmx.secondary_exec_control |= mask;
__vmwrite(VM_FUNCTION_CONTROL, VMX_VMFUNC_EPTP_SWITCHING);
- __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m_eptp));
+ __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m_visible_eptp));
if ( cpu_has_vmx_virt_exceptions )
{
goto out;
}
+ if ( (d->arch.altp2m_visible_eptp = alloc_xenheap_page()) == NULL )
+ {
+ rv = -ENOMEM;
+ goto out;
+ }
+
for ( i = 0; i < MAX_EPTP; i++ )
+ {
d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
+ d->arch.altp2m_visible_eptp[i] = mfn_x(INVALID_MFN);
+ }
for ( i = 0; i < MAX_ALTP2M; i++ )
{
d->arch.altp2m_eptp = NULL;
}
+ if ( d->arch.altp2m_visible_eptp )
+ {
+ free_xenheap_page(d->arch.altp2m_visible_eptp);
+ d->arch.altp2m_visible_eptp = NULL;
+ }
+
for ( i = 0; i < MAX_ALTP2M; i++ )
p2m_teardown(d->arch.altp2m_p2m[i]);
}
ept = &p2m->ept;
ept->mfn = pagetable_get_pfn(p2m_get_pagetable(p2m));
d->arch.altp2m_eptp[array_index_nospec(i, MAX_EPTP)] = ept->eptp;
+ d->arch.altp2m_visible_eptp[array_index_nospec(i, MAX_EPTP)] = ept->eptp;
}
unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
{
p2m_reset_altp2m(d, i, ALTP2M_DEACTIVATE);
d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
+ d->arch.altp2m_visible_eptp[i] = mfn_x(INVALID_MFN);
}
altp2m_list_unlock(d);
{
p2m_reset_altp2m(d, idx, ALTP2M_DEACTIVATE);
d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] =
- mfn_x(INVALID_MFN);
+ mfn_x(INVALID_MFN);
+ d->arch.altp2m_visible_eptp[array_index_nospec(idx, MAX_EPTP)] =
+ mfn_x(INVALID_MFN);
rc = 0;
}
}
rc = -EINVAL;
altp2m_list_lock(d);
- if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
+ if ( d->arch.altp2m_visible_eptp[idx] != mfn_x(INVALID_MFN) )
{
for_each_vcpu( d, v )
if ( idx != vcpu_altp2m(v).p2midx )
return rc;
}
+
+/*
+ * Make the altp2m view at altp2m_idx visible or invisible to EPTP
+ * switching by copying its real EPTP into the visible-EPTP array, or
+ * replacing the visible entry with INVALID_MFN to hide it.
+ * Returns 0 on success, -EINVAL if the index is out of range or the view
+ * at that index has not been initialised (its EPTP is INVALID_MFN).
+ */
+int p2m_set_altp2m_view_visibility(struct domain *d, unsigned int altp2m_idx,
+ uint8_t visible)
+{
+ int rc = 0;
+
+ altp2m_list_lock(d);
+
+ /*
+ * Eptp index is correlated with altp2m index and should not exceed
+ * min(MAX_ALTP2M, MAX_EPTP).
+ */
+ /* array_index_nospec() clamps the index under speculative execution. */
+ if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
+ d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
+ mfn_x(INVALID_MFN) )
+ rc = -EINVAL;
+ else if ( visible )
+ d->arch.altp2m_visible_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] =
+ d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)];
+ else
+ d->arch.altp2m_visible_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] =
+ mfn_x(INVALID_MFN);
+
+ altp2m_list_unlock(d);
+
+ return rc;
+}
#endif
/*
struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
mm_lock_t altp2m_list_lock;
uint64_t *altp2m_eptp;
+ uint64_t *altp2m_visible_eptp;
#endif
/* NB. protected by d->event_lock and by irq_desc[irq].lock */
int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
mfn_t mfn, unsigned int page_order,
p2m_type_t p2mt, p2m_access_t p2ma);
+
+/* Set a specific p2m view visibility */
+int p2m_set_altp2m_view_visibility(struct domain *d, unsigned int idx,
+ uint8_t visible);
#else
struct p2m_domain *p2m_get_altp2m(struct vcpu *v);
static inline void p2m_altp2m_check(struct vcpu *v, uint16_t idx) {}
uint16_t altp2m_idx;
};
+/* Argument for HVMOP_altp2m_set_visibility. */
+struct xen_hvm_altp2m_set_visibility {
+ uint16_t altp2m_idx; /* view to show or hide */
+ uint8_t visible; /* non-zero => visible */
+ uint8_t pad; /* must be zero, checked by the hypervisor */
+};
+
struct xen_hvm_altp2m_op {
uint32_t version; /* HVMOP_ALTP2M_INTERFACE_VERSION */
uint32_t cmd;
#define HVMOP_altp2m_get_p2m_idx 14
/* Set the "Supress #VE" bit for a range of pages */
#define HVMOP_altp2m_set_suppress_ve_multi 15
+/* Set visibility for a given altp2m view */
+#define HVMOP_altp2m_set_visibility 16
domid_t domain;
uint16_t pad1;
uint32_t pad2;
struct xen_hvm_altp2m_suppress_ve_multi suppress_ve_multi;
struct xen_hvm_altp2m_vcpu_disable_notify disable_notify;
struct xen_hvm_altp2m_get_vcpu_p2m_idx get_vcpu_p2m_idx;
+ struct xen_hvm_altp2m_set_visibility set_visibility;
uint8_t pad[64];
} u;
};