return 1;
}
-static int hvm_alloc_ioreq_gmfn(struct domain *d, unsigned long *gmfn)
+static int hvm_alloc_ioreq_gfn(struct domain *d, unsigned long *gfn)
{
unsigned int i;
int rc;
rc = -ENOMEM;
- for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gmfn.mask) * 8; i++ )
+ for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
{
- if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask) )
+ if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
{
- *gmfn = d->arch.hvm_domain.ioreq_gmfn.base + i;
+ *gfn = d->arch.hvm_domain.ioreq_gfn.base + i;
rc = 0;
break;
}
return rc;
}
-static void hvm_free_ioreq_gmfn(struct domain *d, unsigned long gmfn)
+static void hvm_free_ioreq_gfn(struct domain *d, unsigned long gfn)
{
- unsigned int i = gmfn - d->arch.hvm_domain.ioreq_gmfn.base;
+ unsigned int i = gfn - d->arch.hvm_domain.ioreq_gfn.base;
- if ( gmfn != gfn_x(INVALID_GFN) )
- set_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
+ if ( gfn != gfn_x(INVALID_GFN) )
+ set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
}
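The two helpers above implement a simple bit-mask pool: bit i of ioreq_gfn.mask marks whether gfn ioreq_gfn.base + i is currently free. A minimal standalone sketch of that scheme follows, using hypothetical simplified types rather than the Xen structures, and plain mask arithmetic in place of Xen's atomic test_and_clear_bit()/set_bit():

/* Hypothetical stand-in for d->arch.hvm_domain.ioreq_gfn. */
#define POOL_INVALID_GFN (~0UL)

struct gfn_pool {
    unsigned long base;   /* first gfn in the pool */
    unsigned long mask;   /* bit i set => gfn base + i is free */
};

int pool_alloc_gfn(struct gfn_pool *p, unsigned long *gfn)
{
    unsigned int i;

    for ( i = 0; i < sizeof(p->mask) * 8; i++ )
    {
        if ( p->mask & (1UL << i) )
        {
            p->mask &= ~(1UL << i);   /* claim the slot */
            *gfn = p->base + i;
            return 0;
        }
    }

    return -1;                        /* pool exhausted (-ENOMEM in Xen) */
}

void pool_free_gfn(struct gfn_pool *p, unsigned long gfn)
{
    if ( gfn != POOL_INVALID_GFN )
        p->mask |= 1UL << (gfn - p->base);
}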
static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
}
static int hvm_map_ioreq_page(
- struct hvm_ioreq_server *s, bool_t buf, unsigned long gmfn)
+ struct hvm_ioreq_server *s, bool_t buf, unsigned long gfn)
{
struct domain *d = s->domain;
struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
void *va;
int rc;
- if ( (rc = prepare_ring_for_helper(d, gmfn, &page, &va)) )
+ if ( (rc = prepare_ring_for_helper(d, gfn, &page, &va)) )
return rc;
if ( (iorp->va != NULL) || d->is_dying )
iorp->va = va;
iorp->page = page;
- iorp->gmfn = gmfn;
+ iorp->gfn = gfn;
return 0;
}
return found;
}
-static void hvm_remove_ioreq_gmfn(
+static void hvm_remove_ioreq_gfn(
struct domain *d, struct hvm_ioreq_page *iorp)
{
- if ( guest_physmap_remove_page(d, _gfn(iorp->gmfn),
+ if ( guest_physmap_remove_page(d, _gfn(iorp->gfn),
_mfn(page_to_mfn(iorp->page)), 0) )
domain_crash(d);
clear_page(iorp->va);
}
-static int hvm_add_ioreq_gmfn(
+static int hvm_add_ioreq_gfn(
struct domain *d, struct hvm_ioreq_page *iorp)
{
int rc;
clear_page(iorp->va);
- rc = guest_physmap_add_page(d, _gfn(iorp->gmfn),
+ rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
_mfn(page_to_mfn(iorp->page)), 0);
if ( rc == 0 )
paging_mark_dirty(d, _mfn(page_to_mfn(iorp->page)));
}
static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
- unsigned long ioreq_pfn,
- unsigned long bufioreq_pfn)
+ unsigned long ioreq_gfn,
+ unsigned long bufioreq_gfn)
{
int rc;
- rc = hvm_map_ioreq_page(s, 0, ioreq_pfn);
+ rc = hvm_map_ioreq_page(s, 0, ioreq_gfn);
if ( rc )
return rc;
- if ( bufioreq_pfn != gfn_x(INVALID_GFN) )
- rc = hvm_map_ioreq_page(s, 1, bufioreq_pfn);
+ if ( bufioreq_gfn != gfn_x(INVALID_GFN) )
+ rc = hvm_map_ioreq_page(s, 1, bufioreq_gfn);
if ( rc )
hvm_unmap_ioreq_page(s, 0);
bool_t handle_bufioreq)
{
struct domain *d = s->domain;
- unsigned long ioreq_pfn = gfn_x(INVALID_GFN);
- unsigned long bufioreq_pfn = gfn_x(INVALID_GFN);
+ unsigned long ioreq_gfn = gfn_x(INVALID_GFN);
+ unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
int rc;
if ( is_default )
d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN]);
}
- rc = hvm_alloc_ioreq_gmfn(d, &ioreq_pfn);
+ rc = hvm_alloc_ioreq_gfn(d, &ioreq_gfn);
if ( !rc && handle_bufioreq )
- rc = hvm_alloc_ioreq_gmfn(d, &bufioreq_pfn);
+ rc = hvm_alloc_ioreq_gfn(d, &bufioreq_gfn);
if ( !rc )
- rc = hvm_ioreq_server_map_pages(s, ioreq_pfn, bufioreq_pfn);
+ rc = hvm_ioreq_server_map_pages(s, ioreq_gfn, bufioreq_gfn);
if ( rc )
{
- hvm_free_ioreq_gmfn(d, ioreq_pfn);
- hvm_free_ioreq_gmfn(d, bufioreq_pfn);
+ hvm_free_ioreq_gfn(d, ioreq_gfn);
+ hvm_free_ioreq_gfn(d, bufioreq_gfn);
}
return rc;
if ( !is_default )
{
if ( handle_bufioreq )
- hvm_free_ioreq_gmfn(d, s->bufioreq.gmfn);
+ hvm_free_ioreq_gfn(d, s->bufioreq.gfn);
- hvm_free_ioreq_gmfn(d, s->ioreq.gmfn);
+ hvm_free_ioreq_gfn(d, s->ioreq.gfn);
}
}
if ( !is_default )
{
- hvm_remove_ioreq_gmfn(d, &s->ioreq);
+ hvm_remove_ioreq_gfn(d, &s->ioreq);
if ( handle_bufioreq )
- hvm_remove_ioreq_gmfn(d, &s->bufioreq);
+ hvm_remove_ioreq_gfn(d, &s->bufioreq);
}
s->enabled = 1;
if ( !is_default )
{
if ( handle_bufioreq )
- hvm_add_ioreq_gmfn(d, &s->bufioreq);
+ hvm_add_ioreq_gfn(d, &s->bufioreq);
- hvm_add_ioreq_gmfn(d, &s->ioreq);
+ hvm_add_ioreq_gfn(d, &s->ioreq);
}
s->enabled = 0;
}
int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
- unsigned long *ioreq_pfn,
- unsigned long *bufioreq_pfn,
+ unsigned long *ioreq_gfn,
+ unsigned long *bufioreq_gfn,
evtchn_port_t *bufioreq_port)
{
struct hvm_ioreq_server *s;
if ( s->id != id )
continue;
- *ioreq_pfn = s->ioreq.gmfn;
+ *ioreq_gfn = s->ioreq.gfn;
if ( s->bufioreq.va != NULL )
{
- *bufioreq_pfn = s->bufioreq.gmfn;
+ *bufioreq_gfn = s->bufioreq.gfn;
*bufioreq_port = s->bufioreq_evtchn;
}
* A domain supports a single 'legacy' IOREQ Server which is instantiated if
* parameter...
*
- * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
+ * HVM_PARAM_IOREQ_PFN is read (to get the gfn containing the synchronous
* ioreq structures), or...
- * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
+ * HVM_PARAM_BUFIOREQ_PFN is read (to get the gfn containing the buffered
* ioreq ring), or...
* HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
* to request buffered I/O emulation).
*
* The emulator needs to map the synchronous ioreq structures and buffered
* ioreq ring (if it exists) that Xen uses to request emulation. These are
- * hosted in the target domain's gmfns <ioreq_pfn> and <bufioreq_pfn>
+ * hosted in the target domain's gfns <ioreq_gfn> and <bufioreq_gfn>
* respectively. In addition, if the IOREQ Server is handling buffered
* emulation requests, the emulator needs to bind to event channel
* <bufioreq_port> to listen for them. (The event channels used for
* synchronous emulation requests are specified in the per-CPU ioreq
- * structures in <ioreq_pfn>).
+ * structures in <ioreq_gfn>).
* If the IOREQ Server is not handling buffered emulation requests then the
- * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
+ * values handed back in <bufioreq_gfn> and <bufioreq_port> will both be 0.
*/
#define XEN_DMOP_get_ioreq_server_info 2
uint16_t pad;
/* OUT - buffered ioreq port */
evtchn_port_t bufioreq_port;
- /* OUT - sync ioreq pfn */
- uint64_aligned_t ioreq_pfn;
- /* OUT - buffered ioreq pfn */
- uint64_aligned_t bufioreq_pfn;
+ /* OUT - sync ioreq gfn */
+ uint64_aligned_t ioreq_gfn;
+ /* OUT - buffered ioreq gfn */
+ uint64_aligned_t bufioreq_gfn;
};
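For reference, a minimal sketch of how an emulator might retrieve the OUT fields above through libxendevicemodel, the stable wrapper around XEN_DMOP_get_ioreq_server_info. The helper name and error handling are illustrative assumptions; only the xendevicemodel_* calls come from the library's public header.

#include <stdio.h>
#include <xendevicemodel.h>

/* Illustrative only: 'domid' and 'id' would come from server creation. */
int get_server_info(domid_t domid, ioservid_t id)
{
    xendevicemodel_handle *dmod = xendevicemodel_open(NULL, 0);
    xen_pfn_t ioreq_gfn, bufioreq_gfn;
    evtchn_port_t bufioreq_port;
    int rc;

    if ( !dmod )
        return -1;

    /* Fills in the OUT fields of struct xen_dm_op_get_ioreq_server_info. */
    rc = xendevicemodel_get_ioreq_server_info(dmod, domid, id,
                                              &ioreq_gfn, &bufioreq_gfn,
                                              &bufioreq_port);
    if ( !rc )
        printf("ioreq gfn %#lx, bufioreq gfn %#lx, port %u\n",
               (unsigned long)ioreq_gfn, (unsigned long)bufioreq_gfn,
               bufioreq_port);

    xendevicemodel_close(dmod);
    return rc;
}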
/*
*
* The IOREQ Server will not be passed any emulation requests until it is
* in the enabled state.
- * Note that the contents of the ioreq_pfn and bufioreq_fn (see
+ * Note that the contents of the ioreq_gfn and bufioreq_gfn (see
* XEN_DMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server
* is in the enabled state.
*/
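As a sketch of the ordering this note implies (assumed helper, not part of the patch): an emulator would typically map <ioreq_gfn> with xenforeignmemory_map() first, then enable the server with xendevicemodel_set_ioreq_server_state(), and only read the ioreq structures once the server is enabled.

#include <sys/mman.h>
#include <xendevicemodel.h>
#include <xenforeignmemory.h>

/*
 * Hypothetical helper: map the synchronous ioreq page, then enable the
 * server.  The mapping may be established early, but the page contents
 * are only meaningful once the server is in the enabled state.
 */
void *map_and_enable(xendevicemodel_handle *dmod,
                     xenforeignmemory_handle *fmem,
                     domid_t domid, ioservid_t id,
                     xen_pfn_t ioreq_gfn)
{
    void *shared = xenforeignmemory_map(fmem, domid, PROT_READ | PROT_WRITE,
                                        1, &ioreq_gfn, NULL);

    if ( !shared )
        return NULL;

    /* Only start reading ioreq structures after this call succeeds. */
    if ( xendevicemodel_set_ioreq_server_state(dmod, domid, id, 1) )
    {
        xenforeignmemory_unmap(fmem, shared, 1);
        return NULL;
    }

    return shared;
}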