}
while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
+ if ( unlikely((x & PGT_type_mask) != type) )
+ {
+ /* Special pages should not be accessible from devices. */
+ struct domain *d = page_get_owner(page);
+ if ( d && unlikely(need_iommu(d)) )
+ {
+ if ( (x & PGT_type_mask) == PGT_writable_page )
+ iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
+ else if ( type == PGT_writable_page )
+ iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
+ page_to_mfn(page));
+ }
+ }
+
if ( unlikely(!(nx & PGT_validated)) )
{
/* Try to validate page type; drop the new reference on failure. */
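The hunk above keeps a PV domain's IOMMU mappings in sync with page-type transitions: a page leaving PGT_writable_page must vanish from the device's view, while a page returning to it must become visible again. A minimal standalone sketch of that decision, assuming the patch's need_iommu(), iommu_map_page() and iommu_unmap_page(); the helper name is hypothetical:

    /* Hypothetical helper mirroring the hunk above: keep a PV page's
     * IOMMU mapping consistent with its current type. */
    static void sync_iommu_on_type_change(struct domain *d,
                                          struct page_info *page,
                                          unsigned long old_type,
                                          unsigned long new_type)
    {
        unsigned long mfn = page_to_mfn(page);

        if ( !d || !need_iommu(d) )
            return;

        if ( old_type == PGT_writable_page )
            /* Page is becoming special: devices must lose access. */
            iommu_unmap_page(d, mfn_to_gmfn(d, mfn));
        else if ( new_type == PGT_writable_page )
            /* Page is plain writable RAM again: restore device access. */
            iommu_map_page(d, mfn_to_gmfn(d, mfn), mfn);
    }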
if ( mfn_valid(mfn) && (gfn > d->arch.p2m->max_mapped_pfn) )
d->arch.p2m->max_mapped_pfn = gfn;
- if ( iommu_enabled && is_hvm_domain(d) )
+ if ( iommu_enabled && (is_hvm_domain(d) || need_iommu(d)) )
{
if ( p2mt == p2m_ram_rw )
for ( i = 0; i < (1UL << page_order); i++ )
unsigned long i;
if ( !paging_mode_translate(d) )
+ {
+ if ( need_iommu(d) )
+ for ( i = 0; i < (1 << page_order); i++ )
+ iommu_unmap_page(d, mfn + i);
return;
+ }
P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn);
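For a non-translated (PV) guest the physmap is the identity map, so removing a page from it only has to tear down the corresponding IOMMU entries. Read in isolation, the branch added above amounts to this sketch (pv_physmap_remove() is an illustrative name; the calls are the patch's):

    static void pv_physmap_remove(struct domain *d, unsigned long mfn,
                                  unsigned int page_order)
    {
        unsigned long i;

        /* PV only: drop the identity IOMMU mappings for the range. */
        if ( need_iommu(d) )
            for ( i = 0; i < (1UL << page_order); i++ )
                iommu_unmap_page(d, mfn + i);
    }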
int rc = 0;
if ( !paging_mode_translate(d) )
- return -EINVAL;
+ {
+ if ( need_iommu(d) && t == p2m_ram_rw )
+ {
+ for ( i = 0; i < (1 << page_order); i++ )
+ if ( (rc = iommu_map_page(d, mfn + i, mfn + i)) != 0 )
+ {
+ while ( i-- > 0 )
+ iommu_unmap_page(d, mfn + i);
+ return rc;
+ }
+ }
+ return 0;
+ }
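The PV path above uses a map-with-rollback pattern: if mapping page i fails, every page mapped so far is unmapped before the error is returned, so a failed call leaves no partial device mappings behind. The pattern in isolation, with identity_map_range() as an illustrative name and the iommu_*_page() calls assumed from the patch:

    static int identity_map_range(struct domain *d, unsigned long mfn,
                                  unsigned int page_order)
    {
        unsigned long i;
        int rc;

        for ( i = 0; i < (1UL << page_order); i++ )
            if ( (rc = iommu_map_page(d, mfn + i, mfn + i)) != 0 )
            {
                /* Unwind the mappings already established. */
                while ( i-- > 0 )
                    iommu_unmap_page(d, mfn + i);
                return rc;
            }

        return 0;
    }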
#if CONFIG_PAGING_LEVELS == 3
/*
struct domain *ld, *rd;
struct vcpu *led;
int handle;
- unsigned long frame = 0;
+ unsigned long frame = 0, nr_gets = 0;
int rc = GNTST_okay;
+ u32 old_pin;
unsigned int cache_flags;
struct active_grant_entry *act;
struct grant_mapping *mt;
}
}
+ old_pin = act->pin;
if ( op->flags & GNTMAP_device_map )
act->pin += (op->flags & GNTMAP_readonly) ?
GNTPIN_devr_inc : GNTPIN_devw_inc;
rc = GNTST_general_error;
goto undo_out;
}
-
+
+ nr_gets++;
if ( op->flags & GNTMAP_host_map )
{
rc = create_grant_host_mapping(op->host_addr, frame, op->flags, 0);
if ( rc != GNTST_okay )
- {
- if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
- put_page_type(mfn_to_page(frame));
- put_page(mfn_to_page(frame));
goto undo_out;
- }
if ( op->flags & GNTMAP_device_map )
{
+ nr_gets++;
(void)get_page(mfn_to_page(frame), rd);
if ( !(op->flags & GNTMAP_readonly) )
get_page_type(mfn_to_page(frame), PGT_writable_page);
}
}
+ if ( need_iommu(ld) &&
+ !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
+ (act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
+ {
+ if ( iommu_map_page(ld, mfn_to_gmfn(ld, frame), frame) )
+ {
+ rc = GNTST_general_error;
+ goto undo_out;
+ }
+ }
+
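The old_pin/act->pin test above fires only on the 0-to-nonzero transition of the writable pin counts, so however many writable grants pin the same frame, the IOMMU mapping is created exactly once. The same condition pulled out as a hypothetical predicate (u32 and the GNTPIN_* masks are the grant table's own):

    /* True exactly when this pin is the first writable one on the frame. */
    static int first_writable_pin(u32 old_pin, u32 new_pin)
    {
        const u32 w = GNTPIN_hstw_mask | GNTPIN_devw_mask;

        return !(old_pin & w) && (new_pin & w);
    }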
TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);
mt = &maptrack_entry(ld->grant_table, handle);
return;
undo_out:
+ if ( nr_gets > 1 )
+ {
+ if ( !(op->flags & GNTMAP_readonly) )
+ put_page_type(mfn_to_page(frame));
+ put_page(mfn_to_page(frame));
+ }
+ if ( nr_gets > 0 )
+ {
+ if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
+ put_page_type(mfn_to_page(frame));
+ put_page(mfn_to_page(frame));
+ }
+
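nr_gets records how far the mapping got before the failure: it becomes 1 before create_grant_host_mapping() is attempted, once the base frame references are held, and 2 once the extra GNTMAP_device_map references are taken. A failing create_grant_host_mapping() (or iommu_map_page()) therefore reaches undo_out with nr_gets == 1, and the centralized cleanup replaces the inline puts deleted above. An illustrative restatement of the same unwind, not the patch's code, with all identifiers taken from the patch:

    switch ( nr_gets )
    {
    case 2:   /* device-map references, taken last, dropped first */
        if ( !(op->flags & GNTMAP_readonly) )
            put_page_type(mfn_to_page(frame));
        put_page(mfn_to_page(frame));
        /* fall through */
    case 1:   /* base references from the original frame lookup */
        if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
            put_page_type(mfn_to_page(frame));
        put_page(mfn_to_page(frame));
    }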
spin_lock(&rd->grant_table->lock);
act = &active_entry(rd->grant_table, op->ref);
struct active_grant_entry *act;
grant_entry_t *sha;
s16 rc = 0;
+ u32 old_pin;
ld = current->domain;
act = &active_entry(rd->grant_table, op->map->ref);
sha = &shared_entry(rd->grant_table, op->map->ref);
+ old_pin = act->pin;
if ( op->frame == 0 )
{
act->pin -= GNTPIN_hstw_inc;
}
+ if ( need_iommu(ld) &&
+ (old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
+ !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
+ {
+ if ( iommu_unmap_page(ld, mfn_to_gmfn(ld, op->frame)) )
+ {
+ rc = GNTST_general_error;
+ goto unmap_out;
+ }
+ }
+
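This is the mirror of the map side: the IOMMU mapping is torn down only when the last writable pin disappears, so concurrent writable grants on the same frame keep the mapping alive. As a hypothetical predicate using the same GNTPIN_* masks:

    /* True exactly when the last writable pin on the frame went away. */
    static int last_writable_unpin(u32 old_pin, u32 new_pin)
    {
        const u32 w = GNTPIN_hstw_mask | GNTPIN_devw_mask;

        return (old_pin & w) && !(new_pin & w);
    }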
/* If just unmapped a writable mapping, mark as dirtied */
if ( !(op->flags & GNTMAP_readonly) )
gnttab_mark_dirty(rd, op->frame);
}
mfn = page_to_mfn(page);
- if ( unlikely(paging_mode_translate(d)) )
- {
- guest_physmap_add_page(d, gpfn, mfn, a->extent_order);
- }
- else
+ guest_physmap_add_page(d, gpfn, mfn, a->extent_order);
+
+ if ( !paging_mode_translate(d) )
{
for ( j = 0; j < (1 << a->extent_order); j++ )
set_gpfn_from_mfn(mfn + j, gpfn + j);
&gpfn, exch.out.extent_start, (i<<out_chunk_order)+j, 1);
mfn = page_to_mfn(page);
- if ( unlikely(paging_mode_translate(d)) )
- {
- guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order);
- }
- else
+ guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order);
+
+ if ( !paging_mode_translate(d) )
{
for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
set_gpfn_from_mfn(mfn + k, gpfn + k);
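Both call sites above now invoke guest_physmap_add_page() unconditionally, so PV domains reach the new PV branch shown earlier (assuming guest_physmap_add_page() wraps guest_physmap_add_entry() with p2m_ram_rw) and pick up the identity IOMMU mappings, while the M2P update stays PV-only. A sketch of the resulting call pattern; the wrapper name is illustrative:

    static void add_to_physmap(struct domain *d, unsigned long gpfn,
                               unsigned long mfn, unsigned int order)
    {
        unsigned long k;

        /* Translated guests: update the p2m.  PV guests: identity
         * IOMMU mappings via the guest_physmap_add_entry() path. */
        guest_physmap_add_page(d, gpfn, mfn, order);

        if ( !paging_mode_translate(d) )
            for ( k = 0; k < (1UL << order); k++ )
                set_gpfn_from_mfn(mfn + k, gpfn + k);  /* M2P for PV */
    }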