static void set_ioreq_server(struct domain *d, unsigned int id,
                             struct ioreq_server *s)
{
ASSERT(id < MAX_NR_IOREQ_SERVERS);
- ASSERT(!s || !d->arch.hvm.ioreq_server.server[id]);
+ ASSERT(!s || !d->ioreq_server.server[id]);
- d->arch.hvm.ioreq_server.server[id] = s;
+ d->ioreq_server.server[id] = s;
}
#define GET_IOREQ_SERVER(d, id) \
- (d)->arch.hvm.ioreq_server.server[id]
+ (d)->ioreq_server.server[id]
static struct ioreq_server *get_ioreq_server(const struct domain *d,
unsigned int id)
unsigned int id;
bool found = false;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
{
}
}
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return found;
}
return -ENOMEM;
domain_pause(d);
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
{
if ( id )
*id = i;
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
domain_unpause(d);
return 0;
fail:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
domain_unpause(d);
xfree(s);
struct ioreq_server *s;
int rc;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = 0;
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
struct ioreq_server *s;
int rc;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = 0;
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
ASSERT(is_hvm_domain(d));
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
}
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
if ( start > end )
return -EINVAL;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = rangeset_add_range(r, start, end);
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
if ( start > end )
return -EINVAL;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = rangeset_remove_range(r, start, end);
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
if ( flags & ~XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
return -EINVAL;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = arch_ioreq_server_map_mem_type(d, s, flags);
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
if ( rc == 0 )
arch_ioreq_server_map_mem_type_completed(d, s, flags);
struct ioreq_server *s;
int rc;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = 0;
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
unsigned int id;
int rc;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
{
goto fail;
}
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return 0;
hvm_ioreq_server_remove_vcpu(s, v);
}
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
struct ioreq_server *s;
unsigned int id;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
hvm_ioreq_server_remove_vcpu(s, v);
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
}
void hvm_destroy_all_ioreq_servers(struct domain *d)
if ( !arch_ioreq_server_destroy_all(d) )
return;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
/* No need to domain_pause() as the domain is being torn down */
xfree(s);
}
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
}
struct ioreq_server *hvm_select_ioreq_server(struct domain *d,
void hvm_ioreq_init(struct domain *d)
{
- spin_lock_init(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_init(&d->ioreq_server.lock);
arch_ioreq_domain_init(d);
}
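
For reference, every hunk above touches the same two fields, now reached directly from struct domain instead of through d->arch.hvm: a recursive lock and a fixed-size array of server pointers. Below is a minimal sketch of the sub-struct those accessors imply; the exact placement inside struct domain, the Kconfig guard, and the definition of MAX_NR_IOREQ_SERVERS are assumptions for illustration, not taken from this diff.

#include <xen/spinlock.h>

/*
 * Sketch only: per-domain ioreq server state as implied by the accessors
 * in the hunks above (d->ioreq_server.lock, d->ioreq_server.server[id]
 * with id < MAX_NR_IOREQ_SERVERS).
 */
struct domain {
    /* ... existing members elided ... */

#ifdef CONFIG_IOREQ_SERVER          /* assumed guard, not shown in the diff */
    struct {
        /* Taken with spin_lock_recursive() on every path shown above. */
        spinlock_t           lock;
        /* Slots assigned by set_ioreq_server(), read via GET_IOREQ_SERVER(). */
        struct ioreq_server *server[MAX_NR_IOREQ_SERVERS];
    } ioreq_server;
#endif
};

Note that the patch only relocates this state; the locking discipline (the recursive spinlock taken around every lookup and the per-slot assignment under that lock) is left unchanged.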