|
@@ -667,6 +667,8 @@ static int xen_map_ioreq_server(XenIOState *state)
|
|
|
xen_pfn_t ioreq_pfn;
|
|
|
xen_pfn_t bufioreq_pfn;
|
|
|
evtchn_port_t bufioreq_evtchn;
|
|
|
+ unsigned long num_frames = 1;
|
|
|
+ unsigned long frame = 1;
|
|
|
int rc;
|
|
|
|
|
|
/*
|
|
@@ -675,59 +677,78 @@ static int xen_map_ioreq_server(XenIOState *state)
|
|
|
*/
|
|
|
QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
|
|
|
QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
|
|
|
+
|
|
|
+ if (state->has_bufioreq) {
|
|
|
+ frame = 0;
|
|
|
+ num_frames = 2;
|
|
|
+ }
|
|
|
state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
|
|
|
XENMEM_resource_ioreq_server,
|
|
|
- state->ioservid, 0, 2,
|
|
|
+ state->ioservid,
|
|
|
+ frame, num_frames,
|
|
|
&addr,
|
|
|
PROT_READ | PROT_WRITE, 0);
|
|
|
if (state->fres != NULL) {
|
|
|
trace_xen_map_resource_ioreq(state->ioservid, addr);
|
|
|
- state->buffered_io_page = addr;
|
|
|
- state->shared_page = addr + XC_PAGE_SIZE;
|
|
|
+ state->shared_page = addr;
|
|
|
+ if (state->has_bufioreq) {
|
|
|
+ state->buffered_io_page = addr;
|
|
|
+ state->shared_page = addr + XC_PAGE_SIZE;
|
|
|
+ }
|
|
|
} else if (errno != EOPNOTSUPP) {
|
|
|
error_report("failed to map ioreq server resources: error %d handle=%p",
|
|
|
errno, xen_xc);
|
|
|
return -1;
|
|
|
}
|
|
|
|
|
|
- rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
|
|
|
- (state->shared_page == NULL) ?
|
|
|
- &ioreq_pfn : NULL,
|
|
|
- (state->buffered_io_page == NULL) ?
|
|
|
- &bufioreq_pfn : NULL,
|
|
|
- &bufioreq_evtchn);
|
|
|
- if (rc < 0) {
|
|
|
- error_report("failed to get ioreq server info: error %d handle=%p",
|
|
|
- errno, xen_xc);
|
|
|
- return rc;
|
|
|
- }
|
|
|
+ /*
|
|
|
+ * If we fail to map the shared page with xenforeignmemory_map_resource()
|
|
|
+ * or if we're using buffered ioreqs, we need xen_get_ioreq_server_info()
|
|
|
+ * to provide the addresses to map the shared page and/or to get the
|
|
|
+ * event-channel port for buffered ioreqs.
|
|
|
+ */
|
|
|
+ if (state->shared_page == NULL || state->has_bufioreq) {
|
|
|
+ rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
|
|
|
+ (state->shared_page == NULL) ?
|
|
|
+ &ioreq_pfn : NULL,
|
|
|
+ (state->has_bufioreq &&
|
|
|
+ state->buffered_io_page == NULL) ?
|
|
|
+ &bufioreq_pfn : NULL,
|
|
|
+ &bufioreq_evtchn);
|
|
|
+ if (rc < 0) {
|
|
|
+ error_report("failed to get ioreq server info: error %d handle=%p",
|
|
|
+ errno, xen_xc);
|
|
|
+ return rc;
|
|
|
+ }
|
|
|
|
|
|
- if (state->shared_page == NULL) {
|
|
|
- trace_xen_map_ioreq_server_shared_page(ioreq_pfn);
|
|
|
+ if (state->shared_page == NULL) {
|
|
|
+ trace_xen_map_ioreq_server_shared_page(ioreq_pfn);
|
|
|
|
|
|
- state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
|
|
|
- PROT_READ | PROT_WRITE,
|
|
|
- 1, &ioreq_pfn, NULL);
|
|
|
+ state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
|
|
|
+ PROT_READ | PROT_WRITE,
|
|
|
+ 1, &ioreq_pfn, NULL);
|
|
|
+ }
|
|
|
if (state->shared_page == NULL) {
|
|
|
error_report("map shared IO page returned error %d handle=%p",
|
|
|
errno, xen_xc);
|
|
|
}
|
|
|
- }
|
|
|
|
|
|
- if (state->buffered_io_page == NULL) {
|
|
|
- trace_xen_map_ioreq_server_buffered_io_page(bufioreq_pfn);
|
|
|
+ if (state->has_bufioreq && state->buffered_io_page == NULL) {
|
|
|
+ trace_xen_map_ioreq_server_buffered_io_page(bufioreq_pfn);
|
|
|
|
|
|
- state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
|
|
|
- PROT_READ | PROT_WRITE,
|
|
|
- 1, &bufioreq_pfn,
|
|
|
- NULL);
|
|
|
- if (state->buffered_io_page == NULL) {
|
|
|
- error_report("map buffered IO page returned error %d", errno);
|
|
|
- return -1;
|
|
|
+ state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
|
|
|
+ PROT_READ | PROT_WRITE,
|
|
|
+ 1, &bufioreq_pfn,
|
|
|
+ NULL);
|
|
|
+ if (state->buffered_io_page == NULL) {
|
|
|
+ error_report("map buffered IO page returned error %d", errno);
|
|
|
+ return -1;
|
|
|
+ }
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- if (state->shared_page == NULL || state->buffered_io_page == NULL) {
|
|
|
+ if (state->shared_page == NULL ||
|
|
|
+ (state->has_bufioreq && state->buffered_io_page == NULL)) {
|
|
|
return -1;
|
|
|
}
|
|
|
|
|
@@ -830,14 +851,15 @@ static void xen_do_ioreq_register(XenIOState *state,
|
|
|
state->ioreq_local_port[i] = rc;
|
|
|
}
|
|
|
|
|
|
- rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
|
|
|
- state->bufioreq_remote_port);
|
|
|
- if (rc == -1) {
|
|
|
- error_report("buffered evtchn bind error %d", errno);
|
|
|
- goto err;
|
|
|
+ if (state->has_bufioreq) {
|
|
|
+ rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
|
|
|
+ state->bufioreq_remote_port);
|
|
|
+ if (rc == -1) {
|
|
|
+ error_report("buffered evtchn bind error %d", errno);
|
|
|
+ goto err;
|
|
|
+ }
|
|
|
+ state->bufioreq_local_port = rc;
|
|
|
}
|
|
|
- state->bufioreq_local_port = rc;
|
|
|
-
|
|
|
/* Init RAM management */
|
|
|
#ifdef XEN_COMPAT_PHYSMAP
|
|
|
xen_map_cache_init(xen_phys_offset_to_gaddr, state);
|
|
@@ -865,6 +887,7 @@ err:
|
|
|
}
|
|
|
|
|
|
void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
|
|
|
+ uint8_t handle_bufioreq,
|
|
|
const MemoryListener *xen_memory_listener)
|
|
|
{
|
|
|
int rc;
|
|
@@ -883,7 +906,8 @@ void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
|
|
|
goto err;
|
|
|
}
|
|
|
|
|
|
- rc = xen_create_ioreq_server(xen_domid, &state->ioservid);
|
|
|
+ state->has_bufioreq = handle_bufioreq != HVM_IOREQSRV_BUFIOREQ_OFF;
|
|
|
+ rc = xen_create_ioreq_server(xen_domid, handle_bufioreq, &state->ioservid);
|
|
|
if (!rc) {
|
|
|
xen_do_ioreq_register(state, max_cpus, xen_memory_listener);
|
|
|
} else {
|