@@ -478,18 +478,19 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
                               cmd->u.surface_create.stride);
             return 1;
         }
-        qemu_mutex_lock(&qxl->track_lock);
-        if (cmd->type == QXL_SURFACE_CMD_CREATE) {
-            qxl->guest_surfaces.cmds[id] = ext->cmd.data;
-            qxl->guest_surfaces.count++;
-            if (qxl->guest_surfaces.max < qxl->guest_surfaces.count)
-                qxl->guest_surfaces.max = qxl->guest_surfaces.count;
-        }
-        if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
-            qxl->guest_surfaces.cmds[id] = 0;
-            qxl->guest_surfaces.count--;
+        WITH_QEMU_LOCK_GUARD(&qxl->track_lock) {
+            if (cmd->type == QXL_SURFACE_CMD_CREATE) {
+                qxl->guest_surfaces.cmds[id] = ext->cmd.data;
+                qxl->guest_surfaces.count++;
+                if (qxl->guest_surfaces.max < qxl->guest_surfaces.count) {
+                    qxl->guest_surfaces.max = qxl->guest_surfaces.count;
+                }
+            }
+            if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
+                qxl->guest_surfaces.cmds[id] = 0;
+                qxl->guest_surfaces.count--;
+            }
         }
-        qemu_mutex_unlock(&qxl->track_lock);
         break;
     }
     case QXL_CMD_CURSOR:
@@ -958,10 +959,9 @@ static void interface_update_area_complete(QXLInstance *sin,
     int i;
     int qxl_i;
 
-    qemu_mutex_lock(&qxl->ssd.lock);
+    QEMU_LOCK_GUARD(&qxl->ssd.lock);
     if (surface_id != 0 || !num_updated_rects ||
         !qxl->render_update_cookie_num) {
-        qemu_mutex_unlock(&qxl->ssd.lock);
         return;
     }
     trace_qxl_interface_update_area_complete(qxl->id, surface_id, dirty->left,
@@ -980,7 +980,6 @@ static void interface_update_area_complete(QXLInstance *sin,
          * Don't bother copying or scheduling the bh since we will flip
          * the whole area anyway on completion of the update_area async call
          */
-        qemu_mutex_unlock(&qxl->ssd.lock);
         return;
     }
     qxl_i = qxl->num_dirty_rects;
@@ -991,7 +990,6 @@ static void interface_update_area_complete(QXLInstance *sin,
     trace_qxl_interface_update_area_complete_schedule_bh(qxl->id,
                                                          qxl->num_dirty_rects);
     qemu_bh_schedule(qxl->update_area_bh);
-    qemu_mutex_unlock(&qxl->ssd.lock);
 }
 
 /* called from spice server thread context only */
@@ -1694,15 +1692,14 @@ static void ioport_write(void *opaque, hwaddr addr,
     case QXL_IO_MONITORS_CONFIG_ASYNC:
 async_common:
         async = QXL_ASYNC;
-        qemu_mutex_lock(&d->async_lock);
-        if (d->current_async != QXL_UNDEFINED_IO) {
-            qxl_set_guest_bug(d, "%d async started before last (%d) complete",
-                io_port, d->current_async);
-            qemu_mutex_unlock(&d->async_lock);
-            return;
+        WITH_QEMU_LOCK_GUARD(&d->async_lock) {
+            if (d->current_async != QXL_UNDEFINED_IO) {
+                qxl_set_guest_bug(d, "%d async started before last (%d) complete",
+                    io_port, d->current_async);
+                return;
+            }
+            d->current_async = orig_io_port;
         }
-        d->current_async = orig_io_port;
-        qemu_mutex_unlock(&d->async_lock);
         break;
     default:
         break;
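
The pattern applied throughout is the lock-guard form from QEMU's include/qemu/lockable.h: QEMU_LOCK_GUARD() keeps the mutex held until the end of the enclosing scope, and WITH_QEMU_LOCK_GUARD() holds it only for its attached block, so the explicit qemu_mutex_unlock() calls on every early-return path can be dropped. Below is a minimal sketch of both forms; it is illustrative only (DemoState, its field names, and the demo_* functions are made up for this example, not taken from qxl.c):

#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/lockable.h"

/* Hypothetical state with one mutex, assumed already initialised
 * with qemu_mutex_init() elsewhere. */
typedef struct DemoState {
    QemuMutex lock;
    int pending;
} DemoState;

static int demo_take_pending(DemoState *s)
{
    /* Lock is held from here until the function returns, on any path. */
    QEMU_LOCK_GUARD(&s->lock);
    if (!s->pending) {
        return 0;            /* no explicit unlock needed */
    }
    return s->pending--;
}

static void demo_set_pending(DemoState *s, int value)
{
    /* Lock is held only for the duration of this block; leaving the
     * block (including via return) releases it automatically. */
    WITH_QEMU_LOCK_GUARD(&s->lock) {
        s->pending = value;
    }
    /* lock released here; unlocked work can follow */
}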