@@ -31,6 +31,7 @@ typedef enum VhostUserGpuRequest {
     VHOST_USER_GPU_UPDATE,
     VHOST_USER_GPU_DMABUF_SCANOUT,
     VHOST_USER_GPU_DMABUF_UPDATE,
+    VHOST_USER_GPU_GET_EDID,
 } VhostUserGpuRequest;
 
 typedef struct VhostUserGpuDisplayInfoReply {
@@ -78,6 +79,10 @@ typedef struct VhostUserGpuDMABUFScanout {
     int fd_drm_fourcc;
 } QEMU_PACKED VhostUserGpuDMABUFScanout;
 
+typedef struct VhostUserGpuEdidRequest {
+    uint32_t scanout_id;
+} QEMU_PACKED VhostUserGpuEdidRequest;
+
 typedef struct VhostUserGpuMsg {
     uint32_t request; /* VhostUserGpuRequest */
     uint32_t flags;
@@ -88,6 +93,8 @@ typedef struct VhostUserGpuMsg {
         VhostUserGpuScanout scanout;
         VhostUserGpuUpdate update;
         VhostUserGpuDMABUFScanout dmabuf_scanout;
+        VhostUserGpuEdidRequest edid_req;
+        struct virtio_gpu_resp_edid resp_edid;
         struct virtio_gpu_resp_display_info display_info;
         uint64_t u64;
     } payload;
@@ -99,6 +106,8 @@ static VhostUserGpuMsg m __attribute__ ((unused));
 
 #define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4
 
+#define VHOST_USER_GPU_PROTOCOL_F_EDID 0
+
 static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked);
 
 static void
@@ -161,6 +170,9 @@ vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
             .request = msg->request,
             .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
             .size = sizeof(uint64_t),
+            .payload = {
+                .u64 = (1 << VHOST_USER_GPU_PROTOCOL_F_EDID)
+            }
         };
 
         vhost_user_gpu_send_msg(g, &reply);
@@ -184,6 +196,26 @@ vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
         vhost_user_gpu_send_msg(g, &reply);
         break;
     }
+    case VHOST_USER_GPU_GET_EDID: {
+        VhostUserGpuEdidRequest *m = &msg->payload.edid_req;
+        struct virtio_gpu_resp_edid resp = { {} };
+        VhostUserGpuMsg reply = {
+            .request = msg->request,
+            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
+            .size = sizeof(reply.payload.resp_edid),
+        };
+
+        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
+            error_report("invalid scanout: %d", m->scanout_id);
+            break;
+        }
+
+        resp.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
+        virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), m->scanout_id, &resp);
+        memcpy(&reply.payload.resp_edid, &resp, sizeof(resp));
+        vhost_user_gpu_send_msg(g, &reply);
+        break;
+    }
     case VHOST_USER_GPU_SCANOUT: {
         VhostUserGpuScanout *m = &msg->payload.scanout;
 
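
For context, a minimal sketch (not part of this patch) of how a vhost-user-gpu backend could issue the new request over the vhost-user-gpu socket, reusing the VhostUserGpuMsg and VhostUserGpuEdidRequest layouts added above. The send_get_edid helper and the raw write() framing are illustrative assumptions, not taken from QEMU's backend code, and it assumes the frontend advertised VHOST_USER_GPU_PROTOCOL_F_EDID in its protocol features:

/* Illustrative sketch only: relies on the VhostUserGpuMsg definitions above. */
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>

static int send_get_edid(int sock_fd, uint32_t scanout_id)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_EDID,
        .size = sizeof(VhostUserGpuEdidRequest),
        .payload.edid_req.scanout_id = scanout_id,
    };
    /* Wire format: request/flags/size header followed by 'size' payload bytes. */
    size_t len = offsetof(VhostUserGpuMsg, payload) + msg.size;

    return write(sock_fd, &msg, len) == (ssize_t)len ? 0 : -1;
}

The frontend handles this in the VHOST_USER_GPU_GET_EDID case added above and replies with a struct virtio_gpu_resp_edid payload whose header type is VIRTIO_GPU_RESP_OK_EDID.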