
vhost_net: don't set backend for the uninitialized virtqueue

We used to set the backend unconditionally. This doesn't work for guests
(e.g. the Windows driver) that may not initialize all virtqueues. With the
kernel backend this fails, since the kernel may try to validate the rings
while the backend is being set.

Fix this by simply skipping the backend setup when we find that the
descriptor ring is not ready.

Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
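
In short, each vhost_net_set_backend() call is now guarded by a check on
whether the guest has programmed the queue's descriptor ring. A condensed
sketch of the guard, restating the vhost_net_start_one() hunk that follows
below (variable names as in that function, no new code implied):

    /* Condensed restatement of the guard added in the hunk below. */
    for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
        if (!virtio_queue_enabled(dev, net->dev.vq_index + file.index)) {
            /* Queue might not be ready for start */
            continue;
        }
        r = vhost_net_set_backend(&net->dev, &file);
        if (r < 0) {
            r = -errno;
            goto fail;
        }
    }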
Jason Wang, 6 years ago
Commit 23bfaf77fa
3 files changed, 16 insertions(+), 0 deletions(-)
  1. hw/net/vhost_net.c (+10, -0)
  2. hw/virtio/virtio.c (+5, -0)
  3. include/hw/virtio/virtio.h (+1, -0)

+ 10 - 0
hw/net/vhost_net.c

@@ -244,6 +244,11 @@ static int vhost_net_start_one(struct vhost_net *net,
         qemu_set_fd_handler(net->backend, NULL, NULL, NULL);
         file.fd = net->backend;
         for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
+            if (!virtio_queue_enabled(dev, net->dev.vq_index +
+                                      file.index)) {
+                /* Queue might not be ready for start */
+                continue;
+            }
             r = vhost_net_set_backend(&net->dev, &file);
             if (r < 0) {
                 r = -errno;
@@ -256,6 +261,11 @@ fail:
     file.fd = -1;
     if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
         while (file.index-- > 0) {
+            if (!virtio_queue_enabled(dev, net->dev.vq_index +
+                                      file.index)) {
+                /* Queue might not be ready for start */
+                continue;
+            }
             int r = vhost_net_set_backend(&net->dev, &file);
             assert(r >= 0);
         }

+ 5 - 0
hw/virtio/virtio.c

@@ -2318,6 +2318,11 @@ hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
     return vdev->vq[n].vring.desc;
 }
 
+bool virtio_queue_enabled(VirtIODevice *vdev, int n)
+{
+    return virtio_queue_get_desc_addr(vdev, n) != 0;
+}
+
 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
 {
     return vdev->vq[n].vring.avail;

+ 1 - 0
include/hw/virtio/virtio.h

@@ -282,6 +282,7 @@ typedef struct VirtIORNGConf VirtIORNGConf;
                       VIRTIO_F_IOMMU_PLATFORM, false)
 
 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
+bool virtio_queue_enabled(VirtIODevice *vdev, int n);
 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n);
 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n);
 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n);
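
For context, virtio_queue_enabled() merely reports whether the guest has
written a non-zero descriptor ring address for queue n. Below is a minimal
sketch of a caller using it to touch only initialized virtqueues; the
VirtIODevice type and headers are QEMU's, while walk_initialized_queues()
and its nvqs parameter are hypothetical and not part of this commit:

    #include "qemu/osdep.h"
    #include "hw/virtio/virtio.h"

    /* Hypothetical helper: skip any virtqueue the guest never set up
     * (its descriptor ring address is still 0). */
    static void walk_initialized_queues(VirtIODevice *vdev, int nvqs)
    {
        int n;

        for (n = 0; n < nvqs; n++) {
            if (!virtio_queue_enabled(vdev, n)) {
                continue;   /* same guard as vhost_net_start_one() */
            }
            /* Safe to validate the rings or attach a backend here. */
        }
    }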