@@ -43,6 +43,8 @@
 #include <fcntl.h>
 #include <sys/ioctl.h>
 #include <linux/vhost.h>
+#include <sys/vfs.h>
+#include <linux/magic.h>
 
 #ifdef __NR_userfaultfd
 #include <linux/userfaultfd.h>
@@ -195,30 +197,58 @@ vu_panic(VuDev *dev, const char *msg, ...)
      */
 }
 
+/* Search for a memory region that covers this guest physical address. */
+static VuDevRegion *
+vu_gpa_to_mem_region(VuDev *dev, uint64_t guest_addr)
+{
+    int low = 0;
+    int high = dev->nregions - 1;
+
+    /*
+     * Memory regions cannot overlap in guest physical address space. Each
+     * GPA belongs to exactly one memory region, so there can only be one
+     * match.
+     *
+     * We store our memory regions ordered by GPA and can simply perform a
+     * binary search.
+     */
+    while (low <= high) {
+        unsigned int mid = low + (high - low) / 2;
+        VuDevRegion *cur = &dev->regions[mid];
+
+        if (guest_addr >= cur->gpa && guest_addr < cur->gpa + cur->size) {
+            return cur;
+        }
+        if (guest_addr >= cur->gpa + cur->size) {
+            low = mid + 1;
+        }
+        if (guest_addr < cur->gpa) {
+            high = mid - 1;
+        }
+    }
+    return NULL;
+}
+
 /* Translate guest physical address to our virtual address. */
 void *
 vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
 {
-    unsigned int i;
+    VuDevRegion *r;
 
     if (*plen == 0) {
         return NULL;
     }
 
-    /* Find matching memory region. */
-    for (i = 0; i < dev->nregions; i++) {
-        VuDevRegion *r = &dev->regions[i];
-
-        if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
-            if ((guest_addr + *plen) > (r->gpa + r->size)) {
-                *plen = r->gpa + r->size - guest_addr;
-            }
-            return (void *)(uintptr_t)
-                guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
-        }
+    r = vu_gpa_to_mem_region(dev, guest_addr);
+    if (!r) {
+        return NULL;
     }
 
-    return NULL;
+    if ((guest_addr + *plen) > (r->gpa + r->size)) {
+        *plen = r->gpa + r->size - guest_addr;
+    }
+    return (void *)(uintptr_t)guest_addr - r->gpa + r->mmap_addr +
+           r->mmap_offset;
 }
 
 /* Translate qemu virtual address to our virtual address. */
@@ -240,6 +270,221 @@ qva_to_va(VuDev *dev, uint64_t qemu_addr)
     return NULL;
 }
 
+static void
+vu_remove_all_mem_regs(VuDev *dev)
+{
+    unsigned int i;
+
+    for (i = 0; i < dev->nregions; i++) {
+        VuDevRegion *r = &dev->regions[i];
+
+        munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset);
+    }
+    dev->nregions = 0;
+}
+
+static bool
+map_ring(VuDev *dev, VuVirtq *vq)
+{
+    vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr);
+    vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr);
+    vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr);
+
+    DPRINT("Setting virtq addresses:\n");
+    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
+    DPRINT("    vring_used  at %p\n", vq->vring.used);
+    DPRINT("    vring_avail at %p\n", vq->vring.avail);
+
+    return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
+}
+
+static bool
+vu_is_vq_usable(VuDev *dev, VuVirtq *vq)
+{
+    if (unlikely(dev->broken)) {
+        return false;
+    }
+
+    if (likely(vq->vring.avail)) {
+        return true;
+    }
+
+    /*
+     * In corner cases, we might temporarily remove a memory region that
+     * mapped a ring. When removing a memory region we make sure to
+     * unmap any rings that would be impacted. Let's try to remap if we
+     * already succeeded mapping this ring once.
+     */
+    if (!vq->vra.desc_user_addr || !vq->vra.used_user_addr ||
+        !vq->vra.avail_user_addr) {
+        return false;
+    }
+    if (map_ring(dev, vq)) {
+        vu_panic(dev, "remapping queue on access");
+        return false;
+    }
+    return true;
+}
+
+static void
+unmap_rings(VuDev *dev, VuDevRegion *r)
+{
+    int i;
+
+    for (i = 0; i < dev->max_queues; i++) {
+        VuVirtq *vq = &dev->vq[i];
+        const uintptr_t desc = (uintptr_t)vq->vring.desc;
+        const uintptr_t used = (uintptr_t)vq->vring.used;
+        const uintptr_t avail = (uintptr_t)vq->vring.avail;
+
+        if (desc < r->mmap_addr || desc >= r->mmap_addr + r->size) {
+            continue;
+        }
+        if (used < r->mmap_addr || used >= r->mmap_addr + r->size) {
+            continue;
+        }
+        if (avail < r->mmap_addr || avail >= r->mmap_addr + r->size) {
+            continue;
+        }
+
+        DPRINT("Unmapping rings of queue %d\n", i);
+        vq->vring.desc = NULL;
+        vq->vring.used = NULL;
+        vq->vring.avail = NULL;
+    }
+}
+
+static size_t
+get_fd_hugepagesize(int fd)
+{
+#if defined(__linux__)
+    struct statfs fs;
+    int ret;
+
+    do {
+        ret = fstatfs(fd, &fs);
+    } while (ret != 0 && errno == EINTR);
+
+    if (!ret && (unsigned int)fs.f_type == HUGETLBFS_MAGIC) {
+        return fs.f_bsize;
+    }
+#endif
+    return 0;
+}
+
+static void
+_vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd)
+{
+    const uint64_t start_gpa = msg_region->guest_phys_addr;
+    const uint64_t end_gpa = start_gpa + msg_region->memory_size;
+    int prot = PROT_READ | PROT_WRITE;
+    uint64_t mmap_offset, fd_offset;
+    size_t hugepagesize;
+    VuDevRegion *r;
+    void *mmap_addr;
+    int low = 0;
+    int high = dev->nregions - 1;
+    unsigned int idx;
+
+    DPRINT("Adding region %d\n", dev->nregions);
+    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
+           msg_region->guest_phys_addr);
+    DPRINT("    memory_size:     0x%016"PRIx64"\n",
+           msg_region->memory_size);
+    DPRINT("    userspace_addr:  0x%016"PRIx64"\n",
+           msg_region->userspace_addr);
+    DPRINT("    old mmap_offset: 0x%016"PRIx64"\n",
+           msg_region->mmap_offset);
+
+    if (dev->postcopy_listening) {
+        /*
+         * In postcopy we're using PROT_NONE here to catch anyone
+         * accessing it before we userfault
+         */
+        prot = PROT_NONE;
+    }
+
+    /*
+     * We will add memory regions into the array sorted by GPA. Perform a
+     * binary search to locate the insertion point: it will be at the low
+     * index.
+     */
+    while (low <= high) {
+        unsigned int mid = low + (high - low) / 2;
+        VuDevRegion *cur = &dev->regions[mid];
+
+        /* Overlap of GPA addresses. */
+        if (start_gpa < cur->gpa + cur->size && cur->gpa < end_gpa) {
+            vu_panic(dev, "regions with overlapping guest physical addresses");
+            return;
+        }
+        if (start_gpa >= cur->gpa + cur->size) {
+            low = mid + 1;
+        }
+        if (start_gpa < cur->gpa) {
+            high = mid - 1;
+        }
+    }
+    idx = low;
+
+    /*
+     * Convert most of msg_region->mmap_offset to fd_offset. In almost all
+     * cases, this will leave us with mmap_offset == 0, mmap()'ing only
+     * what we really need. Only if a memory region would partially cover
+     * hugetlb pages, we'd get mmap_offset != 0, which usually doesn't happen
+     * anymore (i.e., modern QEMU).
+     *
+     * Note that mmap() with hugetlb would fail if the offset into the file
+     * is not aligned to the huge page size.
+     */
+    hugepagesize = get_fd_hugepagesize(fd);
+    if (hugepagesize) {
+        fd_offset = ALIGN_DOWN(msg_region->mmap_offset, hugepagesize);
+        mmap_offset = msg_region->mmap_offset - fd_offset;
+    } else {
+        fd_offset = msg_region->mmap_offset;
+        mmap_offset = 0;
+    }
+
+    DPRINT("    fd_offset:       0x%016"PRIx64"\n",
+           fd_offset);
+    DPRINT("    new mmap_offset: 0x%016"PRIx64"\n",
+           mmap_offset);
+
+    mmap_addr = mmap(0, msg_region->memory_size + mmap_offset,
+                     prot, MAP_SHARED | MAP_NORESERVE, fd, fd_offset);
+    if (mmap_addr == MAP_FAILED) {
+        vu_panic(dev, "region mmap error: %s", strerror(errno));
+        return;
+    }
+    DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
+           (uint64_t)(uintptr_t)mmap_addr);
+
+#if defined(__linux__)
+    /* Don't include all guest memory in a coredump. */
+    madvise(mmap_addr, msg_region->memory_size + mmap_offset,
+            MADV_DONTDUMP);
+#endif
+
+    /* Shift all affected entries by 1 to open a hole at idx. */
+    r = &dev->regions[idx];
+    memmove(r + 1, r, sizeof(VuDevRegion) * (dev->nregions - idx));
+    r->gpa = msg_region->guest_phys_addr;
+    r->size = msg_region->memory_size;
+    r->qva = msg_region->userspace_addr;
+    r->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
+    r->mmap_offset = mmap_offset;
+    dev->nregions++;
+
+    if (dev->postcopy_listening) {
+        /*
+         * Return the address to QEMU so that it can translate the ufd
+         * fault addresses back.
+         */
+        msg_region->userspace_addr = r->mmap_addr + r->mmap_offset;
+    }
+}
+
 static void
 vmsg_close_fds(VhostUserMsg *vmsg)
 {
@@ -612,21 +857,6 @@ vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
     return false;
 }
 
-static bool
-map_ring(VuDev *dev, VuVirtq *vq)
-{
-    vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr);
-    vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr);
-    vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr);
-
-    DPRINT("Setting virtq addresses:\n");
-    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
-    DPRINT("    vring_used  at %p\n", vq->vring.used);
-    DPRINT("    vring_avail at %p\n", vq->vring.avail);
-
-    return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
-}
-
 static bool
 generate_faults(VuDev *dev) {
     unsigned int i;
@@ -710,11 +940,7 @@ generate_faults(VuDev *dev) {
 
 static bool
 vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
-    int i;
-    bool track_ramblocks = dev->postcopy_listening;
     VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
-    VuDevRegion *dev_region = &dev->regions[dev->nregions];
-    void *mmap_addr;
 
     if (vmsg->fd_num != 1) {
         vmsg_close_fds(vmsg);
@@ -744,84 +970,24 @@ vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
      * we know all the postcopy client bases have been received, and we
      * should start generating faults.
      */
-    if (track_ramblocks &&
+    if (dev->postcopy_listening &&
         vmsg->size == sizeof(vmsg->payload.u64) &&
         vmsg->payload.u64 == 0) {
         (void)generate_faults(dev);
         return false;
     }
 
-    DPRINT("Adding region: %u\n", dev->nregions);
-    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
-           msg_region->guest_phys_addr);
-    DPRINT("    memory_size:     0x%016"PRIx64"\n",
-           msg_region->memory_size);
-    DPRINT("    userspace_addr   0x%016"PRIx64"\n",
-           msg_region->userspace_addr);
-    DPRINT("    mmap_offset      0x%016"PRIx64"\n",
-           msg_region->mmap_offset);
-
-    dev_region->gpa = msg_region->guest_phys_addr;
-    dev_region->size = msg_region->memory_size;
-    dev_region->qva = msg_region->userspace_addr;
-    dev_region->mmap_offset = msg_region->mmap_offset;
-
-    /*
-     * We don't use offset argument of mmap() since the
-     * mapped address has to be page aligned, and we use huge
-     * pages.
-     */
-    if (track_ramblocks) {
-        /*
-         * In postcopy we're using PROT_NONE here to catch anyone
-         * accessing it before we userfault.
-         */
-        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
-                         PROT_NONE, MAP_SHARED | MAP_NORESERVE,
-                         vmsg->fds[0], 0);
-    } else {
-        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
-                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE,
-                         vmsg->fds[0], 0);
-    }
-
-    if (mmap_addr == MAP_FAILED) {
-        vu_panic(dev, "region mmap error: %s", strerror(errno));
-    } else {
-        dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
-        DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
-               dev_region->mmap_addr);
-    }
-
+    _vu_add_mem_reg(dev, msg_region, vmsg->fds[0]);
     close(vmsg->fds[0]);
 
-    if (track_ramblocks) {
-        /*
-         * Return the address to QEMU so that it can translate the ufd
-         * fault addresses back.
-         */
-        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
-                                                 dev_region->mmap_offset);
-
+    if (dev->postcopy_listening) {
         /* Send the message back to qemu with the addresses filled in. */
         vmsg->fd_num = 0;
         DPRINT("Successfully added new region in postcopy\n");
-        dev->nregions++;
         return true;
-    } else {
-        for (i = 0; i < dev->max_queues; i++) {
-            if (dev->vq[i].vring.desc) {
-                if (map_ring(dev, &dev->vq[i])) {
-                    vu_panic(dev, "remapping queue %d for new memory region",
-                             i);
-                }
-            }
-        }
-
-        DPRINT("Successfully added new region\n");
-        dev->nregions++;
-        return false;
     }
+    DPRINT("Successfully added new region\n");
+    return false;
 }
 
 static inline bool reg_equal(VuDevRegion *vudev_reg,
@@ -839,8 +1005,8 @@ static inline bool reg_equal(VuDevRegion *vudev_reg,
 static bool
 vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
     VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
-    unsigned int i;
-    bool found = false;
+    unsigned int idx;
+    VuDevRegion *r;
 
     if (vmsg->fd_num > 1) {
         vmsg_close_fds(vmsg);
@@ -867,35 +1033,31 @@ vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
     DPRINT("    mmap_offset      0x%016"PRIx64"\n",
            msg_region->mmap_offset);
 
-    for (i = 0; i < dev->nregions; i++) {
-        if (reg_equal(&dev->regions[i], msg_region)) {
-            VuDevRegion *r = &dev->regions[i];
-            void *ma = (void *) (uintptr_t) r->mmap_addr;
-
-            if (ma) {
-                munmap(ma, r->size + r->mmap_offset);
-            }
-
-            /*
-             * Shift all affected entries by 1 to close the hole at index i and
-             * zero out the last entry.
-             */
-            memmove(dev->regions + i, dev->regions + i + 1,
-                    sizeof(VuDevRegion) * (dev->nregions - i - 1));
-            memset(dev->regions + dev->nregions - 1, 0, sizeof(VuDevRegion));
-            DPRINT("Successfully removed a region\n");
-            dev->nregions--;
-            i--;
+    r = vu_gpa_to_mem_region(dev, msg_region->guest_phys_addr);
+    if (!r || !reg_equal(r, msg_region)) {
+        vmsg_close_fds(vmsg);
+        vu_panic(dev, "Specified region not found\n");
+        return false;
+    }
 
-            found = true;
+    /*
+     * There might be valid cases where we temporarily remove memory regions
+     * to readd them again, or remove memory regions and don't use the rings
+     * anymore before we set the ring addresses and restart the device.
+     *
+     * Unmap all affected rings, remapping them on demand later. This should
+     * be a corner case.
+     */
+    unmap_rings(dev, r);
 
-            /* Continue the search for eventual duplicates. */
-        }
-    }
+    munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset);
 
-    if (!found) {
-        vu_panic(dev, "Specified region not found\n");
-    }
+    idx = r - dev->regions;
+    assert(idx < dev->nregions);
+    /* Shift all affected entries by 1 to close the hole. */
+    memmove(r, r + 1, sizeof(VuDevRegion) * (dev->nregions - idx - 1));
+    DPRINT("Successfully removed a region\n");
+    dev->nregions--;
 
     vmsg_close_fds(vmsg);
 
@@ -920,140 +1082,42 @@ vu_get_shared_object(VuDev *dev, VhostUserMsg *vmsg)
     return true;
 }
 
-static bool
-vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
-{
-    unsigned int i;
-    VhostUserMemory m = vmsg->payload.memory, *memory = &m;
-    dev->nregions = memory->nregions;
-
-    DPRINT("Nregions: %u\n", memory->nregions);
-    for (i = 0; i < dev->nregions; i++) {
-        void *mmap_addr;
-        VhostUserMemoryRegion *msg_region = &memory->regions[i];
-        VuDevRegion *dev_region = &dev->regions[i];
-
-        DPRINT("Region %d\n", i);
-        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
-               msg_region->guest_phys_addr);
-        DPRINT("    memory_size:     0x%016"PRIx64"\n",
-               msg_region->memory_size);
-        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
-               msg_region->userspace_addr);
-        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
-               msg_region->mmap_offset);
-
-        dev_region->gpa = msg_region->guest_phys_addr;
-        dev_region->size = msg_region->memory_size;
-        dev_region->qva = msg_region->userspace_addr;
-        dev_region->mmap_offset = msg_region->mmap_offset;
-
-        /* We don't use offset argument of mmap() since the
-         * mapped address has to be page aligned, and we use huge
-         * pages.
-         * In postcopy we're using PROT_NONE here to catch anyone
-         * accessing it before we userfault
-         */
-        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
-                         PROT_NONE, MAP_SHARED | MAP_NORESERVE,
-                         vmsg->fds[i], 0);
-
-        if (mmap_addr == MAP_FAILED) {
-            vu_panic(dev, "region mmap error: %s", strerror(errno));
-        } else {
-            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
-            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
-                   dev_region->mmap_addr);
-        }
-
-        /* Return the address to QEMU so that it can translate the ufd
-         * fault addresses back.
-         */
-        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
-                                                 dev_region->mmap_offset);
-        close(vmsg->fds[i]);
-    }
-
-    /* Send the message back to qemu with the addresses filled in */
-    vmsg->fd_num = 0;
-    if (!vu_send_reply(dev, dev->sock, vmsg)) {
-        vu_panic(dev, "failed to respond to set-mem-table for postcopy");
-        return false;
-    }
-
-    /* Wait for QEMU to confirm that it's registered the handler for the
-     * faults.
-     */
-    if (!dev->read_msg(dev, dev->sock, vmsg) ||
-        vmsg->size != sizeof(vmsg->payload.u64) ||
-        vmsg->payload.u64 != 0) {
-        vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
-        return false;
-    }
-
-    /* OK, now we can go and register the memory and generate faults */
-    (void)generate_faults(dev);
-
-    return false;
-}
-
 static bool
 vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
 {
-    unsigned int i;
     VhostUserMemory m = vmsg->payload.memory, *memory = &m;
+    unsigned int i;
 
-    for (i = 0; i < dev->nregions; i++) {
-        VuDevRegion *r = &dev->regions[i];
-        void *ma = (void *) (uintptr_t) r->mmap_addr;
+    vu_remove_all_mem_regs(dev);
 
-        if (ma) {
-            munmap(ma, r->size + r->mmap_offset);
-        }
+    DPRINT("Nregions: %u\n", memory->nregions);
+    for (i = 0; i < memory->nregions; i++) {
+        _vu_add_mem_reg(dev, &memory->regions[i], vmsg->fds[i]);
+        close(vmsg->fds[i]);
     }
-    dev->nregions = memory->nregions;
 
     if (dev->postcopy_listening) {
-        return vu_set_mem_table_exec_postcopy(dev, vmsg);
-    }
-
-    DPRINT("Nregions: %u\n", memory->nregions);
-    for (i = 0; i < dev->nregions; i++) {
-        void *mmap_addr;
-        VhostUserMemoryRegion *msg_region = &memory->regions[i];
-        VuDevRegion *dev_region = &dev->regions[i];
+        /* Send the message back to qemu with the addresses filled in */
+        vmsg->fd_num = 0;
+        if (!vu_send_reply(dev, dev->sock, vmsg)) {
+            vu_panic(dev, "failed to respond to set-mem-table for postcopy");
+            return false;
+        }
 
-        DPRINT("Region %d\n", i);
-        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
-               msg_region->guest_phys_addr);
-        DPRINT("    memory_size:     0x%016"PRIx64"\n",
-               msg_region->memory_size);
-        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
-               msg_region->userspace_addr);
-        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
-               msg_region->mmap_offset);
-
-        dev_region->gpa = msg_region->guest_phys_addr;
-        dev_region->size = msg_region->memory_size;
-        dev_region->qva = msg_region->userspace_addr;
-        dev_region->mmap_offset = msg_region->mmap_offset;
-
-        /* We don't use offset argument of mmap() since the
-         * mapped address has to be page aligned, and we use huge
-         * pages. */
-        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
-                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE,
-                         vmsg->fds[i], 0);
-
-        if (mmap_addr == MAP_FAILED) {
-            vu_panic(dev, "region mmap error: %s", strerror(errno));
-        } else {
-            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
-            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
-                   dev_region->mmap_addr);
+        /*
+         * Wait for QEMU to confirm that it's registered the handler for the
+         * faults.
+         */
+        if (!dev->read_msg(dev, dev->sock, vmsg) ||
+            vmsg->size != sizeof(vmsg->payload.u64) ||
+            vmsg->payload.u64 != 0) {
+            vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
+            return false;
         }
 
-        close(vmsg->fds[i]);
+        /* OK, now we can go and register the memory and generate faults */
+        (void)generate_faults(dev);
+        return false;
     }
 
     for (i = 0; i < dev->max_queues; i++) {
@@ -2112,14 +2176,7 @@ vu_deinit(VuDev *dev)
 {
     unsigned int i;
 
-    for (i = 0; i < dev->nregions; i++) {
-        VuDevRegion *r = &dev->regions[i];
-        void *m = (void *) (uintptr_t) r->mmap_addr;
-        if (m != MAP_FAILED) {
-            munmap(m, r->size + r->mmap_offset);
-        }
-    }
-    dev->nregions = 0;
+    vu_remove_all_mem_regs(dev);
 
     for (i = 0; i < dev->max_queues; i++) {
         VuVirtq *vq = &dev->vq[i];
@@ -2171,6 +2228,8 @@ vu_deinit(VuDev *dev)
 
     free(dev->vq);
     dev->vq = NULL;
+    free(dev->regions);
+    dev->regions = NULL;
 }
 
 bool
@@ -2205,9 +2264,17 @@ vu_init(VuDev *dev,
     dev->backend_fd = -1;
     dev->max_queues = max_queues;
 
+    dev->regions = malloc(VHOST_USER_MAX_RAM_SLOTS * sizeof(dev->regions[0]));
+    if (!dev->regions) {
+        DPRINT("%s: failed to malloc mem regions\n", __func__);
+        return false;
+    }
+
     dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
     if (!dev->vq) {
         DPRINT("%s: failed to malloc virtqueues\n", __func__);
+        free(dev->regions);
+        dev->regions = NULL;
         return false;
     }
 
@@ -2374,8 +2441,7 @@ vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
     idx = vq->last_avail_idx;
 
     total_bufs = in_total = out_total = 0;
-    if (unlikely(dev->broken) ||
-        unlikely(!vq->vring.avail)) {
+    if (!vu_is_vq_usable(dev, vq)) {
         goto done;
     }
 
@@ -2490,8 +2556,7 @@ vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
 bool
 vu_queue_empty(VuDev *dev, VuVirtq *vq)
 {
-    if (unlikely(dev->broken) ||
-        unlikely(!vq->vring.avail)) {
+    if (!vu_is_vq_usable(dev, vq)) {
         return true;
     }
 
@@ -2530,8 +2595,7 @@ vring_notify(VuDev *dev, VuVirtq *vq)
 
 static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync)
 {
-    if (unlikely(dev->broken) ||
-        unlikely(!vq->vring.avail)) {
+    if (!vu_is_vq_usable(dev, vq)) {
         return;
     }
 
@@ -2856,8 +2920,7 @@ vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
     unsigned int head;
     VuVirtqElement *elem;
 
-    if (unlikely(dev->broken) ||
-        unlikely(!vq->vring.avail)) {
+    if (!vu_is_vq_usable(dev, vq)) {
         return NULL;
     }
 
@@ -3014,8 +3077,7 @@ vu_queue_fill(VuDev *dev, VuVirtq *vq,
 {
     struct vring_used_elem uelem;
 
-    if (unlikely(dev->broken) ||
-        unlikely(!vq->vring.avail)) {
+    if (!vu_is_vq_usable(dev, vq)) {
         return;
     }
 
@@ -3044,8 +3106,7 @@ vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
 {
     uint16_t old, new;
 
-    if (unlikely(dev->broken) ||
-        unlikely(!vq->vring.avail)) {
+    if (!vu_is_vq_usable(dev, vq)) {
         return;
     }
 