@@ -64,6 +64,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
                                  hwaddr iova, ram_addr_t size,
                                  IOMMUTLBEntry *iotlb)
 {
+    VFIOContainerBase *bcontainer = &container->bcontainer;
     struct vfio_iommu_type1_dma_unmap *unmap;
     struct vfio_bitmap *bitmap;
     VFIOBitmap vbmap;
@@ -91,7 +92,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
     bitmap->size = vbmap.size;
     bitmap->data = (__u64 *)vbmap.bitmap;
 
-    if (vbmap.size > container->max_dirty_bitmap_size) {
+    if (vbmap.size > bcontainer->max_dirty_bitmap_size) {
         error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size);
         ret = -E2BIG;
         goto unmap_exit;
@@ -131,7 +132,7 @@ static int vfio_legacy_dma_unmap(VFIOContainerBase *bcontainer, hwaddr iova,
 
     if (iotlb && vfio_devices_all_running_and_mig_active(bcontainer)) {
         if (!vfio_devices_all_device_dirty_tracking(bcontainer) &&
-            container->bcontainer.dirty_pages_supported) {
+            bcontainer->dirty_pages_supported) {
             return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
         }
 
@@ -469,8 +470,8 @@ static void vfio_get_iommu_info_migration(VFIOContainer *container,
      */
     if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
         bcontainer->dirty_pages_supported = true;
-        container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
-        container->dirty_pgsizes = cap_mig->pgsize_bitmap;
+        bcontainer->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
+        bcontainer->dirty_pgsizes = cap_mig->pgsize_bitmap;
     }
 }
 
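For orientation, here is a minimal sketch (plain C, not the actual QEMU headers) of the container layout these hunks rely on: VFIOContainer embeds a VFIOContainerBase as its bcontainer member, and the dirty-tracking limits now live in the base so backend-agnostic code can reach them without knowing the legacy type. Field names come from the diff above; member order and the elided members are assumptions for illustration only.

#include <stdbool.h>
#include <stdint.h>

/* Backend-agnostic state shared by all container implementations
 * (field names taken from the hunks above; everything else elided). */
typedef struct VFIOContainerBase {
    bool dirty_pages_supported;       /* tested in vfio_legacy_dma_unmap() */
    uint64_t max_dirty_bitmap_size;   /* bounds check in vfio_dma_unmap_bitmap() */
    uint64_t dirty_pgsizes;           /* copied from cap_mig->pgsize_bitmap */
    /* ... other common members ... */
} VFIOContainerBase;

/* Legacy type1 container embedding the base, which is what lets the
 * patch take VFIOContainerBase *bcontainer = &container->bcontainer; */
typedef struct VFIOContainer {
    VFIOContainerBase bcontainer;
    /* ... type1-specific members ... */
} VFIOContainer;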