From: Yan Zhao
Subject: Re: [Qemu-devel] [PATCH v4 10/13] vfio: Add function to get dirty page list
Date: Tue, 25 Jun 2019 20:40:39 -0400
User-agent: Mutt/1.9.4 (2018-02-28)

On Thu, Jun 20, 2019 at 10:37:38PM +0800, Kirti Wankhede wrote:
> Dirty page tracking (.log_sync) is part of the RAM copying state: the
> vendor driver provides, through the migration region, the bitmap of pages
> it has dirtied, and as part of the RAM copy those pages get copied to the
> file stream.
> 
> To get the dirty page bitmap:
> - write the start address, page_size and pfn count.
> - read the count of pfns copied.
>     - The vendor driver should return 0 if it has no pages to report
>       dirty in the given range.
>     - The vendor driver should return -1 to mark all pages dirty for the given range.
> - read data_offset, where the vendor driver has written the bitmap.
> - read the bitmap from the region or the mmaped part of the region. This copy is
>   iterated until the page bitmap for all requested pfns has been copied.
> 
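For reference, the bitmap returned in each iteration is a little-endian page
bitmap: bit i corresponds to the i-th pfn of the chunk just copied, and a set
bit means the device dirtied that page (the QEMU side hands the buffer to
cpu_physical_memory_set_dirty_lebitmap()). A minimal illustration of that
bit-to-pfn mapping, not part of the patch:

    /* Illustration only: bit i of the little-endian bitmap covers the i-th
     * pfn of the current chunk; a set bit means the device dirtied it. */
    static inline int chunk_pfn_is_dirty(const uint8_t *bitmap, uint64_t i)
    {
        return (bitmap[i / 8] >> (i % 8)) & 1;
    }
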
> Signed-off-by: Kirti Wankhede <address@hidden>
> Reviewed-by: Neo Jia <address@hidden>
> ---
>  hw/vfio/migration.c           | 119 ++++++++++++++++++++++++++++++++++++++++++
>  include/hw/vfio/vfio-common.h |   2 +
>  2 files changed, 121 insertions(+)
> 
> diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
> index e4895f91761d..68775b5dec11 100644
> --- a/hw/vfio/migration.c
> +++ b/hw/vfio/migration.c
> @@ -228,6 +228,125 @@ static int vfio_load_device_config_state(QEMUFile *f, void *opaque)
>      return qemu_file_get_error(f);
>  }
>  
> +void vfio_get_dirty_page_list(VFIODevice *vbasedev,
> +                              uint64_t start_pfn,
> +                              uint64_t pfn_count,
> +                              uint64_t page_size)
> +{
> +    VFIOMigration *migration = vbasedev->migration;
> +    VFIORegion *region = &migration->region.buffer;
> +    uint64_t count = 0;
> +    int64_t copied_pfns = 0;
> +    int ret;
> +
> +    qemu_mutex_lock(&migration->lock);
> +    ret = pwrite(vbasedev->fd, &start_pfn, sizeof(start_pfn),
> +                 region->fd_offset + offsetof(struct vfio_device_migration_info,
> +                                              start_pfn));
> +    if (ret < 0) {
> +        error_report("Failed to set dirty pages start address %d %s",
> +                ret, strerror(errno));
> +        goto dpl_unlock;
> +    }
> +
> +    ret = pwrite(vbasedev->fd, &page_size, sizeof(page_size),
> +                 region->fd_offset + offsetof(struct vfio_device_migration_info,
> +                                              page_size));
> +    if (ret < 0) {
> +        error_report("Failed to set dirty page size %d %s",
> +                ret, strerror(errno));
> +        goto dpl_unlock;
> +    }
> +
> +    ret = pwrite(vbasedev->fd, &pfn_count, sizeof(pfn_count),
> +                 region->fd_offset + offsetof(struct vfio_device_migration_info,
> +                                              total_pfns));
> +    if (ret < 0) {
> +        error_report("Failed to set dirty page total pfns %d %s",
> +                ret, strerror(errno));
> +        goto dpl_unlock;
> +    }
> +
> +    do {
> +        uint64_t bitmap_size, data_offset = 0;
> +        void *buf = NULL;
> +        bool buffer_mmaped = false;
> +
> +        /* Read copied dirty pfns */
> +        ret = pread(vbasedev->fd, &copied_pfns, sizeof(copied_pfns),
> +                region->fd_offset + offsetof(struct vfio_device_migration_info,
> +                                             copied_pfns));
> +        if (ret < 0) {
> +            error_report("Failed to get dirty pages bitmap count %d %s",
> +                    ret, strerror(errno));
> +            goto dpl_unlock;
> +        }
> +
> +        if (copied_pfns == 0) {
> +            /*
> +             * copied_pfns could be 0 if driver doesn't have any page to
> +             * report dirty in given range
> +             */
> +            break;
This copied_pfns is the dirty page count for which range?
If it is read on each iteration, why break here rather than continue?
Consider a big region of pfn_count pages that is now broken into several
smaller subregions: copied_pfns being 0 in the first subregion doesn't
mean copied_pfns is 0 in all the remaining subregions.
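Something like the following is what I'd have expected if copied_pfns only
covers the pages the driver reported this iteration (just a sketch;
reported_pfns below is a hypothetical per-iteration count, not a field in
this patch):

    if (copied_pfns == 0) {
        /* nothing dirty in the sub-range scanned this iteration */
        count += reported_pfns;   /* hypothetical: pfns covered this round */
        continue;                 /* keep querying the rest of the range */
    }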

> +        } else if (copied_pfns == -1) {
> +            /* Mark all pages dirty for this range */
> +            cpu_physical_memory_set_dirty_range(start_pfn * page_size,
> +                                                pfn_count * page_size,
> +                                                DIRTY_MEMORY_MIGRATION);
> +            break;
> +        }
> +
> +        bitmap_size = (BITS_TO_LONGS(copied_pfns) + 1) * sizeof(unsigned long);
> +
> +        ret = pread(vbasedev->fd, &data_offset, sizeof(data_offset),
> +                region->fd_offset + offsetof(struct vfio_device_migration_info,
> +                                             data_offset));
> +        if (ret != sizeof(data_offset)) {
> +            error_report("Failed to get migration buffer data offset %d",
> +                         ret);
> +            goto dpl_unlock;
> +        }
> +
> +        if (region->mmaps) {
> +            int i;
> +            for (i = 0; i < region->nr_mmaps; i++) {
> +                if ((region->mmaps[i].offset >= data_offset) &&
> +                    (data_offset < region->mmaps[i].offset +
> +                                   region->mmaps[i].size)) {
> +                    buf = region->mmaps[i].mmap + (data_offset -
> +                                                   region->mmaps[i].offset);
> +                    buffer_mmaped = true;
> +                    break;
> +                }
> +            }
> +        }
> +
> +        if (!buffer_mmaped) {
> +            buf = g_malloc0(bitmap_size);
> +
> +            ret = pread(vbasedev->fd, buf, bitmap_size,
> +                        region->fd_offset + data_offset);
> +            if (ret != bitmap_size) {
> +                error_report("Failed to get dirty pages bitmap %d", ret);
> +                g_free(buf);
> +                goto dpl_unlock;
> +            }
> +        }
> +
> +        cpu_physical_memory_set_dirty_lebitmap((unsigned long *)buf,
> +                                               (start_pfn + count) * page_size,
> +                                                copied_pfns);
> +        count +=  copied_pfns;
> +
Here also: why is it count += copied_pfns?

> +        if (!buffer_mmaped) {
> +            g_free(buf);
> +        }
> +    } while (count < pfn_count);
> +
> +dpl_unlock:
> +    qemu_mutex_unlock(&migration->lock);
> +}
> +
>  /* ---------------------------------------------------------------------- */
>  
>  static int vfio_save_setup(QEMUFile *f, void *opaque)
> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
> index 1d26e6be8d48..423d6dbccace 100644
> --- a/include/hw/vfio/vfio-common.h
> +++ b/include/hw/vfio/vfio-common.h
> @@ -224,5 +224,7 @@ int vfio_spapr_remove_window(VFIOContainer *container,
>  
>  int vfio_migration_probe(VFIODevice *vbasedev, Error **errp);
>  void vfio_migration_finalize(VFIODevice *vbasedev);
> +void vfio_get_dirty_page_list(VFIODevice *vbasedev, uint64_t start_pfn,
> +                               uint64_t pfn_count, uint64_t page_size);
>  
>  #endif /* HW_VFIO_VFIO_COMMON_H */
> -- 
> 2.7.0
> 
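For reference, a hypothetical sketch of how a .log_sync-style caller could
drive vfio_get_dirty_page_list() for one MemoryRegionSection (the actual
caller is added elsewhere in this series; the function name below is only
illustrative):

    /* Hypothetical caller sketch: ask the vendor driver for its dirty
     * bitmap covering one guest RAM section. */
    static void vfio_log_sync_sketch(VFIODevice *vbasedev,
                                     MemoryRegionSection *section)
    {
        uint64_t page_size = TARGET_PAGE_SIZE;
        uint64_t start_pfn = section->offset_within_address_space / page_size;
        uint64_t pfn_count = int128_get64(section->size) / page_size;

        vfio_get_dirty_page_list(vbasedev, start_pfn, pfn_count, page_size);
    }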


