qemu-block
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-block] [Qemu-devel] [PATCH v2 7/7] vmdk: Update metadata for m


From: Fam Zheng
Subject: Re: [Qemu-block] [Qemu-devel] [PATCH v2 7/7] vmdk: Update metadata for multiple clusters
Date: Fri, 31 Mar 2017 17:08:06 +0800
User-agent: Mutt/1.8.0 (2017-02-23)

On Fri, 03/31 14:17, Ashijeet Acharya wrote:
> On Fri, Mar 31, 2017 at 12:56 PM, Fam Zheng <address@hidden> wrote:
> > On Sat, 03/25 16:48, Ashijeet Acharya wrote:
> >> Include a next pointer in VmdkMetaData struct to point to the previous
> >> allocated L2 table. Modify vmdk_L2update to start updating metadata for
> >> allocation of multiple clusters at once.
> >>
> >> Signed-off-by: Ashijeet Acharya <address@hidden>
> >> ---
> >>  block/vmdk.c | 131 
> >> ++++++++++++++++++++++++++++++++++++++++++++++-------------
> >>  1 file changed, 102 insertions(+), 29 deletions(-)
> >>
> >> diff --git a/block/vmdk.c b/block/vmdk.c
> >> index 3de8b8f..4517409 100644
> >> --- a/block/vmdk.c
> >> +++ b/block/vmdk.c
> >> @@ -137,6 +137,8 @@ typedef struct VmdkMetaData {
> >>      int valid;
> >>      uint32_t *l2_cache_entry;
> >>      uint32_t nb_clusters;
> >> +    uint32_t offset;
> >> +    struct VmdkMetaData *next;
> >>  } VmdkMetaData;
> >>
> >>  typedef struct VmdkGrainMarker {
> >> @@ -1037,29 +1039,81 @@ static void vmdk_refresh_limits(BlockDriverState 
> >> *bs, Error **errp)
> >>      }
> >>  }
> >>
> >> -static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
> >> -                         uint32_t offset)
> >> +static int vmdk_alloc_cluster_link_l2(VmdkExtent *extent,
> >> +                                      VmdkMetaData *m_data, bool zeroed)
> >>  {
> >> -    offset = cpu_to_le32(offset);
> >> +    int i;
> >> +    uint32_t offset, temp_offset;
> >> +
> >> +    if (zeroed) {
> >> +        temp_offset = VMDK_GTE_ZEROED;
> >> +    } else {
> >> +        temp_offset = m_data->offset;
> >> +    }
> >> +
> >> +    temp_offset = cpu_to_le32(temp_offset);
> >> +
> >>      /* update L2 table */
> >> -    if (bdrv_pwrite_sync(extent->file,
> >> +    offset = temp_offset;
> >> +    for (i = 0; i < m_data->nb_clusters; i++) {
> >> +        if (bdrv_pwrite_sync(extent->file,
> >>                  ((int64_t)m_data->l2_offset * 512)
> >> -                    + (m_data->l2_index * sizeof(offset)),
> >> -                &offset, sizeof(offset)) < 0) {
> >> -        return VMDK_ERROR;
> >> +                    + ((m_data->l2_index + i) * sizeof(offset)),
> >> +                &(offset), sizeof(offset)) < 0) {
> >> +            return VMDK_ERROR;
> >> +        }
> >> +        if (!zeroed) {
> >> +            offset += 128;
> >> +        }
> >>      }
> >> +
> >>      /* update backup L2 table */
> >> +    offset = temp_offset;
> >>      if (extent->l1_backup_table_offset != 0) {
> >>          m_data->l2_offset = extent->l1_backup_table[m_data->l1_index];
> >> -        if (bdrv_pwrite_sync(extent->file,
> >> -                    ((int64_t)m_data->l2_offset * 512)
> >> -                        + (m_data->l2_index * sizeof(offset)),
> >> -                    &offset, sizeof(offset)) < 0) {
> >> -            return VMDK_ERROR;
> >> +        for (i = 0; i < m_data->nb_clusters; i++) {
> >> +            if (bdrv_pwrite_sync(extent->file,
> >> +                        ((int64_t)m_data->l2_offset * 512)
> >> +                            + ((m_data->l2_index + i) * sizeof(offset)),
> >> +                        &(offset), sizeof(offset)) < 0) {
> >> +                return VMDK_ERROR;
> >> +            }
> >> +            if (!zeroed) {
> >> +                offset += 128;
> >> +            }
> >>          }
> >>      }
> >> +
> >> +    offset = temp_offset;
> >>      if (m_data->l2_cache_entry) {
> >> -        *m_data->l2_cache_entry = offset;
> >> +        for (i = 0; i < m_data->nb_clusters; i++) {
> >> +            *m_data->l2_cache_entry = offset;
> >> +            m_data->l2_cache_entry++;
> >> +
> >> +            if (!zeroed) {
> >> +                offset += 128;
> >> +            }
> >> +        }
> >> +    }
> >> +
> >> +    return VMDK_OK;
> >> +}
> >> +
> >> +static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
> >> +                         bool zeroed)
> >> +{
> >> +    int ret;
> >> +
> >> +    while (m_data->next != NULL) {
> >> +        VmdkMetaData *next;
> >> +
> >> +        ret = vmdk_alloc_cluster_link_l2(extent, m_data, zeroed);
> >> +        if (ret < 0) {
> >> +            return ret;
> >> +        }
> >> +
> >> +        next = m_data->next;
> >> +        m_data = next;
> >
> > I don't see why the next pointer is necessary.  Also the tail is always 
> > unused,
> > why do you need to allocate it?
> 
> If I don't allocate the tail, I was getting a segfault in vmdk_pwritev().

That may be because of the way you iterate the linked list in vmdk_pwritev:

>        while (m_data->next != NULL) {
>            VmdkMetaData *next;
>            next = m_data->next;
>            g_free(m_data);
>            m_data = next;
>        }
>

which does require a dummy tail.

But after all it's still not clear to me why you cannot keep m_data on stack,
and why you need the next pointer at all.

Fam



reply via email to

[Prev in Thread] Current Thread [Next in Thread]