From: Ashijeet Acharya
Subject: Re: [Qemu-block] [Qemu-devel] [PATCH v2 7/7] vmdk: Update metadata for multiple clusters
Date: Fri, 31 Mar 2017 15:11:02 +0530

On Fri, Mar 31, 2017 at 2:38 PM, Fam Zheng <address@hidden> wrote:
> On Fri, 03/31 14:17, Ashijeet Acharya wrote:
>> On Fri, Mar 31, 2017 at 12:56 PM, Fam Zheng <address@hidden> wrote:
>> > On Sat, 03/25 16:48, Ashijeet Acharya wrote:
>> >> Include a next pointer in the VmdkMetaData struct to point to the
>> >> previously allocated L2 table. Modify vmdk_L2update to update metadata
>> >> for the allocation of multiple clusters at once.
>> >>
>> >> Signed-off-by: Ashijeet Acharya <address@hidden>
>> >> ---
>> >>  block/vmdk.c | 131 ++++++++++++++++++++++++++++++++++++++++++++++-------------
>> >>  1 file changed, 102 insertions(+), 29 deletions(-)
>> >>
>> >> diff --git a/block/vmdk.c b/block/vmdk.c
>> >> index 3de8b8f..4517409 100644
>> >> --- a/block/vmdk.c
>> >> +++ b/block/vmdk.c
>> >> @@ -137,6 +137,8 @@ typedef struct VmdkMetaData {
>> >>      int valid;
>> >>      uint32_t *l2_cache_entry;
>> >>      uint32_t nb_clusters;
>> >> +    uint32_t offset;
>> >> +    struct VmdkMetaData *next;
>> >>  } VmdkMetaData;
>> >>
>> >>  typedef struct VmdkGrainMarker {
>> >> @@ -1037,29 +1039,81 @@ static void vmdk_refresh_limits(BlockDriverState *bs, Error **errp)
>> >>      }
>> >>  }
>> >>
>> >> -static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
>> >> -                         uint32_t offset)
>> >> +static int vmdk_alloc_cluster_link_l2(VmdkExtent *extent,
>> >> +                                      VmdkMetaData *m_data, bool zeroed)
>> >>  {
>> >> -    offset = cpu_to_le32(offset);
>> >> +    int i;
>> >> +    uint32_t offset, temp_offset;
>> >> +
>> >> +    if (zeroed) {
>> >> +        temp_offset = VMDK_GTE_ZEROED;
>> >> +    } else {
>> >> +        temp_offset = m_data->offset;
>> >> +    }
>> >> +
>> >> +    temp_offset = cpu_to_le32(temp_offset);
>> >> +
>> >>      /* update L2 table */
>> >> -    if (bdrv_pwrite_sync(extent->file,
>> >> +    offset = temp_offset;
>> >> +    for (i = 0; i < m_data->nb_clusters; i++) {
>> >> +        if (bdrv_pwrite_sync(extent->file,
>> >>                  ((int64_t)m_data->l2_offset * 512)
>> >> -                    + (m_data->l2_index * sizeof(offset)),
>> >> -                &offset, sizeof(offset)) < 0) {
>> >> -        return VMDK_ERROR;
>> >> +                    + ((m_data->l2_index + i) * sizeof(offset)),
>> >> +                &(offset), sizeof(offset)) < 0) {
>> >> +            return VMDK_ERROR;
>> >> +        }
>> >> +        if (!zeroed) {
>> >> +            offset += 128;
>> >> +        }
>> >>      }
>> >> +
>> >>      /* update backup L2 table */
>> >> +    offset = temp_offset;
>> >>      if (extent->l1_backup_table_offset != 0) {
>> >>          m_data->l2_offset = extent->l1_backup_table[m_data->l1_index];
>> >> -        if (bdrv_pwrite_sync(extent->file,
>> >> -                    ((int64_t)m_data->l2_offset * 512)
>> >> -                        + (m_data->l2_index * sizeof(offset)),
>> >> -                    &offset, sizeof(offset)) < 0) {
>> >> -            return VMDK_ERROR;
>> >> +        for (i = 0; i < m_data->nb_clusters; i++) {
>> >> +            if (bdrv_pwrite_sync(extent->file,
>> >> +                        ((int64_t)m_data->l2_offset * 512)
>> >> +                            + ((m_data->l2_index + i) * sizeof(offset)),
>> >> +                        &(offset), sizeof(offset)) < 0) {
>> >> +                return VMDK_ERROR;
>> >> +            }
>> >> +            if (!zeroed) {
>> >> +                offset += 128;
>> >> +            }
>> >>          }
>> >>      }
>> >> +
>> >> +    offset = temp_offset;
>> >>      if (m_data->l2_cache_entry) {
>> >> -        *m_data->l2_cache_entry = offset;
>> >> +        for (i = 0; i < m_data->nb_clusters; i++) {
>> >> +            *m_data->l2_cache_entry = offset;
>> >> +            m_data->l2_cache_entry++;
>> >> +
>> >> +            if (!zeroed) {
>> >> +                offset += 128;
>> >> +            }
>> >> +        }
>> >> +    }
>> >> +
>> >> +    return VMDK_OK;
>> >> +}
>> >> +
>> >> +static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
>> >> +                         bool zeroed)
>> >> +{
>> >> +    int ret;
>> >> +
>> >> +    while (m_data->next != NULL) {
>> >> +        VmdkMetaData *next;
>> >> +
>> >> +        ret = vmdk_alloc_cluster_link_l2(extent, m_data, zeroed);
>> >> +        if (ret < 0) {
>> >> +            return ret;
>> >> +        }
>> >> +
>> >> +        next = m_data->next;
>> >> +        m_data = next;
>> >
>> > I don't see why the next pointer is necessary.  Also, the tail is always
>> > unused; why do you need to allocate it?
>>
>> If I don't allocate the tail, I get a segfault in vmdk_pwritev().
>
> That may be because the way you iterate the linked list in vmdk_pwritev is:
>
>>        while (m_data->next != NULL) {
>>            VmdkMetaData *next;
>>            next = m_data->next;
>>            g_free(m_data);
>>            m_data = next;
>>        }
>>
>
> which does require a dummy tail.
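
(For comparison, a free loop of the following shape consumes every node,
including the last real one, so no dummy tail is needed; this is only a
sketch reusing the patch's VmdkMetaData and g_free:)

    /* Sketch: test the current node rather than its successor, so the
     * last real node is freed too and no dummy tail is required. */
    while (m_data != NULL) {
        VmdkMetaData *next = m_data->next;
        g_free(m_data);
        m_data = next;
    }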

No, I remember it segfaulting even before I inserted that piece of
code. I think the reason is that I try to access m_data->valid inside
vmdk_pwritev()...

>
> But after all it's still not clear to me why you cannot keep m_data on stack,

...and using malloc to move it to the heap solved the problem; it also
let me construct the linked list.
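
(A sketch of the usual failure mode when m_data lives on the stack;
this is illustrative C, not the patch's code: a pointer chained to a
stack VmdkMetaData dangles once its function returns, while a heap
allocation stays valid until an explicit g_free():)

    /* Illustrative only, not the patch's code. */
    static VmdkMetaData *broken_node(void)
    {
        VmdkMetaData m;   /* stack storage, dies when the function returns */
        m.valid = 1;
        return &m;        /* dangling pointer; later use can segfault */
    }

    static VmdkMetaData *working_node(void)
    {
        VmdkMetaData *m = g_new0(VmdkMetaData, 1);  /* heap storage */
        m->valid = 1;
        return m;         /* stays valid until g_free(m) */
    }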

> and why you need the next pointer at all.

If I don't split the clusters into batches of 512, I will need to
increment the l2_offset manually...right? If I don't use the next
pointer, what solution do you recommend?
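
(For context, the batches of 512 come from the on-disk layout: each VMDK
grain table, the L2 table here, holds 512 entries, so a run of clusters
that crosses a table boundary needs a fresh l2_offset. A minimal sketch
of the index arithmetic follows; cluster_num is a hypothetical running
cluster number, and 512 assumes the standard grain table size:)

    /* Sketch only: cluster_num is a hypothetical running cluster
     * number. Consecutive clusters that cross a multiple of 512 land
     * in a different L2 table and need a new l2_offset, hence one
     * VmdkMetaData node per table in the chain. */
    uint32_t l1_index = cluster_num / 512;  /* which L2 table */
    uint32_t l2_index = cluster_num % 512;  /* entry within that table */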

Ashijeet

>
> Fam


