From: Claudio Fontana
Subject: Re: [Qemu-devel] [PATCH v3 28/46] ivshmem: replace 'guest' for 'peer' appropriately
Date: Wed, 16 Sep 2015 13:44:59 +0200
User-agent: Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Thunderbird/38.2.0

On 15.09.2015 18:07, address@hidden wrote:
> From: Marc-André Lureau <address@hidden>
> 
> The terms 'guest' and 'peer' are sometimes used interchangeably, which
> may be confusing. Instead, use 'peer' for the remote instances of
> ivshmem clients, and 'guest' for the local VM.
> 
> Signed-off-by: Marc-André Lureau <address@hidden>
> ---
>  hw/misc/ivshmem.c | 28 ++++++++++++++--------------
>  1 file changed, 14 insertions(+), 14 deletions(-)
> 
> diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
> index 6acdc56..b9c78cd 100644
> --- a/hw/misc/ivshmem.c
> +++ b/hw/misc/ivshmem.c
> @@ -89,7 +89,7 @@ typedef struct IVShmemState {
>      int shm_fd; /* shared memory file descriptor */
>  
>      Peer *peers;
> -    int nb_peers; /* how many guests we have space for */
> +    int nb_peers; /* how many peers we have space for */
>  
>      int vm_id;
>      uint32_t vectors;
> @@ -387,9 +387,9 @@ static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i)
>                                &s->peers[posn].eventfds[i]);
>  }
>  
> -static void close_guest_eventfds(IVShmemState *s, int posn)
> +static void close_peer_eventfds(IVShmemState *s, int posn)
>  {
> -    int i, guest_curr_max;
> +    int i, n;
>  
>      if (!ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
>          return;
> @@ -399,14 +399,14 @@ static void close_guest_eventfds(IVShmemState *s, int posn)
>          return;
>      }
>  
> -    guest_curr_max = s->peers[posn].nb_eventfds;
> +    n = s->peers[posn].nb_eventfds;
>  
>      memory_region_transaction_begin();
> -    for (i = 0; i < guest_curr_max; i++) {
> +    for (i = 0; i < n; i++) {
>          ivshmem_del_eventfd(s, posn, i);
>      }
>      memory_region_transaction_commit();
> -    for (i = 0; i < guest_curr_max; i++) {
> +    for (i = 0; i < n; i++) {
>          event_notifier_cleanup(&s->peers[posn].eventfds[i]);
>      }
>  
> @@ -415,7 +415,7 @@ static void close_guest_eventfds(IVShmemState *s, int posn)
>  }
>  
>  /* this function increase the dynamic storage need to store data about other
> - * guests */
> + * peers */
>  static int resize_peers(IVShmemState *s, int new_min_size)
>  {
>  
> @@ -432,7 +432,7 @@ static int resize_peers(IVShmemState *s, int new_min_size)
>      old_size = s->nb_peers;
>      s->nb_peers = new_min_size;
>  
> -    IVSHMEM_DPRINTF("bumping storage to %d guests\n", s->nb_peers);
> +    IVSHMEM_DPRINTF("bumping storage to %d peers\n", s->nb_peers);
>  
>      s->peers = g_realloc(s->peers, s->nb_peers * sizeof(Peer));
>  
> @@ -503,7 +503,7 @@ static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
>      incoming_fd = qemu_chr_fe_get_msgfd(s->server_chr);
>      IVSHMEM_DPRINTF("posn is %ld, fd is %d\n", incoming_posn, incoming_fd);
>  
> -    /* make sure we have enough space for this guest */
> +    /* make sure we have enough space for this peer */
>      if (incoming_posn >= s->nb_peers) {
>          if (resize_peers(s, incoming_posn + 1) < 0) {
>              error_report("failed to resize peers array");
> @@ -522,9 +522,9 @@ static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
>              /* receive our posn */
>              s->vm_id = incoming_posn;
>          } else {
> -            /* otherwise an fd == -1 means an existing guest has gone away */
> +            /* otherwise an fd == -1 means an existing peer has gone away */
>              IVSHMEM_DPRINTF("posn %ld has gone away\n", incoming_posn);
> -            close_guest_eventfds(s, incoming_posn);
> +            close_peer_eventfds(s, incoming_posn);
>          }
>          return;
>      }
> @@ -571,7 +571,7 @@ static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
>      /* get a new eventfd */
>      nth_eventfd = peer->nb_eventfds++;
>  
> -    /* this is an eventfd for a particular guest VM */
> +    /* this is an eventfd for a particular peer VM */
>      IVSHMEM_DPRINTF("eventfds[%ld][%d] = %d\n", incoming_posn,
>                      nth_eventfd, incoming_fd);
>      event_notifier_init_fd(&peer->eventfds[nth_eventfd], incoming_fd);
> @@ -751,7 +751,7 @@ static void pci_ivshmem_realize(PCIDevice *dev, Error **errp)
>              return;
>          }
>  
> -        /* we allocate enough space for 16 guests and grow as needed */
> +        /* we allocate enough space for 16 peers and grow as needed */
>          resize_peers(s, 16);
>          s->vm_id = -1;
>  
> @@ -829,7 +829,7 @@ static void pci_ivshmem_exit(PCIDevice *dev)
>  
>      if (s->peers) {
>          for (i = 0; i < s->nb_peers; i++) {
> -            close_guest_eventfds(s, i);
> +            close_peer_eventfds(s, i);
>          }
>          g_free(s->peers);
>      }
> 
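
As a side note for anyone reading the diff cold, here is a minimal sketch
of the convention the patch settles on. The field names come from the
hunks above; the exact Peer layout is an inference from the accessors
used there, not a verbatim copy of the QEMU source.

    /* Sketch only -- layout inferred from the diff, not verbatim QEMU code. */
    typedef struct EventNotifier EventNotifier;  /* opaque for this sketch */

    typedef struct Peer {
        int nb_eventfds;              /* eventfds registered for this peer */
        EventNotifier *eventfds;      /* one notifier per interrupt vector */
    } Peer;

    typedef struct IVShmemState {
        int vm_id;     /* our own position: the local *guest* */
        Peer *peers;   /* remote ivshmem clients: the *peers* */
        int nb_peers;  /* how many peers we have space for */
    } IVShmemState;

In short: after this patch, 'guest' only ever means the VM that owns
s->vm_id, and anything stored in s->peers[] is a 'peer'.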

Reviewed-by: Claudio Fontana <address@hidden>