From: Marc-André Lureau
Subject: Re: [Qemu-devel] [PATCH 2/3] virtio-pci: add an option to bypass guest_notifier_mask
Date: Mon, 7 Dec 2015 14:59:10 +0100

Hi

On Mon, Dec 7, 2015 at 2:37 PM, Marc-André Lureau
<address@hidden> wrote:
> Hi
>
> On Thu, Dec 3, 2015 at 10:53 AM, Didier Pallard
> <address@hidden> wrote:
>> Using the guest_notifier_mask function in the vhost-user case may
>> break the interrupt mask paradigm: the mask/unmask is not actually
>> applied when guest_notifier_mask returns; instead, a message is
>> posted on a unix socket and processed later.
>>
>> Add an option bit to disable the use of guest_notifier_mask
>> in virtio-pci.
>
> Why not disable it whenever vhost-user is used, if it's broken and
> too inefficient to be fixed?

Sorry, I missed the following patch.

I am not sure it's necessary to have a user-visible property,
especially if it's overridden later. But apparently, other properties
are already forced.

Looks good overall to me, but maintainers might have different opinions.
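
For readers skimming the diff, here is a minimal standalone sketch of the
gating condition the patch adds at each call site; FakeProxy and the main()
driver below are illustrative stand-ins, not QEMU code:

#include <stdbool.h>
#include <stdio.h>

#define VIRTIO_PCI_FLAG_USE_NOTIFIERMASK_BIT 6
#define VIRTIO_PCI_FLAG_USE_NOTIFIERMASK \
    (1 << VIRTIO_PCI_FLAG_USE_NOTIFIERMASK_BIT)

/* Illustrative stand-in for VirtIOPCIProxy; only the flags field matters. */
typedef struct {
    unsigned int flags;
} FakeProxy;

static bool use_notifier_mask_path(const FakeProxy *proxy, bool has_mask_cb)
{
    /* The condition the patch inserts before every guest_notifier_mask use:
     * the property must be enabled AND the device class must provide the
     * callback; otherwise the code sets up or releases the irqfd directly
     * (kvm_virtio_pci_irqfd_use/release). */
    return (proxy->flags & VIRTIO_PCI_FLAG_USE_NOTIFIERMASK) && has_mask_cb;
}

int main(void)
{
    FakeProxy dflt = { .flags = VIRTIO_PCI_FLAG_USE_NOTIFIERMASK };
    FakeProxy off  = { .flags = 0 };  /* e.g. usenotifiermask=off */

    printf("default : %s\n", use_notifier_mask_path(&dflt, true) ?
           "guest_notifier_mask" : "irqfd fallback");
    printf("disabled: %s\n", use_notifier_mask_path(&off, true) ?
           "guest_notifier_mask" : "irqfd fallback");
    return 0;
}

With the property left at its default (on) nothing changes; starting a
device with something like -device virtio-net-pci,...,usenotifiermask=off
would always take the fallback path, although the follow-up patch
apparently forces the flag off for vhost-user anyway.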

>>
>> Signed-off-by: Didier Pallard <address@hidden>
>> Reviewed-by: Thibaut Collet <address@hidden>
>> ---
>>  hw/virtio/virtio-pci.c | 29 +++++++++++++++++++++++------
>>  hw/virtio/virtio-pci.h |  6 ++++++
>>  2 files changed, 29 insertions(+), 6 deletions(-)
>>
>> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
>> index dd48562..26bb617 100644
>> --- a/hw/virtio/virtio-pci.c
>> +++ b/hw/virtio/virtio-pci.c
>> @@ -806,7 +806,8 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
>>          /* If guest supports masking, set up irqfd now.
>>           * Otherwise, delay until unmasked in the frontend.
>>           */
>> -        if (k->guest_notifier_mask) {
>> +        if ((proxy->flags & VIRTIO_PCI_FLAG_USE_NOTIFIERMASK) &&
>> +            k->guest_notifier_mask) {
>>              ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
>>              if (ret < 0) {
>>                  kvm_virtio_pci_vq_vector_release(proxy, vector);
>> @@ -822,7 +823,8 @@ undo:
>>          if (vector >= msix_nr_vectors_allocated(dev)) {
>>              continue;
>>          }
>> -        if (k->guest_notifier_mask) {
>> +        if ((proxy->flags & VIRTIO_PCI_FLAG_USE_NOTIFIERMASK) &&
>> +            k->guest_notifier_mask) {
>>              kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
>>          }
>>          kvm_virtio_pci_vq_vector_release(proxy, vector);
>> @@ -849,7 +851,8 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
>>          /* If guest supports masking, clean up irqfd now.
>>           * Otherwise, it was cleaned when masked in the frontend.
>>           */
>> -        if (k->guest_notifier_mask) {
>> +        if ((proxy->flags & VIRTIO_PCI_FLAG_USE_NOTIFIERMASK) &&
>> +            k->guest_notifier_mask) {
>>              kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
>>          }
>>          kvm_virtio_pci_vq_vector_release(proxy, vector);
>> @@ -882,7 +885,8 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
>>      /* If guest supports masking, irqfd is already setup, unmask it.
>>       * Otherwise, set it up now.
>>       */
>> -    if (k->guest_notifier_mask) {
>> +    if ((proxy->flags & VIRTIO_PCI_FLAG_USE_NOTIFIERMASK) &&
>> +        k->guest_notifier_mask) {
>>          k->guest_notifier_mask(vdev, queue_no, false);
>>          /* Test after unmasking to avoid losing events. */
>>          if (k->guest_notifier_pending &&
>> @@ -905,7 +909,8 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
>>      /* If guest supports masking, keep irqfd but mask it.
>>       * Otherwise, clean it up now.
>>       */
>> -    if (k->guest_notifier_mask) {
>> +    if ((proxy->flags & VIRTIO_PCI_FLAG_USE_NOTIFIERMASK) &&
>> +        k->guest_notifier_mask) {
>>          k->guest_notifier_mask(vdev, queue_no, true);
>>      } else {
>>          kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
>> @@ -1022,7 +1027,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
>>          event_notifier_cleanup(notifier);
>>      }
>>
>> -    if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
>> +    if (!msix_enabled(&proxy->pci_dev) &&
>> +        (proxy->flags & VIRTIO_PCI_FLAG_USE_NOTIFIERMASK) &&
>> +        vdc->guest_notifier_mask) {
>>          vdc->guest_notifier_mask(vdev, n, !assign);
>>      }
>>
>> @@ -1164,6 +1171,8 @@ static void virtio_9p_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
>>  static Property virtio_9p_pci_properties[] = {
>>      DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
>>                      VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
>> +    DEFINE_PROP_BIT("usenotifiermask", VirtIOPCIProxy, flags,
>> +                    VIRTIO_PCI_FLAG_USE_NOTIFIERMASK_BIT, true),
>>      DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
>>      DEFINE_PROP_END_OF_LIST(),
>>  };
>> @@ -1908,6 +1917,8 @@ static Property virtio_blk_pci_properties[] = {
>>      DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
>>      DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
>>                      VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
>> +    DEFINE_PROP_BIT("usenotifiermask", VirtIOPCIProxy, flags,
>> +                    VIRTIO_PCI_FLAG_USE_NOTIFIERMASK_BIT, true),
>>      DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
>>      DEFINE_PROP_END_OF_LIST(),
>>  };
>> @@ -1961,6 +1972,8 @@ static const TypeInfo virtio_blk_pci_info = {
>>  static Property virtio_scsi_pci_properties[] = {
>>      DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
>>                      VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
>> +    DEFINE_PROP_BIT("usenotifiermask", VirtIOPCIProxy, flags,
>> +                    VIRTIO_PCI_FLAG_USE_NOTIFIERMASK_BIT, true),
>>      DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
>>                         DEV_NVECTORS_UNSPECIFIED),
>>      DEFINE_PROP_END_OF_LIST(),
>> @@ -2175,6 +2188,8 @@ static void virtio_serial_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
>>  static Property virtio_serial_pci_properties[] = {
>>      DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
>>                      VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
>> +    DEFINE_PROP_BIT("usenotifiermask", VirtIOPCIProxy, flags,
>> +                    VIRTIO_PCI_FLAG_USE_NOTIFIERMASK_BIT, true),
>>      DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
>>      DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
>>      DEFINE_PROP_END_OF_LIST(),
>> @@ -2215,6 +2230,8 @@ static const TypeInfo virtio_serial_pci_info = {
>>  static Property virtio_net_properties[] = {
>>      DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
>>                      VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
>> +    DEFINE_PROP_BIT("usenotifiermask", VirtIOPCIProxy, flags,
>> +                    VIRTIO_PCI_FLAG_USE_NOTIFIERMASK_BIT, true),
>>      DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
>>      DEFINE_PROP_END_OF_LIST(),
>>  };
>> diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
>> index ffb74bb..aecd4eb 100644
>> --- a/hw/virtio/virtio-pci.h
>> +++ b/hw/virtio/virtio-pci.h
>> @@ -86,6 +86,12 @@ typedef struct VirtioBusClass VirtioPCIBusClass;
>>  #define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY \
>>      (1 << VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT)
>>
>> +/* Where a vhost-user implementation exists, using the guest notifier mask
>> + * feature can lead to improper interrupt management. Add a flag that
>> + * allows disabling this guest notifier mask if desired. */
>> +#define VIRTIO_PCI_FLAG_USE_NOTIFIERMASK_BIT 6
>> +#define VIRTIO_PCI_FLAG_USE_NOTIFIERMASK (1 << VIRTIO_PCI_FLAG_USE_NOTIFIERMASK_BIT)
>> +
>>  typedef struct {
>>      MSIMessage msg;
>>      int virq;
>> --
>> 2.1.4
>>
>>
>
>
>
> --
> Marc-André Lureau



-- 
Marc-André Lureau


