
Re: [Qemu-devel] [PATCH] main: force enabling of I/O thread


From: Anthony Liguori
Subject: Re: [Qemu-devel] [PATCH] main: force enabling of I/O thread
Date: Mon, 29 Aug 2011 13:03:13 -0500
User-agent: Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.17) Gecko/20110516 Lightning/1.0b2 Thunderbird/3.1.10

On 08/22/2011 08:35 AM, Jan Kiszka wrote:
On 2011-08-22 15:24, Anthony Liguori wrote:
Enabling the I/O thread by default seems like an important part of declaring
1.0.  Besides allowing true SMP support with KVM, the I/O thread means that the
TCG VCPU doesn't have to multiplex itself with the I/O dispatch routines which
currently requires a (racy) signal-based alarm system.

I know there have been concerns about performance.  I think so far the ones that
have come up (virtio-net) are most likely due to secondary reasons like
decreased batching.
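
For readers unfamiliar with the pre-iothread design referred to above: without
the I/O thread, a timer signal periodically interrupts the single emulation
thread so it can drop back to the main loop and dispatch I/O. The following is
a minimal, hypothetical C sketch of that pattern, not QEMU's actual code
(alarm_pending and run_one_tb are made-up names), showing where the race lives:

/* Hypothetical sketch of a signal-driven alarm loop (not QEMU code). */
#include <signal.h>
#include <sys/time.h>

static volatile sig_atomic_t alarm_pending;

static void alarm_handler(int sig)
{
    (void)sig;
    alarm_pending = 1;              /* ask the emulation loop to break out */
}

static void run_one_tb(void)
{
    /* stand-in for executing one translated block of guest code */
}

int main(void)
{
    struct sigaction sa = { .sa_handler = alarm_handler };
    struct itimerval it = { { 0, 1000 }, { 0, 1000 } };   /* 1 ms period */

    sigaction(SIGALRM, &sa, NULL);
    setitimer(ITIMER_REAL, &it, NULL);

    for (;;) {
        if (alarm_pending) {        /* check the flag ...               */
            alarm_pending = 0;
            /* ... and dispatch timers and I/O here */
        }
        /* A signal delivered at this point is not acted upon until
         * run_one_tb() returns, possibly much later. */
        run_one_tb();
    }
}

A signal landing between the flag check and the next translated block is only
serviced after that block completes, which is the sort of race and latency a
dedicated I/O thread avoids.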

iothread enabled without [1] gives unacceptable performance for ARM
emulation (Musicpal board) here. With that series applied, the host CPU
load is measurably higher, but no longer excessive.

Given that [1] has been applied now, are there any objections to this patch?

Blue, Aurelien?

Regards,

Anthony Liguori


As that series demonstrates, a major issue for TCG is that the iothread
and the VCPU thread run in lock-step; almost nothing is parallelized, at
least so far. Paolo and Peter (and to some degree /me) discussed some
better TCG threading last Thursday.

Jan

[1] http://thread.gmane.org/gmane.comp.emulators.qemu/99658
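
To make the lock-step observation concrete: with the I/O thread enabled, the
VCPU thread and the I/O thread both take the same global mutex, so device
emulation and TCG execution still mostly alternate rather than overlap. The
following is a rough, self-contained sketch of that handoff; the real logic
lives in cpus.c around qemu_global_mutex, and the loop bodies here are
illustrative only:

/* Rough illustration of the global-mutex handoff between the I/O thread
 * and the TCG VCPU thread; not the actual QEMU code. */
#include <pthread.h>

static pthread_mutex_t qemu_global_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *tcg_vcpu_thread(void *arg)
{
    (void)arg;
    for (;;) {
        pthread_mutex_lock(&qemu_global_mutex);
        /* execute translated guest code while holding the lock */
        pthread_mutex_unlock(&qemu_global_mutex);
        /* brief window in which the I/O thread can run */
    }
}

static void *io_thread(void *arg)
{
    (void)arg;
    for (;;) {
        /* wait for file descriptors / timers without the lock ... */
        pthread_mutex_lock(&qemu_global_mutex);
        /* ... but all device emulation runs under the same lock,
         * so it cannot overlap with TCG execution above. */
        pthread_mutex_unlock(&qemu_global_mutex);
    }
}

int main(void)
{
    pthread_t vcpu, io;

    pthread_create(&vcpu, NULL, tcg_vcpu_thread, NULL);
    pthread_create(&io, NULL, io_thread, NULL);
    pthread_join(vcpu, NULL);   /* both loops run forever in this sketch */
    pthread_join(io, NULL);
    return 0;
}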


I think we ought to force enabling the I/O thread early in 1.0 development and
commit to resolving any lingering issues.

Signed-off-by: Anthony Liguori <address@hidden>
---
  configure    |   13 -----
  cpus.c       |  143 ----------------------------------------------------------
  hw/qxl.c     |    4 --
  kvm-all.c    |    2 +-
  qemu-timer.c |   53 ---------------------
  vl.c         |   17 -------
  6 files changed, 1 insertions(+), 231 deletions(-)

diff --git a/configure b/configure
index 234a4c5..ef8d7ae 100755
--- a/configure
+++ b/configure
@@ -165,7 +165,6 @@ darwin_user="no"
  bsd_user="no"
  guest_base=""
  uname_release=""
-io_thread="no"
  mixemu="no"
  aix="no"
  blobs="yes"
@@ -722,8 +721,6 @@ for opt do
    ;;
    --enable-attr) attr="yes"
    ;;
-  --enable-io-thread) io_thread="yes"
-  ;;
    --disable-blobs) blobs="no"
    ;;
    --with-pkgversion=*) pkgversion=" ($optarg)"
@@ -2159,12 +2156,6 @@ EOF

  if compile_prog "" "" ; then
    signalfd=yes
-elif test "$kvm" = "yes" -a "$io_thread" != "yes"; then
-  echo
-  echo "ERROR: Host kernel lacks signalfd() support,"
-  echo "but KVM depends on it when the IO thread is disabled."
-  echo
-  exit 1
  fi

  # check if eventfd is supported
@@ -2711,7 +2702,6 @@ echo "NPTL support      $nptl"
  echo "GUEST_BASE        $guest_base"
  echo "PIE user targets  $user_pie"
  echo "vde support       $vde"
-echo "IO thread         $io_thread"
  echo "Linux AIO support $linux_aio"
  echo "ATTR/XATTR support $attr"
  echo "Install blobs     $blobs"
@@ -2969,9 +2959,6 @@ if test "$xen" = "yes" ; then
    echo "CONFIG_XEN_BACKEND=y">>  $config_host_mak
    echo "CONFIG_XEN_CTRL_INTERFACE_VERSION=$xen_ctrl_version">>  
$config_host_mak
  fi
-if test "$io_thread" = "yes" ; then
-  echo "CONFIG_IOTHREAD=y">>  $config_host_mak
-fi
  if test "$linux_aio" = "yes" ; then
    echo "CONFIG_LINUX_AIO=y">>  $config_host_mak
  fi
diff --git a/cpus.c b/cpus.c
index c996ac5..fe18a60 100644
--- a/cpus.c
+++ b/cpus.c
@@ -173,12 +173,9 @@ static void cpu_handle_guest_debug(CPUState *env)
  {
      gdb_set_stop_cpu(env);
      qemu_system_debug_request();
-#ifdef CONFIG_IOTHREAD
      env->stopped = 1;
-#endif
  }

-#ifdef CONFIG_IOTHREAD
  static void cpu_signal(int sig)
  {
      if (cpu_single_env) {
@@ -186,7 +183,6 @@ static void cpu_signal(int sig)
      }
      exit_request = 1;
  }
-#endif

  #ifdef CONFIG_LINUX
  static void sigbus_reraise(void)
@@ -262,12 +258,6 @@ static void qemu_kvm_eat_signals(CPUState *env)
              exit(1);
          }
      } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
-
-#ifndef CONFIG_IOTHREAD
-    if (sigismember(&chkset, SIGIO) || sigismember(&chkset, SIGALRM)) {
-        qemu_notify_event();
-    }
-#endif
  }

  #else /* !CONFIG_LINUX */
@@ -390,7 +380,6 @@ static int qemu_signal_init(void)
      int sigfd;
      sigset_t set;

-#ifdef CONFIG_IOTHREAD
      /* SIGUSR2 used by posix-aio-compat.c */
      sigemptyset(&set);
      sigaddset(&set, SIGUSR2);
@@ -409,18 +398,6 @@ static int qemu_signal_init(void)
      sigaddset(&set, SIGIO);
      sigaddset(&set, SIGALRM);
      sigaddset(&set, SIGBUS);
-#else
-    sigemptyset(&set);
-    sigaddset(&set, SIGBUS);
-    if (kvm_enabled()) {
-        /*
-         * We need to process timer signals synchronously to avoid a race
-         * between exit_request check and KVM vcpu entry.
-         */
-        sigaddset(&set, SIGIO);
-        sigaddset(&set, SIGALRM);
-    }
-#endif
      pthread_sigmask(SIG_BLOCK, &set, NULL);

      sigfd = qemu_signalfd(&set);
@@ -447,7 +424,6 @@ static void qemu_kvm_init_cpu_signals(CPUState *env)
      sigact.sa_handler = dummy_signal;
      sigaction(SIG_IPI, &sigact, NULL);

-#ifdef CONFIG_IOTHREAD
      pthread_sigmask(SIG_BLOCK, NULL, &set);
      sigdelset(&set, SIG_IPI);
      sigdelset(&set, SIGBUS);
@@ -456,17 +432,7 @@ static void qemu_kvm_init_cpu_signals(CPUState *env)
          fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
          exit(1);
      }
-#else
-    sigemptyset(&set);
-    sigaddset(&set, SIG_IPI);
-    sigaddset(&set, SIGIO);
-    sigaddset(&set, SIGALRM);
-    pthread_sigmask(SIG_BLOCK, &set, NULL);

-    pthread_sigmask(SIG_BLOCK, NULL, &set);
-    sigdelset(&set, SIGIO);
-    sigdelset(&set, SIGALRM);
-#endif
      sigdelset(&set, SIG_IPI);
      sigdelset(&set, SIGBUS);
      r = kvm_set_signal_mask(env, &set);
@@ -478,7 +444,6 @@ static void qemu_kvm_init_cpu_signals(CPUState *env)

  static void qemu_tcg_init_cpu_signals(void)
  {
-#ifdef CONFIG_IOTHREAD
      sigset_t set;
      struct sigaction sigact;

@@ -489,7 +454,6 @@ static void qemu_tcg_init_cpu_signals(void)
      sigemptyset(&set);
      sigaddset(&set, SIG_IPI);
      pthread_sigmask(SIG_UNBLOCK, &set, NULL);
-#endif
  }

  #else /* _WIN32 */
@@ -535,106 +499,6 @@ static void qemu_tcg_init_cpu_signals(void)
  }
  #endif /* _WIN32 */

-#ifndef CONFIG_IOTHREAD
-int qemu_init_main_loop(void)
-{
-    int ret;
-
-    ret = qemu_signal_init();
-    if (ret) {
-        return ret;
-    }
-
-    qemu_init_sigbus();
-
-    return qemu_event_init();
-}
-
-void qemu_main_loop_start(void)
-{
-}
-
-void qemu_init_vcpu(void *_env)
-{
-    CPUState *env = _env;
-    int r;
-
-    env->nr_cores = smp_cores;
-    env->nr_threads = smp_threads;
-
-    if (kvm_enabled()) {
-        r = kvm_init_vcpu(env);
-        if (r < 0) {
-            fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
-            exit(1);
-        }
-        qemu_kvm_init_cpu_signals(env);
-    } else {
-        qemu_tcg_init_cpu_signals();
-    }
-}
-
-int qemu_cpu_is_self(void *env)
-{
-    return 1;
-}
-
-void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
-{
-    func(data);
-}
-
-void resume_all_vcpus(void)
-{
-}
-
-void pause_all_vcpus(void)
-{
-}
-
-void qemu_cpu_kick(void *env)
-{
-}
-
-void qemu_cpu_kick_self(void)
-{
-#ifndef _WIN32
-    assert(cpu_single_env);
-
-    raise(SIG_IPI);
-#else
-    abort();
-#endif
-}
-
-void qemu_notify_event(void)
-{
-    CPUState *env = cpu_single_env;
-
-    qemu_event_increment ();
-    if (env) {
-        cpu_exit(env);
-    }
-    if (next_cpu && env != next_cpu) {
-        cpu_exit(next_cpu);
-    }
-    exit_request = 1;
-}
-
-void qemu_mutex_lock_iothread(void) {}
-void qemu_mutex_unlock_iothread(void) {}
-
-void cpu_stop_current(void)
-{
-}
-
-void vm_stop(int reason)
-{
-    do_vm_stop(reason);
-}
-
-#else /* CONFIG_IOTHREAD */
-
  QemuMutex qemu_global_mutex;
  static QemuCond qemu_io_proceeded_cond;
  static bool iothread_requesting_mutex;
@@ -1036,8 +900,6 @@ void vm_stop(int reason)
      do_vm_stop(reason);
  }

-#endif
-
  static int tcg_cpu_exec(CPUState *env)
  {
      int ret;
@@ -1092,11 +954,6 @@ bool cpu_exec_all(void)
          qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

-#ifndef CONFIG_IOTHREAD
-        if (qemu_alarm_pending()) {
-            break;
-        }
-#endif
          if (cpu_can_run(env)) {
              if (kvm_enabled()) {
                  r = kvm_cpu_exec(env);
diff --git a/hw/qxl.c b/hw/qxl.c
index bab60a5..c3a3e97 100644
--- a/hw/qxl.c
+++ b/hw/qxl.c
@@ -1388,11 +1388,7 @@ static void init_pipe_signaling(PCIQXLDevice *d)
         dprint(d, 1, "%s: pipe creation failed\n", __FUNCTION__);
         return;
     }
-#ifdef CONFIG_IOTHREAD
     fcntl(d->pipe[0], F_SETFL, O_NONBLOCK);
-#else
-   fcntl(d->pipe[0], F_SETFL, O_NONBLOCK /* | O_ASYNC */);
-#endif
     fcntl(d->pipe[1], F_SETFL, O_NONBLOCK);
     fcntl(d->pipe[0], F_SETOWN, getpid());

diff --git a/kvm-all.c b/kvm-all.c
index 0ae2e26..fbb9ff3 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -479,7 +479,7 @@ static int kvm_check_many_ioeventfds(void)
       * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
       * can avoid creating too many ioeventfds.
       */
-#if defined(CONFIG_EVENTFD) && defined(CONFIG_IOTHREAD)
+#if defined(CONFIG_EVENTFD)
      int ioeventfds[7];
      int i, ret = 0;
      for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
diff --git a/qemu-timer.c b/qemu-timer.c
index 19313d3..46dd483 100644
--- a/qemu-timer.c
+++ b/qemu-timer.c
@@ -101,22 +101,6 @@ static int64_t cpu_get_clock(void)
      }
  }

-#ifndef CONFIG_IOTHREAD
-static int64_t qemu_icount_delta(void)
-{
-    if (!use_icount) {
-        return 5000 * (int64_t) 1000000;
-    } else if (use_icount == 1) {
-        /* When not using an adaptive execution frequency
-           we tend to get badly out of sync with real time,
-           so just delay for a reasonable amount of time.  */
-        return 0;
-    } else {
-        return cpu_get_icount() - cpu_get_clock();
-    }
-}
-#endif
-
  /* enable cpu_get_ticks() */
  void cpu_enable_ticks(void)
  {
@@ -688,9 +672,7 @@ void configure_icount(const char *option)
      if (!option)
          return;

-#ifdef CONFIG_IOTHREAD
      vm_clock->warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
-#endif

      if (strcmp(option, "auto") != 0) {
          icount_time_shift = strtol(option, NULL, 0);
@@ -1178,41 +1160,6 @@ void quit_timers(void)

  int qemu_calculate_timeout(void)
  {
-#ifndef CONFIG_IOTHREAD
-    int timeout;
-
-    if (!vm_running)
-        timeout = 5000;
-    else {
-     /* XXX: use timeout computed from timers */
-        int64_t add;
-        int64_t delta;
-        /* Advance virtual time to the next event.  */
-       delta = qemu_icount_delta();
-        if (delta > 0) {
-            /* If virtual time is ahead of real time then just
-               wait for IO.  */
-            timeout = (delta + 999999) / 1000000;
-        } else {
-            /* Wait for either IO to occur or the next
-               timer event.  */
-            add = qemu_next_icount_deadline();
-            /* We advance the timer before checking for IO.
-               Limit the amount we advance so that early IO
-               activity won't get the guest too far ahead.  */
-            if (add > 10000000)
-                add = 10000000;
-            delta += add;
-            qemu_icount += qemu_icount_round (add);
-            timeout = delta / 1000000;
-            if (timeout < 0)
-                timeout = 0;
-        }
-    }
-
-    return timeout;
-#else /* CONFIG_IOTHREAD */
      return 1000;
-#endif
  }

diff --git a/vl.c b/vl.c
index d661e8e..11f5669 100644
--- a/vl.c
+++ b/vl.c
@@ -1438,17 +1438,6 @@ void main_loop_wait(int nonblocking)

  }

-#ifndef CONFIG_IOTHREAD
-static int vm_request_pending(void)
-{
-    return powerdown_requested ||
-           reset_requested ||
-           shutdown_requested ||
-           debug_requested ||
-           vmstop_requested;
-}
-#endif
-
  qemu_irq qemu_system_powerdown;

  static void main_loop(void)
@@ -1462,12 +1451,6 @@ static void main_loop(void)
      qemu_main_loop_start();

      for (;;) {
-#ifndef CONFIG_IOTHREAD
-        nonblocking = cpu_exec_all();
-        if (vm_request_pending()) {
-            nonblocking = true;
-        }
-#endif
  #ifdef CONFIG_PROFILER
          ti = profile_getclock();
  #endif
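
For context on this last vl.c hunk: before the change, a non-iothread build
drove guest execution directly from main_loop() via the removed cpu_exec_all()
call, whereas with the I/O thread the VCPUs run in their own thread and
main_loop() only services the event loop. A simplified before/after sketch,
with cpu_exec_all() and main_loop_wait() stubbed out so it compiles on its own:

/* Simplified contrast only, not the actual QEMU control flow;
 * cpu_exec_all() and main_loop_wait() are stubbed for illustration. */
#include <stdbool.h>

static bool cpu_exec_all(void)      { return false; }   /* stub */
static void main_loop_wait(bool nb) { (void)nb; }       /* stub */

/* Before this patch, without CONFIG_IOTHREAD: one thread interleaves
 * guest execution with event dispatch. */
static void main_loop_without_iothread(void)
{
    for (;;) {
        bool nonblocking = cpu_exec_all();   /* run TCG VCPUs inline */
        main_loop_wait(nonblocking);         /* then poll for I/O    */
    }
}

/* After this patch (I/O thread always on): main_loop() only services
 * the event loop; a dedicated VCPU thread calls cpu_exec_all() itself. */
static void main_loop_with_iothread(void)
{
    for (;;) {
        main_loop_wait(false);
    }
}

int main(void)
{
    (void)main_loop_without_iothread;   /* keep the "before" variant referenced */
    main_loop_with_iothread();
    return 0;
}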




