[qemu-s390x] [PULL 12/29] s390x/flic: optimize CPU wakeup for TCG


From: Cornelia Huck
Subject: [qemu-s390x] [PULL 12/29] s390x/flic: optimize CPU wakeup for TCG
Date: Fri, 9 Feb 2018 10:25:07 +0100

From: David Hildenbrand <address@hidden>

Kicking all CPUs on every floating interrupt is far from efficient.
Let's optimize it at least a little bit.
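For reference, below is a minimal, self-contained sketch of the gating this
patch introduces: a halted CPU is only woken if its PSW enables the class of
the pending floating interrupt. The constant values and the helper name
(needs_wakeup) are simplified stand-ins for illustration, not QEMU's real
definitions; the authoritative logic is the qemu_s390_flic_notify() hunk
further down.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in flag values; the real ones live in QEMU's s390x headers. */
    #define FLIC_PENDING_SERVICE  (1u << 0)
    #define FLIC_PENDING_IO       (1u << 1)
    #define FLIC_PENDING_MCHK_CR  (1u << 2)

    #define PSW_MASK_EXT     (1ull << 0)
    #define PSW_MASK_IO      (1ull << 1)
    #define PSW_MASK_MCHECK  (1ull << 2)

    /*
     * Mirror of the check added to qemu_s390_flic_notify(): for a halted
     * CPU, only request a wakeup when the PSW mask enables the interrupt
     * class matching the pending type; subclasses are rechecked by the
     * CPU itself after waking up.
     */
    static bool needs_wakeup(uint32_t type, uint64_t psw_mask)
    {
        if (type & FLIC_PENDING_SERVICE) {
            return psw_mask & PSW_MASK_EXT;
        } else if (type & FLIC_PENDING_IO) {
            return psw_mask & PSW_MASK_IO;
        } else if (type & FLIC_PENDING_MCHK_CR) {
            return psw_mask & PSW_MASK_MCHECK;
        }
        return true;  /* unknown type: keep kicking, as before the patch */
    }

    int main(void)
    {
        /* Halted CPU with I/O disabled in the PSW: no wakeup for I/O. */
        printf("%d\n", needs_wakeup(FLIC_PENDING_IO, PSW_MASK_EXT));  /* 0 */
        /* Same interrupt with I/O enabled: wakeup. */
        printf("%d\n", needs_wakeup(FLIC_PENDING_IO, PSW_MASK_IO));   /* 1 */
        return 0;
    }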

Signed-off-by: David Hildenbrand <address@hidden>
Message-Id: <address@hidden>
Signed-off-by: Cornelia Huck <address@hidden>
---
 hw/intc/s390_flic.c     | 31 +++++++++++++++++++++++++++++--
 target/s390x/cpu.h      |  4 ++++
 target/s390x/internal.h |  5 -----
 3 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/hw/intc/s390_flic.c b/hw/intc/s390_flic.c
index cb216de9ba..5febde2d65 100644
--- a/hw/intc/s390_flic.c
+++ b/hw/intc/s390_flic.c
@@ -161,10 +161,37 @@ static void qemu_s390_flic_notify(uint32_t type)
 
     /*
      * We have to make all CPUs see CPU_INTERRUPT_HARD, so they might
-     * consider it. TODO: don't kick/wakeup all VCPUs but try to be
-     * smarter (using the interrupt type).
+     * consider it. We will kick all running CPUs and only relevant
+     * sleeping ones.
      */
     CPU_FOREACH(cs) {
+        S390CPU *cpu = S390_CPU(cs);
+
+        cs->interrupt_request |= CPU_INTERRUPT_HARD;
+
+        /* ignore CPUs that are not sleeping */
+        if (s390_cpu_get_state(cpu) != CPU_STATE_OPERATING &&
+            s390_cpu_get_state(cpu) != CPU_STATE_LOAD) {
+            continue;
+        }
+
+        /* we always kick running CPUs for now, this is tricky */
+        if (cs->halted) {
+            /* don't check for subclasses, CPUs double check when waking up */
+            if (type & FLIC_PENDING_SERVICE) {
+                if (!(cpu->env.psw.mask & PSW_MASK_EXT)) {
+                    continue;
+                }
+            } else if (type & FLIC_PENDING_IO) {
+                if (!(cpu->env.psw.mask & PSW_MASK_IO)) {
+                    continue;
+                }
+            } else if (type & FLIC_PENDING_MCHK_CR) {
+                if (!(cpu->env.psw.mask & PSW_MASK_MCHECK)) {
+                    continue;
+                }
+            }
+        }
         cpu_interrupt(cs, CPU_INTERRUPT_HARD);
     }
 }
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index ba6cf0cda5..76c31d970f 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -692,6 +692,10 @@ static inline unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
     return 0;
 }
 #endif /* CONFIG_USER_ONLY */
+static inline uint8_t s390_cpu_get_state(S390CPU *cpu)
+{
+    return cpu->env.cpu_state;
+}
 
 
 /* cpu_models.c */
diff --git a/target/s390x/internal.h b/target/s390x/internal.h
index fea165ffe4..d911e84958 100644
--- a/target/s390x/internal.h
+++ b/target/s390x/internal.h
@@ -278,11 +278,6 @@ static inline void s390_do_cpu_full_reset(CPUState *cs, run_on_cpu_data arg)
     cpu_reset(cs);
 }
 
-static inline uint8_t s390_cpu_get_state(S390CPU *cpu)
-{
-    return cpu->env.cpu_state;
-}
-
 
 /* arch_dump.c */
 int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
-- 
2.13.6