From: Jason J. Herne
Subject: [Qemu-devel] [PATCH 1/2 v4] [S390-KVM] Allow selective runtime register synchronization
Date: Thu, 25 Apr 2013 10:25:50 -0400

From: "Jason J. Herne" <address@hidden>

We want to avoid expensive register synchronization ioctls on the hot path, so
a new kvm_s390_get_registers_partial() is introduced as a complement to
kvm_arch_get_registers().  The new function is called on the hot path, and
kvm_arch_get_registers() is called when we need the complete runtime register
state.

kvm_arch_put_registers() is updated to write the full runtime register set back
to KVM only when the full set has been dirtied.  This avoids sending stale data
back to KVM when only the partial runtime register set has been fetched.
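To illustrate the intended call pattern, here is a minimal sketch of a
hypothetical hot-path intercept handler (handle_fast_instruction() is made up
for the example and is not part of this patch; it assumes the surrounding
target-s390x/kvm.c context):

/* Hypothetical example, not part of this patch: a hot-path handler that
 * only needs the PSW, general purpose, access and control registers can
 * use the cheap partial fetch instead of a full register sync.
 */
static int handle_fast_instruction(S390CPU *cpu)
{
    int r;

    /* cheap: PSW, GPRs, ACRs and CRs only; no GET_ONE_REG ioctls */
    r = kvm_s390_get_registers_partial(CPU(cpu));
    if (r < 0) {
        return r;
    }

    /* ... decode and emulate using cpu->env.psw and cpu->env.regs[] ... */
    return 0;
}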

Signed-off-by: Jason J. Herne <address@hidden>
Reviewed-by: Christian Borntraeger <address@hidden>
---
 target-s390x/cpu.h |   17 +++++++++++++
 target-s390x/kvm.c |   67 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 84 insertions(+)

diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
index e351005..0ce82cf 100644
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -78,6 +78,11 @@ typedef struct MchkQueue {
     uint16_t type;
 } MchkQueue;
 
+/* Defined values for CPUS390XState.runtime_reg_dirty_mask */
+#define KVM_S390_RUNTIME_DIRTY_NONE     0
+#define KVM_S390_RUNTIME_DIRTY_PARTIAL  1
+#define KVM_S390_RUNTIME_DIRTY_FULL     2
+
 typedef struct CPUS390XState {
     uint64_t regs[16];     /* GP registers */
     CPU_DoubleU fregs[16]; /* FP registers */
@@ -121,6 +126,13 @@ typedef struct CPUS390XState {
     uint64_t cputm;
     uint32_t todpr;
 
+    /* On S390 the runtime register set has two dirty states:
+     * a partial dirty state in which only the registers that
+     * are needed all the time are fetched, and a fully dirty
+     * state in which all runtime registers are fetched.
+     */
+    uint32_t runtime_reg_dirty_mask;
+
     CPU_COMMON
 
     /* reset does memset(0) up to here */
@@ -1068,6 +1080,7 @@ void kvm_s390_io_interrupt(S390CPU *cpu, uint16_t subchannel_id,
                            uint32_t io_int_word);
 void kvm_s390_crw_mchk(S390CPU *cpu);
 void kvm_s390_enable_css_support(S390CPU *cpu);
+int kvm_s390_get_registers_partial(CPUState *cpu);
 #else
 static inline void kvm_s390_io_interrupt(S390CPU *cpu,
                                         uint16_t subchannel_id,
@@ -1082,6 +1095,10 @@ static inline void kvm_s390_crw_mchk(S390CPU *cpu)
 static inline void kvm_s390_enable_css_support(S390CPU *cpu)
 {
 }
+static inline int kvm_s390_get_registers_partial(CPUState *cpu)
+{
+    return -ENOSYS;
+}
 #endif
 
 static inline void s390_io_interrupt(S390CPU *cpu,
diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c
index 644f484..02b2e39 100644
--- a/target-s390x/kvm.c
+++ b/target-s390x/kvm.c
@@ -123,6 +123,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
 {
     S390CPU *cpu = S390_CPU(cs);
     CPUS390XState *env = &cpu->env;
+    struct kvm_one_reg reg;
     struct kvm_sregs sregs;
     struct kvm_regs regs;
     int ret;
@@ -147,6 +148,30 @@ int kvm_arch_put_registers(CPUState *cs, int level)
         }
     }
 
+    if (env->runtime_reg_dirty_mask == KVM_S390_RUNTIME_DIRTY_FULL) {
+        reg.id = KVM_REG_S390_CPU_TIMER;
+        reg.addr = (__u64)&(env->cputm);
+        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+        if (ret < 0) {
+            return ret;
+        }
+
+        reg.id = KVM_REG_S390_CLOCK_COMP;
+        reg.addr = (__u64)&(env->ckc);
+        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+        if (ret < 0) {
+            return ret;
+        }
+
+        reg.id = KVM_REG_S390_TODPR;
+        reg.addr = (__u64)&(env->todpr);
+        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+    env->runtime_reg_dirty_mask = KVM_S390_RUNTIME_DIRTY_NONE;
+
     /* Do we need to save more than that? */
     if (level == KVM_PUT_RUNTIME_STATE) {
         return 0;
@@ -186,11 +211,52 @@ int kvm_arch_get_registers(CPUState *cs)
 {
     S390CPU *cpu = S390_CPU(cs);
     CPUS390XState *env = &cpu->env;
+    struct kvm_one_reg reg;
+    int r;
+
+    r = kvm_s390_get_registers_partial(cs);
+    if (r < 0) {
+        return r;
+    }
+
+    reg.id = KVM_REG_S390_CPU_TIMER;
+    reg.addr = (__u64)&(env->cputm);
+    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    if (r < 0) {
+        return r;
+    }
+
+    reg.id = KVM_REG_S390_CLOCK_COMP;
+    reg.addr = (__u64)&(env->ckc);
+    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    if (r < 0) {
+        return r;
+    }
+
+    reg.id = KVM_REG_S390_TODPR;
+    reg.addr = (__u64)&(env->todpr);
+    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    if (r < 0) {
+        return r;
+    }
+
+    env->runtime_reg_dirty_mask = KVM_S390_RUNTIME_DIRTY_FULL;
+    return 0;
+}
+
+int kvm_s390_get_registers_partial(CPUState *cs)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
     struct kvm_sregs sregs;
     struct kvm_regs regs;
     int ret;
     int i;
 
+    if (env->runtime_reg_dirty_mask) {
+        return 0;
+    }
+
     /* get the PSW */
     env->psw.addr = cs->kvm_run->psw_addr;
     env->psw.mask = cs->kvm_run->psw_mask;
@@ -236,6 +302,7 @@ int kvm_arch_get_registers(CPUState *cs)
         /* no prefix without sync regs */
     }
 
+    env->runtime_reg_dirty_mask = KVM_S390_RUNTIME_DIRTY_PARTIAL;
     return 0;
 }
 
-- 
1.7.10.4
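
For reference, a sketch (illustration only, not part of the patch) of how the
dirty mask gates the expensive one-reg ioctls over one intercept round trip.
In practice these calls are reached through cpu_synchronize_state() and
kvm_cpu_exec() rather than invoked directly:

/* Illustration only: the runtime_reg_dirty_mask life cycle, using the
 * functions introduced or modified by this patch.
 */
static void example_round_trip(CPUState *cs)
{
    /* hot path: cheap fetch of PSW/GPRs/ACRs/CRs    -> DIRTY_PARTIAL */
    kvm_s390_get_registers_partial(cs);

    /* slow path, only when the full state is needed -> DIRTY_FULL */
    kvm_arch_get_registers(cs);

    /* before re-entering the guest: cputm/ckc/todpr are written back
     * only if the mask is DIRTY_FULL, then the mask  -> DIRTY_NONE
     */
    kvm_arch_put_registers(cs, KVM_PUT_RUNTIME_STATE);
}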



