[Qemu-devel] [PATCH 4/5] Introduce kvm logging interface.


From: Glauber Costa
Subject: [Qemu-devel] [PATCH 4/5] Introduce kvm logging interface.
Date: Tue, 11 Nov 2008 00:16:08 -0200

Introduce functions to start and stop dirty page logging of memory regions.
We select the region based on its start address.

Signed-off-by: Glauber Costa <address@hidden>
---
 kvm-all.c |  139 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 kvm.h     |    5 ++
 2 files changed, 144 insertions(+), 0 deletions(-)
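
For callers, the intended flow is: enable logging on a registered slot with
kvm_log_start(), let the guest run, periodically pull the kernel's per-page
dirty bits into qemu's bitmap with kvm_physical_sync_dirty_bitmap(), and
disable logging with kvm_log_stop() once tracking is no longer needed. A
minimal sketch of that flow (not part of the patch; the framebuffer region
name, bounds and phys_offset are made-up values, and the region is assumed to
have been registered through kvm_set_phys_mem() beforehand):

/* Illustrative only: track writes to a hypothetical framebuffer region. */
static void track_framebuffer_sketch(target_phys_addr_t fb_start,
                                     ram_addr_t fb_size,
                                     ram_addr_t fb_phys_offset)
{
    if (kvm_log_start(fb_start, fb_size) < 0)
        return;                          /* no slot at fb_start, or ioctl failed */

    /* ... guest runs and dirties pages ... */

    /* Copy KVM's dirty bits into qemu's dirty bitmap for that range. */
    kvm_physical_sync_dirty_bitmap(fb_start, fb_start + fb_size, fb_phys_offset);

    kvm_log_stop(fb_start, fb_size);     /* done tracking */
}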

diff --git a/kvm-all.c b/kvm-all.c
index 6d50609..cb7bf6a 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -31,8 +31,13 @@
     do { } while (0)
 #endif
 
+#define warning(fmt, ...) \
+    do { dprintf("%s:%d: " fmt, __func__, __LINE__, ## __VA_ARGS__); } while (0)
+
 typedef struct kvm_userspace_memory_region KVMSlot;
 
+typedef struct kvm_dirty_log KVMDirtyLog;
+
 int kvm_allowed = 0;
 
 struct KVMState
@@ -109,6 +114,140 @@ err:
     return ret;
 }
 
+/*
+ * dirty pages logging control
+ */
+static int kvm_dirty_pages_log_change(KVMSlot *mem,
+                                      unsigned flags,
+                                      unsigned mask)
+{
+    int r = -1;
+    KVMState *s = kvm_state;
+
+    flags = (mem->flags & ~mask) | flags;
+    if (flags == mem->flags)
+        return 0;
+
+    mem->flags = flags;
+
+    r = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, mem);
+    if (r == -1)
+        fprintf(stderr, "%s: %m\n", __FUNCTION__);
+
+    return r;
+}
+
+int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t len)
+{
+    KVMState *s = kvm_state;
+    KVMSlot *mem = kvm_lookup_slot(s, phys_addr);
+
+    if (mem == NULL) {
+        fprintf(stderr, "BUG: %s: invalid parameters\n", __func__);
+        return -EINVAL;
+    }
+
+    /* Already logging, no need to issue ioctl */
+    if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
+        return 0;
+
+    dprintf("slot %d: enable logging (phys %llx, uaddr %llx)\n",
+            mem->slot, mem->guest_phys_addr, mem->userspace_addr);
+
+    return kvm_dirty_pages_log_change(mem,
+                                      KVM_MEM_LOG_DIRTY_PAGES,
+                                      KVM_MEM_LOG_DIRTY_PAGES);
+}
+
+int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t len)
+{
+
+    KVMState *s = kvm_state;
+    KVMSlot *mem = kvm_lookup_slot(s, phys_addr);
+
+    if (mem == NULL) {
+        fprintf(stderr, "BUG: %s: invalid parameters\n", __func__);
+        return -EINVAL;
+    }
+
+    /* Not logging, no need to issue ioctl */
+    if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
+        return 0;
+
+    dprintf("slot %d: disabling logging\n", mem->slot);
+    return kvm_dirty_pages_log_change(mem,
+                                      0,
+                                      KVM_MEM_LOG_DIRTY_PAGES);
+}
+
+static inline int lookup_bitmap_phys(unsigned long *dirty, unsigned nr)
+{
+    unsigned word = nr / (sizeof(*dirty) * 8);
+    unsigned bit = nr % (sizeof(*dirty) * 8);
+    int ret;
+
+    ret = (dirty[word] >> bit) & 1;
+    return ret;
+}
+
+/**
+ * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
+ * If a phys_offset parameter is given, this function updates qemu's dirty
+ * bitmap using cpu_physical_memory_set_dirty(): every page the kernel reports
+ * as dirty is also marked dirty in qemu's bitmap.
+ *
+ * @start_addr: start of logged region. This is what we use to search for the memslot
+ * @end_addr: end of logged region. Only matters if we are updating the qemu dirty bitmap.
+ * @phys_offset: the phys_offset we want to use for the qemu dirty bitmap update.
+ *               Passing NULL skips that update; in that case, we only grab the
+ *               bitmap from the kernel.
+ */
+void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr,
+                                    ram_addr_t phys_offset)
+{
+    KVMState *s = kvm_state;
+    KVMDirtyLog d;
+    KVMSlot *mem = kvm_lookup_slot(s, start_addr);
+    unsigned long alloc_size;
+    ram_addr_t addr;
+    target_phys_addr_t phys_addr;
+
+    if (mem == NULL) {
+        fprintf(stderr, "BUG: %s: invalid parameters\n", __func__);
+        return;
+    }
+    alloc_size = (mem->memory_size >> TARGET_PAGE_BITS) / sizeof(d.dirty_bitmap);
+
+    d.dirty_bitmap = qemu_mallocz(alloc_size);
+
+    if (d.dirty_bitmap == NULL) {
+        warning("Could not allocate dirty bitmap\n");
+        return;
+    }
+
+    d.slot = mem->slot;
+    dprintf("slot %d, phys_addr %llx, uaddr: %llx\n",
+            d.slot, mem->guest_phys_addr, mem->userspace_addr);
+
+    if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
+        warning("ioctl failed %d\n", errno);
+        goto out;
+    }
+
+    /* Caller doesn't want to update the dirty bitmap */
+    if (!phys_offset)
+        goto out;
+
+    phys_addr = start_addr;
+    for (addr = phys_offset; phys_addr < end_addr; phys_addr += TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
+        unsigned long *bitmap = (unsigned long *)d.dirty_bitmap;
+        if (lookup_bitmap_phys(bitmap, (phys_addr - start_addr) >> TARGET_PAGE_BITS))
+            cpu_physical_memory_set_dirty(addr);
+    }
+out:
+    qemu_free(d.dirty_bitmap);
+}
+
 int kvm_init(int smp_cpus)
 {
     KVMState *s;
diff --git a/kvm.h b/kvm.h
index 37102b4..90503e8 100644
--- a/kvm.h
+++ b/kvm.h
@@ -38,6 +38,11 @@ void kvm_set_phys_mem(target_phys_addr_t start_addr,
                       ram_addr_t size,
                       ram_addr_t phys_offset);
 
+void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr,
+                                    ram_addr_t phys_offset);
+
+int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t len);
+int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t len);
 /* internal API */
 
 struct KVMState;
-- 
1.5.6.5
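
As an aside, lookup_bitmap_phys() treats the bitmap returned by
KVM_GET_DIRTY_LOG as an array of unsigned long carrying one bit per guest page
of the slot, with bit N set when page N of the slot was written. A standalone
sketch of that indexing, outside the patch, with made-up dirty pages (the page
numbers printed assume a 64-bit host, where unsigned long is 64 bits):

#include <stdio.h>

/* Same word/bit split as lookup_bitmap_phys() in the patch above. */
static int test_page_bit(const unsigned long *bitmap, unsigned page_index)
{
    unsigned bits_per_word = sizeof(*bitmap) * 8;

    return (bitmap[page_index / bits_per_word] >> (page_index % bits_per_word)) & 1;
}

int main(void)
{
    /* Pretend the kernel reported pages 1 and 65 of the slot as dirty. */
    unsigned long bitmap[2] = { 1UL << 1, 1UL << 1 };
    unsigned i;

    for (i = 0; i < 128; i++) {
        if (test_page_bit(bitmap, i))
            printf("page %u is dirty\n", i);
    }
    return 0;
}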




