From: Juan Quintela
Subject: [Qemu-devel] [PULL 20/57] Return path: Send responses from destination to source
Date: Mon, 9 Nov 2015 18:28:24 +0100

From: "Dr. David Alan Gilbert" <address@hidden>

Add migrate_send_rp_message to send a message from destination to source
along the return path.
  (It uses a mutex so that it can be called from multiple threads.)
Add migrate_send_rp_shut to send a 'shut' message to indicate that
  the destination is finished with the RP.
Add migrate_send_rp_pong to send a 'PONG' message in response to a PING,
  and use it in the MIG_CMD_PING handler (see the wire-format sketch
  after the diffstat below).

Signed-off-by: Dr. David Alan Gilbert <address@hidden>
Reviewed-by: Amit Shah <address@hidden>
Reviewed-by: Juan Quintela <address@hidden>
Signed-off-by: Juan Quintela <address@hidden>
---
 include/migration/migration.h | 19 ++++++++++++++++++
 migration/migration.c         | 45 +++++++++++++++++++++++++++++++++++++++++++
 migration/savevm.c            |  2 +-
 trace-events                  |  1 +
 4 files changed, 66 insertions(+), 1 deletion(-)
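
For reference, migrate_send_rp_message() below frames each return-path
message as a big-endian 16-bit message type, a big-endian 16-bit payload
length, and then 'len' payload bytes.  Here is a minimal sketch of how the
source side could read back one such message; the reader itself is not part
of this patch, and the function name and error handling are illustrative
only (qemu_get_be16/qemu_get_buffer mirror the qemu_put_* calls below):

    /* Sketch: read one return-path message as framed by
     * migrate_send_rp_message(): be16 type, be16 len, len payload bytes.
     * Returns the message type, or -1 on a malformed message.
     */
    static int source_read_rp_message(QEMUFile *rp)
    {
        uint8_t buf[512];                  /* ample: SHUT/PONG carry 4 bytes */
        uint16_t type = qemu_get_be16(rp); /* one of MIG_RP_MSG_* */
        uint16_t len = qemu_get_be16(rp);

        if (type == MIG_RP_MSG_INVALID || type >= MIG_RP_MSG_MAX ||
            len > sizeof(buf)) {
            return -1;
        }
        qemu_get_buffer(rp, buf, len);     /* e.g. the be32 value of a PONG */
        return type;
    }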

diff --git a/include/migration/migration.h b/include/migration/migration.h
index 98a6d07..3ce3fda 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -43,12 +43,22 @@ struct MigrationParams {
     bool shared;
 };

+/* Messages sent on the return path from destination to source */
+enum mig_rp_message_type {
+    MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
+    MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
+    MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32 ) */
+
+    MIG_RP_MSG_MAX
+};
+
 typedef QLIST_HEAD(, LoadStateEntry) LoadStateEntry_Head;
 /* State for the incoming migration */
 struct MigrationIncomingState {
     QEMUFile *from_src_file;

     QEMUFile *to_src_file;
+    QemuMutex rp_mutex;    /* We send replies from multiple threads */

     /* See savevm.c */
     LoadStateEntry_Head loadvm_handlers;
@@ -181,6 +191,15 @@ int migrate_compress_threads(void);
 int migrate_decompress_threads(void);
 bool migrate_use_events(void);

+/* Sending on the return path - generic and then for each message type */
+void migrate_send_rp_message(MigrationIncomingState *mis,
+                             enum mig_rp_message_type message_type,
+                             uint16_t len, void *data);
+void migrate_send_rp_shut(MigrationIncomingState *mis,
+                          uint32_t value);
+void migrate_send_rp_pong(MigrationIncomingState *mis,
+                          uint32_t value);
+
 void ram_control_before_iterate(QEMUFile *f, uint64_t flags);
 void ram_control_after_iterate(QEMUFile *f, uint64_t flags);
 void ram_control_load_hook(QEMUFile *f, uint64_t flags, void *data);
diff --git a/migration/migration.c b/migration/migration.c
index bb7dcb9..8380e2f 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -97,6 +97,7 @@ MigrationIncomingState *migration_incoming_state_new(QEMUFile* f)
     mis_current = g_new0(MigrationIncomingState, 1);
     mis_current->from_src_file = f;
     QLIST_INIT(&mis_current->loadvm_handlers);
+    qemu_mutex_init(&mis_current->rp_mutex);

     return mis_current;
 }
@@ -344,6 +345,50 @@ void process_incoming_migration(QEMUFile *f)
     qemu_coroutine_enter(co, f);
 }

+/*
+ * Send a message on the return channel back to the source
+ * of the migration.
+ */
+void migrate_send_rp_message(MigrationIncomingState *mis,
+                             enum mig_rp_message_type message_type,
+                             uint16_t len, void *data)
+{
+    trace_migrate_send_rp_message((int)message_type, len);
+    qemu_mutex_lock(&mis->rp_mutex);
+    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
+    qemu_put_be16(mis->to_src_file, len);
+    qemu_put_buffer(mis->to_src_file, data, len);
+    qemu_fflush(mis->to_src_file);
+    qemu_mutex_unlock(&mis->rp_mutex);
+}
+
+/*
+ * Send a 'SHUT' message on the return channel with the given value
+ * to indicate that we've finished with the RP.  Non-0 value indicates
+ * error.
+ */
+void migrate_send_rp_shut(MigrationIncomingState *mis,
+                          uint32_t value)
+{
+    uint32_t buf;
+
+    buf = cpu_to_be32(value);
+    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
+}
+
+/*
+ * Send a 'PONG' message on the return channel with the given value
+ * (normally in response to a 'PING')
+ */
+void migrate_send_rp_pong(MigrationIncomingState *mis,
+                          uint32_t value)
+{
+    uint32_t buf;
+
+    buf = cpu_to_be32(value);
+    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
+}
+
 /* amount of nanoseconds we are willing to wait for migration to be down.
  * the choice of nanoseconds is because it is the maximum resolution that
  * get_clock() can achieve. It is an internal measure. All user-visible
diff --git a/migration/savevm.c b/migration/savevm.c
index ea1d1b1..1a29a31 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1102,7 +1102,7 @@ static int loadvm_process_command(QEMUFile *f)
                          tmp32);
             return -1;
         }
-        /* migrate_send_rp_pong(mis, tmp32); TODO: gets added later */
+        migrate_send_rp_pong(mis, tmp32);
         break;
     }

diff --git a/trace-events b/trace-events
index 500f1e3..ca5909b 100644
--- a/trace-events
+++ b/trace-events
@@ -1431,6 +1431,7 @@ migrate_fd_cleanup(void) ""
 migrate_fd_error(void) ""
 migrate_fd_cancel(void) ""
 migrate_pending(uint64_t size, uint64_t max) "pending size %" PRIu64 " max %" PRIu64
+migrate_send_rp_message(int msg_type, uint16_t len) "%d: len %d"
 migrate_transferred(uint64_t tranferred, uint64_t time_spent, double bandwidth, uint64_t size) "transferred %" PRIu64 " time_spent %" PRIu64 " bandwidth %g max_size %" PRId64
 migrate_state_too_big(void) ""
 migrate_global_state_post_load(const char *state) "loaded state: %s"
-- 
2.5.0
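
The 'PONG' payload echoes the 32-bit sequence value carried by the
corresponding 'PING', so the source can match responses to requests.  A
hedged sketch of that matching step, assuming a reader like the one above
has captured the payload (the helper name is hypothetical, not from this
series; be32_to_cpu() is QEMU's usual byte-swap helper from qemu/bswap.h):

    /* Sketch: check that a PONG payload echoes the expected PING sequence. */
    static bool rp_pong_matches(const uint8_t *payload, uint16_t len,
                                uint32_t expected_seq)
    {
        uint32_t seq;

        if (len != sizeof(seq)) {
            return false;               /* a PONG carries exactly one be32 */
        }
        memcpy(&seq, payload, sizeof(seq));
        return be32_to_cpu(seq) == expected_seq;
    }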