From: Marcelo Tosatti
Subject: [Qemu-devel] [patch 03/18] qemu: dynamic drive/drive_opt index allocation
Date: Wed, 04 Feb 2009 11:33:06 -0200
User-agent: quilt/0.46-1

Dynamically allocate drive option and drive table indexes, so that
indexes can be reused when devices are removed.

Signed-off-by: Marcelo Tosatti <address@hidden>
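For context, here is a minimal standalone sketch (not part of the patch) of the
allocation scheme it introduces: a fixed-size table whose entries carry a
"used" flag, a linear scan that hands out the first free slot, and a
hypothetical release helper that clears the flag so the index becomes
reusable once a device is removed. Names such as slot_get_free_idx() and
slot_release_idx() are illustrative only and do not appear in QEMU.

/* Illustrative sketch only -- models the "scan for the first unused slot"
 * allocation that drive_get_free_idx() and drive_opt_get_free_idx() perform
 * on drives_table[] and drives_opt[]. */
#include <stdio.h>

#define MAX_SLOTS 4

struct slot {
    int used;               /* same role as DriveInfo.used / drive_opt.used */
} slots[MAX_SLOTS];

/* Return the first free index and mark it used, or -1 if the table is full. */
static int slot_get_free_idx(void)
{
    int index;

    for (index = 0; index < MAX_SLOTS; index++)
        if (!slots[index].used) {
            slots[index].used = 1;
            return index;
        }

    return -1;
}

/* Hypothetical release helper: clearing 'used' is what would let a later
 * hot-unplug path hand the same index out again. */
static void slot_release_idx(int index)
{
    if (index >= 0 && index < MAX_SLOTS)
        slots[index].used = 0;
}

int main(void)
{
    int a = slot_get_free_idx();    /* 0 */
    int b = slot_get_free_idx();    /* 1 */

    slot_release_idx(a);            /* device removed: slot 0 is free again */

    printf("reused index: %d (b was %d)\n", slot_get_free_idx(), b); /* 0, 1 */
    return 0;
}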

Index: trunk/sysemu.h
===================================================================
--- trunk.orig/sysemu.h
+++ trunk/sysemu.h
@@ -134,6 +134,7 @@ typedef struct DriveInfo {
     BlockInterfaceType type;
     int bus;
     int unit;
+    int used;
     BlockInterfaceErrorAction onerror;
     char serial[21];
 } DriveInfo;
Index: trunk/vl.c
===================================================================
--- trunk.orig/vl.c
+++ trunk/vl.c
@@ -246,6 +246,7 @@ static int nb_drives_opt;
 static struct drive_opt {
     const char *file;
     char opt[1024];
+    int used;
 } drives_opt[MAX_DRIVES];
 
 static CPUState *cur_cpu;
@@ -2139,22 +2140,50 @@ static int bt_parse(const char *opt)
 #define MTD_ALIAS "if=mtd"
 #define SD_ALIAS "index=0,if=sd"
 
+static int drive_opt_get_free_idx(void)
+{
+    int index;
+
+    for (index = 0; index < MAX_DRIVES; index++)
+        if (!drives_opt[index].used) {
+            drives_opt[index].used = 1;
+            return index;
+        }
+
+    return -1;
+}
+
+static int drive_get_free_idx(void)
+{
+    int index;
+
+    for (index = 0; index < MAX_DRIVES; index++)
+        if (!drives_table[index].used) {
+            drives_table[index].used = 1;
+            return index;
+        }
+
+    return -1;
+}
+
 static int drive_add(const char *file, const char *fmt, ...)
 {
     va_list ap;
+    int index = drive_opt_get_free_idx();
 
-    if (nb_drives_opt >= MAX_DRIVES) {
+    if (nb_drives_opt >= MAX_DRIVES || index == -1) {
         fprintf(stderr, "qemu: too many drives\n");
         exit(1);
     }
 
-    drives_opt[nb_drives_opt].file = file;
+    drives_opt[index].file = file;
     va_start(ap, fmt);
-    vsnprintf(drives_opt[nb_drives_opt].opt,
+    vsnprintf(drives_opt[index].opt,
               sizeof(drives_opt[0].opt), fmt, ap);
     va_end(ap);
 
-    return nb_drives_opt++;
+    nb_drives_opt++;
+    return index;
 }
 
 int drive_get_index(BlockInterfaceType type, int bus, int unit)
@@ -2163,10 +2192,11 @@ int drive_get_index(BlockInterfaceType t
 
     /* seek interface, bus and unit */
 
-    for (index = 0; index < nb_drives; index++)
+    for (index = 0; index < MAX_DRIVES; index++)
         if (drives_table[index].type == type &&
            drives_table[index].bus == bus &&
-           drives_table[index].unit == unit)
+           drives_table[index].unit == unit &&
+           drives_table[index].used)
         return index;
 
     return -1;
@@ -2231,6 +2261,7 @@ static int drive_init(struct drive_opt *
     int index;
     int cache;
     int bdrv_flags, onerror;
+    int drives_table_idx;
     char *str = arg->opt;
     static const char * const params[] = { "bus", "unit", "if", "index",
                                            "cyls", "heads", "secs", "trans",
@@ -2505,11 +2536,12 @@ static int drive_init(struct drive_opt *
         snprintf(buf, sizeof(buf), "%s%s%i",
                  devname, mediastr, unit_id);
     bdrv = bdrv_new(buf);
-    drives_table[nb_drives].bdrv = bdrv;
-    drives_table[nb_drives].type = type;
-    drives_table[nb_drives].bus = bus_id;
-    drives_table[nb_drives].unit = unit_id;
-    drives_table[nb_drives].onerror = onerror;
-    strncpy(drives_table[nb_drives].serial, serial, sizeof(serial));
+    drives_table_idx = drive_get_free_idx();
+    drives_table[drives_table_idx].bdrv = bdrv;
+    drives_table[drives_table_idx].type = type;
+    drives_table[drives_table_idx].bus = bus_id;
+    drives_table[drives_table_idx].unit = unit_id;
+    drives_table[drives_table_idx].onerror = onerror;
+    strncpy(drives_table[drives_table_idx].serial, serial, sizeof(serial));
     nb_drives++;
 
