From: David Hildenbrand
Subject: [PATCH v1 09/13] util/mmap-alloc: Implement resizable mmaps
Date: Mon, 3 Feb 2020 19:31:21 +0100

Implement resizable mmaps: introduce qemu_ram_mmap_resizable() and
qemu_ram_mmap_resize(), and turn qemu_ram_mmap() into a wrapper around
qemu_ram_mmap_resizable(). For now, the actual resizing is not wired up
to any caller.
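
To illustrate the intended calling convention, a minimal caller sketch
(hypothetical, not part of this patch: the sizes, the shared/is_pmem
flag values, and alloc_resizable() itself are made up for illustration,
assuming the GiB/MiB macros from "qemu/units.h"):

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/mmap-alloc.h"

/*
 * Reserve 1 GiB of address space, but initially back only the first
 * 128 MiB of it with pages from @fd.
 */
static void *alloc_resizable(int fd)
{
    const size_t max_size = 1 * GiB;
    void *ptr;

    ptr = qemu_ram_mmap_resizable(fd, 128 * MiB, max_size, 0 /* align */,
                                  true /* shared */, false /* is_pmem */);
    if (ptr == MAP_FAILED) {
        return NULL;
    }

    /* Later: populate another 128 MiB within the reserved area. */
    if (qemu_ram_mmap_resize(ptr, fd, 128 * MiB, 256 * MiB,
                             true, false) == MAP_FAILED) {
        qemu_ram_munmap(fd, ptr, max_size);
        return NULL;
    }
    return ptr;
}

Note that teardown has to pass the full reserved size, i.e.
qemu_ram_munmap(fd, ptr, max_size), so that the unpopulated tail and
the guard page are released along with the populated part.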

Cc: "Michael S. Tsirkin" <address@hidden>
Cc: Greg Kurz <address@hidden>
Cc: Murilo Opsfelder Araujo <address@hidden>
Cc: Eduardo Habkost <address@hidden>
Cc: "Dr. David Alan Gilbert" <address@hidden>
Signed-off-by: David Hildenbrand <address@hidden>
---
 include/qemu/mmap-alloc.h | 21 ++++++++++++-------
 util/mmap-alloc.c         | 44 ++++++++++++++++++++++++++++-----------
 2 files changed, 45 insertions(+), 20 deletions(-)

diff --git a/include/qemu/mmap-alloc.h b/include/qemu/mmap-alloc.h
index e786266b92..70bc8e9637 100644
--- a/include/qemu/mmap-alloc.h
+++ b/include/qemu/mmap-alloc.h
@@ -7,11 +7,13 @@ size_t qemu_fd_getpagesize(int fd);
 size_t qemu_mempath_getpagesize(const char *mem_path);
 
 /**
- * qemu_ram_mmap: mmap the specified file or device.
+ * qemu_ram_mmap_resizable: reserve a memory region of @max_size and mmap the
+ *                          specified file or device into the first @size bytes.
  *
  * Parameters:
  *  @fd: the file or the device to mmap
  *  @size: the number of bytes to be mmaped
+ *  @max_size: the number of bytes to be reserved
  *  @align: if not zero, specify the alignment of the starting mapping address;
  *          otherwise, the alignment in use will be determined by QEMU.
  *  @shared: map has RAM_SHARED flag.
@@ -21,12 +23,15 @@ size_t qemu_mempath_getpagesize(const char *mem_path);
  *  On success, return a pointer to the mapped area.
  *  On failure, return MAP_FAILED.
  */
-void *qemu_ram_mmap(int fd,
-                    size_t size,
-                    size_t align,
-                    bool shared,
-                    bool is_pmem);
-
-void qemu_ram_munmap(int fd, void *ptr, size_t size);
+void *qemu_ram_mmap_resizable(int fd, size_t size, size_t max_size,
+                              size_t align, bool shared, bool is_pmem);
+void *qemu_ram_mmap_resize(void *ptr, int fd, size_t old_size, size_t new_size,
+                           bool shared, bool is_pmem);
+static inline void *qemu_ram_mmap(int fd, size_t size, size_t align,
+                                  bool shared, bool is_pmem)
+{
+    return qemu_ram_mmap_resizable(fd, size, size, align, shared, is_pmem);
+}
+void qemu_ram_munmap(int fd, void *ptr, size_t max_size);
 
 #endif
diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c
index 63ad6893b7..2d562145e9 100644
--- a/util/mmap-alloc.c
+++ b/util/mmap-alloc.c
@@ -172,11 +172,8 @@ static inline size_t mmap_pagesize(int fd)
 #endif
 }
 
-void *qemu_ram_mmap(int fd,
-                    size_t size,
-                    size_t align,
-                    bool shared,
-                    bool is_pmem)
+void *qemu_ram_mmap_resizable(int fd, size_t size, size_t max_size,
+                              size_t align, bool shared, bool is_pmem)
 {
     const size_t pagesize = mmap_pagesize(fd);
     size_t offset, total;
@@ -184,12 +181,14 @@ void *qemu_ram_mmap(int fd,
 
     /* we can only map whole pages */
     size = QEMU_ALIGN_UP(size, pagesize);
+    max_size = QEMU_ALIGN_UP(max_size, pagesize);
 
     /*
      * Note: this always allocates at least one extra page of virtual address
-     * space, even if size is already aligned.
+     * space, even if the size is already aligned. We will reserve an area of
+     * at least max_size, but only populate the requested part of it.
      */
-    total = size + align;
+    total = max_size + align;
 
     guardptr = mmap_reserve(0, total, fd);
     if (guardptr == MAP_FAILED) {
@@ -217,22 +216,43 @@ void *qemu_ram_mmap(int fd,
      * a guard page guarding against potential buffer overflows.
      */
     total -= offset;
-    if (total > size + pagesize) {
-        munmap(ptr + size + pagesize, total - size - pagesize);
+    if (total > max_size + pagesize) {
+        munmap(ptr + max_size + pagesize, total - max_size - pagesize);
     }
 
     return ptr;
 }
 
-void qemu_ram_munmap(int fd, void *ptr, size_t size)
+void *qemu_ram_mmap_resize(void *ptr, int fd, size_t old_size, size_t new_size,
+                           bool shared, bool is_pmem)
 {
     const size_t pagesize = mmap_pagesize(fd);
 
     /* we can only map whole pages */
-    size = QEMU_ALIGN_UP(size, pagesize);
+    old_size = QEMU_ALIGN_UP(old_size, pagesize);
+    new_size = QEMU_ALIGN_UP(new_size, pagesize);
+
+    /* actually resizable memory regions are only supported on Linux */
+    if (old_size < new_size) {
+        /* populate the missing piece into the reserved area */
+        ptr = mmap_populate(ptr + old_size, new_size - old_size, fd, old_size,
+                            shared, is_pmem);
+    } else if (old_size > new_size) {
+        /* discard this piece, keeping the area reserved (should never fail) */
+        ptr = mmap_reserve(ptr + new_size, old_size - new_size, fd);
+    }
+    return ptr;
+}
+
+void qemu_ram_munmap(int fd, void *ptr, size_t max_size)
+{
+    const size_t pagesize = mmap_pagesize(fd);
+
+    /* we can only map whole pages */
+    max_size = QEMU_ALIGN_UP(max_size, pagesize);
 
     if (ptr) {
         /* Unmap both the RAM block and the guard page */
-        munmap(ptr, size + pagesize);
+        munmap(ptr, max_size + pagesize);
     }
 }
-- 
2.24.1
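
For context, the reserve-then-populate scheme behind these helpers boils
down to two raw mmap(2) calls: a PROT_NONE anonymous mapping that claims
the address space, and a MAP_FIXED file mapping layered on top to back
part of it. A simplified, Linux-oriented sketch (reserve() and populate()
are illustrative names, not the exact mmap_reserve()/mmap_populate()
helpers from this series):

#include <stdbool.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/mman.h>

/* Claim @size bytes of address space without backing them. */
static void *reserve(size_t size)
{
    /* PROT_NONE keeps the range inaccessible; MAP_NORESERVE avoids
     * swap-space accounting for the mere reservation. */
    return mmap(NULL, size, PROT_NONE,
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
}

/* Back [ptr, ptr + size) with pages from @fd at @offset. */
static void *populate(void *ptr, size_t size, int fd, off_t offset,
                      bool shared)
{
    const int flags = MAP_FIXED | (shared ? MAP_SHARED : MAP_PRIVATE);

    /* MAP_FIXED atomically replaces the reservation in that range. */
    return mmap(ptr, size, PROT_READ | PROT_WRITE, flags, fd, offset);
}

Shrinking works the other way around: mapping a fresh PROT_NONE
reservation over the tail discards its pages while keeping the address
range claimed, which is what qemu_ram_mmap_resize() does for the
old_size > new_size case.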