[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH 05/10] linux-user: init_guest_space: Clarify page alignment logic
From: |
Luke Shumaker |
Subject: |
[Qemu-devel] [PATCH 05/10] linux-user: init_guest_space: Clarify page alignment logic |
Date: |
Thu, 28 Dec 2017 13:08:08 -0500 |
From: Luke Shumaker <address@hidden>
There are 3 parts to this change:
- Add a comment showing the relative sizes and positions of the blocks of
memory
- introduce and use new aligned_{start,size} instead of adjusting
real_{start,size}
- When we clean up (on failure), munmap(real_start, real_size) instead of
munmap(aligned_start, aligned_size). It *shouldn't* make any
difference, but I will admit that this does mean we are making the
syscall with different values, so this isn't quite a no-op patch.
Signed-off-by: Luke Shumaker <address@hidden>
---
linux-user/elfload.c | 43 +++++++++++++++++++++++++++++++++----------
1 file changed, 33 insertions(+), 10 deletions(-)
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index f41cecc3cb..22f2632dfa 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -1827,7 +1827,7 @@ unsigned long init_guest_space(unsigned long host_start,
unsigned long guest_start,
bool fixed)
{
- unsigned long current_start, real_start;
+ unsigned long current_start, aligned_start;
int flags;
assert(host_start || host_size);
@@ -1853,7 +1853,8 @@ unsigned long init_guest_space(unsigned long host_start,
/* Otherwise, a non-zero size region of memory needs to be mapped
* and validated. */
while (1) {
- unsigned long real_size = host_size;
+ unsigned long real_start, real_size, aligned_size;
+ aligned_size = real_size = host_size;
/* Do not use mmap_find_vma here because that is limited to the
* guest address space. We are going to make the
@@ -1867,26 +1868,48 @@ unsigned long init_guest_space(unsigned long host_start,
/* Ensure the address is properly aligned. */
if (real_start & ~qemu_host_page_mask) {
+ /* Ideally, we adjust like
+ *
+ * pages: [ ][ ][ ][ ][ ]
+ * old: [ real ]
+ * [ aligned ]
+ * new: [ real ]
+ * [ aligned ]
+ *
+ * But if there is something else mapped right after it,
+ * then obviously it won't have room to grow, and the
+ * kernel will put the new larger real someplace else with
+ * unknown alignment (if we made it to here, then
+ * fixed=false). Which is why we grow real by a full page
+ * size, instead of by part of one; so that even if we get
+ * moved, we can still guarantee alignment. But this does
+ * mean that there is a padding of < 1 page both before
+ * and after the aligned range; the "after" could
+ * cause problems for ARM emulation where it could butt in
+ * to where we need to put the commpage.
+ */
munmap((void *)real_start, host_size);
- real_size = host_size + qemu_host_page_size;
+ real_size = aligned_size + qemu_host_page_size;
real_start = (unsigned long)
mmap((void *)real_start, real_size, PROT_NONE, flags, -1, 0);
if (real_start == (unsigned long)-1) {
return (unsigned long)-1;
}
- real_start = HOST_PAGE_ALIGN(real_start);
+ aligned_start = HOST_PAGE_ALIGN(real_start);
+ } else {
+ aligned_start = real_start;
}
/* Check to see if the address is valid. */
- if (!host_start || real_start == current_start) {
+ if (!host_start || aligned_start == current_start) {
#if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
/* On 32-bit ARM, we need to also be able to map the commpage. */
- int valid = init_guest_commpage(real_start - guest_start,
- real_size + guest_start);
+ int valid = init_guest_commpage(aligned_start - guest_start,
+ aligned_size + guest_start);
if (valid == 1) {
break;
} else if (valid == -1) {
- munmap((void *)real_start, host_size);
+ munmap((void *)real_start, real_size);
return (unsigned long)-1;
}
/* valid == 0, so try again. */
@@ -1905,7 +1928,7 @@ unsigned long init_guest_space(unsigned long host_start,
* address space randomization put a shared library somewhere
* inconvenient.
*/
- munmap((void *)real_start, host_size);
+ munmap((void *)real_start, real_size);
current_start += qemu_host_page_size;
if (host_start == current_start) {
/* Theoretically possible if host doesn't have any suitably
@@ -1917,7 +1940,7 @@ unsigned long init_guest_space(unsigned long host_start,
qemu_log_mask(CPU_LOG_PAGE, "Reserved 0x%lx bytes of guest address
space\n", host_size);
- return real_start;
+ return aligned_start;
}
static void probe_guest_base(const char *image_name,
--
2.15.1
- [Qemu-devel] [PATCH 00/10] linux-user: Speed up guest space initialization on 32-bit ARM target, Luke Shumaker, 2017/12/28
- [Qemu-devel] [PATCH 02/10] linux-user: Rename validate_guest_space => init_guest_commpage, Luke Shumaker, 2017/12/28
- [Qemu-devel] [PATCH 06/10] linux-user: init_guest_commpage: Add a comment about size check, Luke Shumaker, 2017/12/28
- [Qemu-devel] [PATCH 05/10] linux-user: init_guest_space: Clarify page alignment logic,
Luke Shumaker <=
- [Qemu-devel] [PATCH 04/10] linux-user: init_guest_space: Correctly handle guest_start in commpage initialization, Luke Shumaker, 2017/12/28
- [Qemu-devel] [PATCH 03/10] linux-user: init_guest_space: Clean up if we can't initialize the commpage, Luke Shumaker, 2017/12/28
- [Qemu-devel] [PATCH 08/10] linux-user: init_guest_space: Don't try to align if we'll reject it, Luke Shumaker, 2017/12/28
- [Qemu-devel] [PATCH 10/10] linux-user: init_guest_space: Try to make ARM space+commpage continuous, Luke Shumaker, 2017/12/28
- [Qemu-devel] [PATCH 01/10] linux-user: Use #if to only call validate_guest_space for 32-bit ARM target, Luke Shumaker, 2017/12/28
- [Qemu-devel] [PATCH 07/10] linux-user: init_guest_space: Clean up control flow a bit, Luke Shumaker, 2017/12/28
- [Qemu-devel] [PATCH 09/10] linux-user: init_guest_space: Add a comment about search strategy, Luke Shumaker, 2017/12/28