From: Paolo Bonzini
Subject: [PATCH 07/10] target/i386/tcg: Use DPL-level accesses for interrupts and call gates
Date: Wed, 10 Jul 2024 08:29:17 +0200
This fixes a bug wherein i386/tcg assumed that an interrupt, or a far
transfer using the CALL or JMP instructions through a call gate, always
went from kernel or user mode to kernel mode.  This assumption is violated
if the call gate has a DPL that is greater than 0.
In addition, the stack accesses should count as explicit, not implicit
("kernel" in QEMU code), so that SMAP is not applied if DPL=3.
Analyzed-by: Robert R. Henry <rrh.henry@gmail.com>
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/249
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
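[Editor's note: a minimal standalone sketch of the selection logic described
above.  This is not QEMU's actual x86_mmu_index_pl() implementation; the enum
values, the mmu_index_for_pl() name and the smap_enabled flag are made up for
illustration.  The point is only that the MMU index for the stack pushes
follows the privilege level targeted by the gate, so a DPL=3 gate performs
plain user-level accesses, to which SMAP does not apply.]

#include <stdio.h>

enum {
    MMU_USER_IDX,     /* accesses made at privilege level 3            */
    MMU_KNOSMAP_IDX,  /* supervisor accesses, SMAP disabled            */
    MMU_KSMAP_IDX,    /* supervisor accesses with SMAP checks enforced */
};

/* hypothetical stand-in for an x86_mmu_index_pl()-style helper */
static int mmu_index_for_pl(int pl, int smap_enabled)
{
    if (pl == 3) {
        return MMU_USER_IDX;  /* SMAP never restricts user-mode accesses */
    }
    return smap_enabled ? MMU_KSMAP_IDX : MMU_KNOSMAP_IDX;
}

int main(void)
{
    /* a DPL=3 call gate: stack pushes use the user index, no SMAP fault */
    printf("DPL=3, SMAP on -> index %d\n", mmu_index_for_pl(3, 1));
    /* a DPL=0 gate with SMAP enabled: supervisor index with SMAP checks */
    printf("DPL=0, SMAP on -> index %d\n", mmu_index_for_pl(0, 1));
    return 0;
}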
target/i386/tcg/seg_helper.c | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c
index 07e3667639a..1430f477c43 100644
--- a/target/i386/tcg/seg_helper.c
+++ b/target/i386/tcg/seg_helper.c
@@ -678,7 +678,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
 
     sa.env = env;
     sa.ra = 0;
-    sa.mmu_index = cpu_mmu_index_kernel(env);
+    sa.mmu_index = x86_mmu_index_pl(env, dpl);
 
     if (type == 5) {
         /* task gate */
@@ -984,7 +984,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
 
     sa.env = env;
     sa.ra = 0;
-    sa.mmu_index = cpu_mmu_index_kernel(env);
+    sa.mmu_index = x86_mmu_index_pl(env, dpl);
     sa.sp_mask = -1;
     sa.ss_base = 0;
     if (dpl < cpl || ist != 0) {
@@ -1119,7 +1119,7 @@ static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
     sa.sp = env->regs[R_ESP];
     sa.sp_mask = 0xffff;
     sa.ss_base = env->segs[R_SS].base;
-    sa.mmu_index = cpu_mmu_index_kernel(env);
+    sa.mmu_index = x86_mmu_index_pl(env, 0);
 
     if (is_int) {
         old_eip = next_eip;
@@ -1583,7 +1583,7 @@ void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
     sa.sp = env->regs[R_ESP];
     sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
     sa.ss_base = env->segs[R_SS].base;
-    sa.mmu_index = cpu_mmu_index_kernel(env);
+    sa.mmu_index = x86_mmu_index_pl(env, 0);
 
     if (shift) {
         pushl(&sa, env->segs[R_CS].selector);
@@ -1619,17 +1619,17 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
     }
     cpl = env->hflags & HF_CPL_MASK;
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
 
     sa.env = env;
     sa.ra = GETPC();
-    sa.mmu_index = cpu_mmu_index_kernel(env);
+    sa.mmu_index = x86_mmu_index_pl(env, dpl);
 
     if (e2 & DESC_S_MASK) {
         if (!(e2 & DESC_CS_MASK)) {
             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
         }
-        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
         if (e2 & DESC_C_MASK) {
             /* conforming code segment */
             if (dpl > cpl) {
@@ -1691,7 +1691,6 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
     } else {
         /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
-        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
 
 #ifdef TARGET_X86_64
--
2.45.2
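[Editor's note on the last two hunks: the DPL field occupies the same bit
position in the high word of both segment descriptors and gate descriptors,
which is why the patch can compute dpl once, before the DESC_S_MASK check,
and drop the two later computations.  A tiny standalone illustration follows;
it is not QEMU code, and the example e2 value is made up.]

#include <stdint.h>
#include <stdio.h>

#define DESC_DPL_SHIFT 13   /* DPL lives in bits 13-14 of the descriptor's high word */

/* illustrative helper, not part of QEMU */
static unsigned descriptor_dpl(uint32_t e2)
{
    return (e2 >> DESC_DPL_SHIFT) & 3;
}

int main(void)
{
    uint32_t e2 = 0x00cffa00;   /* hypothetical high word: present, DPL=3 code segment */
    printf("DPL = %u\n", descriptor_dpl(e2));   /* prints: DPL = 3 */
    return 0;
}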