Re: [PATCH 11/12] i386: Fix race in multiprocessor ktss
From: Samuel Thibault
Subject: Re: [PATCH 11/12] i386: Fix race in multiprocessor ktss
Date: Mon, 31 Oct 2022 00:01:40 +0100
User-agent: NeoMutt/20170609 (1.8.3)
Thanks for looking at it; we definitely need to fix this before enabling
SMP :D
Damien Zammit, on Tue 25 Oct 2022 10:56:32 +0000, wrote:
> ---
> i386/i386/io_perm.c | 5 ---
> i386/i386/pcb.c | 79 ++++++++++++++++++++++++---------------------
> 2 files changed, 43 insertions(+), 41 deletions(-)
>
> diff --git a/i386/i386/io_perm.c b/i386/i386/io_perm.c
> index 6db60f73..d9c646fe 100644
> --- a/i386/i386/io_perm.c
> +++ b/i386/i386/io_perm.c
> @@ -266,9 +266,7 @@ i386_io_perm_modify (task_t target_task, io_perm_t io_perm, boolean_t enable)
>
> if (!iopb)
> {
> - simple_unlock (&target_task->machine.iopb_lock);
> iopb = (unsigned char *) kmem_cache_alloc (&machine_task_iopb_cache);
> - simple_lock (&target_task->machine.iopb_lock);
Why?
kmem_cache_alloc can be a long operation, so it is better not to hold a
simple lock across it. Safety is still provided by the checks below,
once the simple lock has been reacquired: if another thread allocated
an iopb in the meantime, that one is used and the just-allocated one is
freed.
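To make the pattern concrete, here is a minimal standalone sketch of
that drop-the-lock-while-allocating idiom, using pthreads and malloc
instead of gnumach's simple_lock and kmem_cache_alloc (the names
table_lock, table and ensure_table are purely illustrative):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char *table;   /* plays the role of task->machine.iopb */

void ensure_table (void)
{
  pthread_mutex_lock (&table_lock);
  if (!table)
    {
      unsigned char *fresh;

      /* The allocation may be slow, so do it without holding the lock.  */
      pthread_mutex_unlock (&table_lock);
      fresh = malloc (64);
      pthread_mutex_lock (&table_lock);

      /* Re-check: another thread may have installed a table meanwhile;
         if so, keep theirs and free the one we just allocated.  */
      if (!table)
        table = fresh;
      else
        free (fresh);
    }
  pthread_mutex_unlock (&table_lock);
}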
> @@ -308,9 +306,6 @@ i386_io_perm_modify (task_t target_task, io_perm_t io_perm, boolean_t enable)
> target_task->machine.iopb_size = iopb_size;
> }
>
> -#if NCPUS>1
> -#warning SMP support missing (notify all CPUs running threads in that of the I/O bitmap change).
> -#endif
> if (target_task == current_task())
> update_ktss_iopb (iopb, target_task->machine.iopb_size);
>
> diff --git a/i386/i386/pcb.c b/i386/i386/pcb.c
> index 5ac487b7..4ff7309c 100644
> --- a/i386/i386/pcb.c
> +++ b/i386/i386/pcb.c
> @@ -48,6 +48,7 @@
> #include <i386/user_ldt.h>
> #include <i386/db_interface.h>
> #include <i386/fpu.h>
> +#include <i386/spl.h>
> #include "eflags.h"
> #include "gdt.h"
> #include "ldt.h"
> @@ -122,8 +123,8 @@ vm_offset_t stack_detach(thread_t thread)
> #define curr_gdt(mycpu) (mp_gdt[mycpu])
> #define curr_ktss(mycpu) (mp_ktss[mycpu])
> #else
> -#define curr_gdt(mycpu) ((void)(mycpu), gdt)
> -#define curr_ktss(mycpu) ((void)(mycpu), (struct task_tss *)&ktss)
> +#define curr_gdt(mycpu) (gdt)
> +#define curr_ktss(mycpu) ((struct task_tss *)&ktss)
Why? We don't want warnings about the mycpu variable being unused when
building with NCPUS == 1.
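For what it's worth, a tiny standalone example of that (void) idiom
(nothing gnumach-specific here, the names are made up):

#include <stdio.h>

static int the_only_counter;

/* In a "uniprocessor" build the slot argument is irrelevant, but callers
   still pass one; evaluating it in a (void) cast keeps -Wunused-* quiet
   while the comma expression still yields the single global object.  */
#define per_cpu_counter(slot)  ((void)(slot), &the_only_counter)

int main (void)
{
  int mycpu = 0;   /* would come from cpu_number() in the kernel */

  (*per_cpu_counter (mycpu))++;
  printf ("%d\n", the_only_counter);
  return 0;
}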
> #endif
>
> #define gdt_desc_p(mycpu,sel) \
> @@ -131,6 +132,8 @@ vm_offset_t stack_detach(thread_t thread)
>
> void switch_ktss(pcb_t pcb)
> {
> + spl_t s = splhigh();
What scenario does this protect against?
> int mycpu = cpu_number();
> {
> vm_offset_t pcb_stack_top;
> @@ -222,21 +225,20 @@ void switch_ktss(pcb_t pcb)
> pcb->ims.user_gdt, sizeof pcb->ims.user_gdt);
> #endif /* MACH_PV_DESCRIPTORS */
>
> - db_load_context(pcb);
> + db_set_debug_state(pcb, &pcb->ims.ids)
Why? We do want to load the dr registers.
>
> /*
> * Load the floating-point context, if necessary.
> */
> fpu_load_context(pcb);
>
> + splx(s);
> }
>
> -/* If NEW_IOPB is not null, the SIZE denotes the number of bytes in
> - the new bitmap. Expects iopb_lock to be held. */
> -void
> -update_ktss_iopb (unsigned char *new_iopb, io_port_t size)
> +static void
> +update_ktss_iopb_per_cpu (unsigned char *new_iopb, io_port_t size, int cpu)
Better keep the comment on this function too.
> {
> - struct task_tss *tss = curr_ktss (cpu_number ());
> + struct task_tss *tss = curr_ktss (cpu);
>
> if (new_iopb && size > 0)
> {
> @@ -249,6 +251,24 @@ update_ktss_iopb (unsigned char *new_iopb, io_port_t size)
> tss->tss.io_bit_map_offset = IOPB_INVAL;
> }
>
> +/* If NEW_IOPB is not null, the SIZE denotes the number of bytes in
> + the new bitmap. Expects iopb_lock to be held per TASK. */
> +void
> +update_ktss_iopb (unsigned char *new_iopb, io_port_t size)
> +{
> +#if NCPUS > 1
> + int slot;
> +
> + for (slot = 0; slot < NCPUS; slot++)
> + {
> + if (machine_slot[slot].is_cpu)
> + update_ktss_iopb_per_cpu (new_iopb, size, slot);
There is a misunderstanding here.
There are two kinds of update_ktss_iopb calls:
- calls from stack_handoff and switch_context only matter for the
current CPU: they just update its ioperm bitmap.
- calls from i386_io_perm_modify matter for all CPUs that are currently
running threads of the calling task.
So these two cases should be split: we don't want to change ioperms on
all CPUs just because one CPU made a thread context switch. And in the
i386_io_perm_modify case we don't want to change ioperms on all CPUs
either, only on those CPUs that happen to be running a thread of the
calling task.
(And of course both cases have to be protected against a CPU that
happens to be switching threads at the same time, and against another
thread that happens to be calling i386_io_perm_modify concurrently.)
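Roughly, something along these lines (only a sketch of the split:
update_ktss_iopb_local, update_ktss_iopb_task and the cpu_running_task()
helper are made-up names, and a real version would also need to
notify/IPI the remote CPUs rather than poke their TSS directly, plus the
locking discussed above):

/* Called from stack_handoff / switch_context: only the local CPU's
   ktss needs the new bitmap.  */
static void
update_ktss_iopb_local (unsigned char *new_iopb, io_port_t size)
{
  update_ktss_iopb_per_cpu (new_iopb, size, cpu_number ());
}

/* Called from i386_io_perm_modify: only the CPUs currently running a
   thread of TASK need to be told about the bitmap change.  */
static void
update_ktss_iopb_task (task_t task, unsigned char *new_iopb, io_port_t size)
{
#if NCPUS > 1
  int slot;

  for (slot = 0; slot < NCPUS; slot++)
    if (machine_slot[slot].is_cpu
        && cpu_running_task (slot, task))  /* hypothetical helper */
      update_ktss_iopb_per_cpu (new_iopb, size, slot);
#else
  if (task == current_task ())
    update_ktss_iopb_per_cpu (new_iopb, size, cpu_number ());
#endif
}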
> + }
> +#else
> + update_ktss_iopb_per_cpu (new_iopb, size, cpu_number());
> +#endif
> +}
> +
> /*
> * stack_handoff:
> *
> @@ -259,8 +279,12 @@ void stack_handoff(
> thread_t old,
> thread_t new)
> {
> - int mycpu = cpu_number();
> vm_offset_t stack;
> + task_t old_task, new_task;
> + int mycpu;
> + spl_t s;
> +
> + mycpu = cpu_number();
>
> /*
> * Save FP registers if in use.
> @@ -270,8 +294,6 @@ void stack_handoff(
> /*
> * Switch address maps if switching tasks.
> */
> - {
> - task_t old_task, new_task;
>
> if ((old_task = old->task) != (new_task = new->task)) {
> PMAP_DEACTIVATE_USER(vm_map_pmap(old_task->map),
> @@ -279,20 +301,12 @@ void stack_handoff(
> PMAP_ACTIVATE_USER(vm_map_pmap(new_task->map),
> new, mycpu);
>
> + s = splhigh();
What scenario does this protect against?
> simple_lock (&new_task->machine.iopb_lock);
> -#if NCPUS>1
> -#warning SMP support missing (avoid races with io_perm_modify).
> -#else
> - /* This optimization only works on a single processor
> - machine, where old_task's iopb can not change while
> - we are switching. */
> - if (old_task->machine.iopb || new_task->machine.iopb)
> -#endif
> - update_ktss_iopb (new_task->machine.iopb,
> - new_task->machine.iopb_size);
> + update_ktss_iopb (new_task->machine.iopb, new_task->machine.iopb_size);
> simple_unlock (&new_task->machine.iopb_lock);
> + splx(s);
> }
> - }
>
> /*
> * Load the rest of the user state for the new thread
> @@ -335,6 +349,10 @@ thread_t switch_context(
> void (*continuation)(),
> thread_t new)
> {
> + task_t old_task, new_task;
> + int mycpu = cpu_number();
> + spl_t s;
> +
> /*
> * Save FP registers if in use.
> */
> @@ -343,9 +361,6 @@ thread_t switch_context(
> /*
> * Switch address maps if switching tasks.
> */
> - {
> - task_t old_task, new_task;
> - int mycpu = cpu_number();
>
> if ((old_task = old->task) != (new_task = new->task)) {
> PMAP_DEACTIVATE_USER(vm_map_pmap(old_task->map),
> @@ -353,20 +368,12 @@ thread_t switch_context(
> PMAP_ACTIVATE_USER(vm_map_pmap(new_task->map),
> new, mycpu);
>
> + s = splhigh();
> simple_lock (&new_task->machine.iopb_lock);
> -#if NCPUS>1
> -#warning SMP support missing (avoid races with io_perm_modify).
> -#else
> - /* This optimization only works on a single processor
> - machine, where old_task's iopb can not change while
> - we are switching. */
> - if (old_task->machine.iopb || new_task->machine.iopb)
> -#endif
> - update_ktss_iopb (new_task->machine.iopb,
> - new_task->machine.iopb_size);
> + update_ktss_iopb (new_task->machine.iopb, new_task->machine.iopb_size);
> simple_unlock (&new_task->machine.iopb_lock);
> + splx(s);
> }
> - }
>
> /*
> * Load the rest of the user state for the new thread
> --
> 2.34.1
>
>