bug-hurd

From: Damien Zammit
Subject: [PATCH v2 gnumach] smp: Create AP processor set and put all APs inside it
Date: Sun, 11 Feb 2024 12:00:57 +0000

This has the effect of running on only one CPU even with SMP,
but the APs can later be enabled from userspace with the
appropriate processor set RPCs (see the sketch after the patch).
---
 ddb/db_print.c   | 10 +++++++---
 kern/machine.c   | 13 +++++++++++++
 kern/processor.c |  3 +++
 kern/processor.h |  3 +++
 4 files changed, 26 insertions(+), 3 deletions(-)

diff --git a/ddb/db_print.c b/ddb/db_print.c
index 028cb887..c8d85d26 100644
--- a/ddb/db_print.c
+++ b/ddb/db_print.c
@@ -345,10 +345,14 @@ db_show_all_runqs(
        db_expr_t       count,
        const char *    modif)
 {
-       int i;
+       int i = 0;
+       processor_set_t pset;
 
-       db_printf("Processor set runq:\t");
-       showrq(&default_pset.runq);
+       queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+               db_printf("Processor set #%d runq:\t", i);
+               showrq(&pset->runq);
+               i++;
+       }
        for (i = 0; i < smp_get_numcpus(); i++) {
            db_printf("Processor #%d runq:\t", i);
            showrq(&cpu_to_processor(i)->runq);
diff --git a/kern/machine.c b/kern/machine.c
index 87fbc4d1..7fed1246 100644
--- a/kern/machine.c
+++ b/kern/machine.c
@@ -84,6 +84,9 @@ void cpu_up(int cpu)
 
        processor = cpu_to_processor(cpu);
        pset_lock(&default_pset);
+#if    MACH_HOST
+       pset_lock(slave_pset);
+#endif
        s = splsched();
        processor_lock(processor);
 #if    NCPUS > 1
@@ -92,10 +95,20 @@ void cpu_up(int cpu)
        ms = &machine_slot[cpu];
        ms->running = TRUE;
        machine_info.avail_cpus++;
+#if    MACH_HOST
+       if (cpu == 0)
+               pset_add_processor(&default_pset, processor);
+       else
+               pset_add_processor(slave_pset, processor);
+#else
        pset_add_processor(&default_pset, processor);
+#endif
        processor->state = PROCESSOR_RUNNING;
        processor_unlock(processor);
        splx(s);
+#if    MACH_HOST
+       pset_unlock(slave_pset);
+#endif
        pset_unlock(&default_pset);
 }
 
diff --git a/kern/processor.c b/kern/processor.c
index 76735381..f06b5d62 100644
--- a/kern/processor.c
+++ b/kern/processor.c
@@ -51,6 +51,7 @@
 #if    MACH_HOST
 #include <kern/slab.h>
 struct kmem_cache pset_cache;
+struct processor_set *slave_pset;
 #endif /* MACH_HOST */
 
 
@@ -124,6 +125,8 @@ void pset_sys_init(void)
                ipc_processor_init(processor);
            }
        }
+
+       processor_set_create(&realhost, &slave_pset, &slave_pset);
 }
 #endif /* MACH_HOST */
 
diff --git a/kern/processor.h b/kern/processor.h
index fc204ffa..747badf2 100644
--- a/kern/processor.h
+++ b/kern/processor.h
@@ -85,6 +85,9 @@ struct processor_set {
        long                    sched_load;     /* load avg for scheduler */
 };
 extern struct processor_set    default_pset;
+#if    MACH_HOST
+extern struct processor_set    *slave_pset;
+#endif
 
 struct processor {
        struct run_queue runq;          /* local runq for this processor */
-- 
2.43.0
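
For reference, one way the "processor set RPCs" mentioned in the commit
message could be used from userspace is to move the APs out of the new
slave pset and into the default pset, so the scheduler starts dispatching
threads on them. The program below is only a rough sketch under some
assumptions: it uses the Hurd's get_privileged_ports() to obtain the
privileged host port, it assumes the first entry returned by
host_processor_sets() is the default pset, and error handling is minimal.

/* Sketch: assign all APs to the default processor set so they start
   running threads.  Assumes psets[0] names the default pset and that
   the caller can obtain the privileged host port.  */
#include <mach.h>
#include <mach/mach_host.h>
#include <hurd.h>
#include <error.h>
#include <stdio.h>

int
main (void)
{
  mach_port_t host_priv, device_master;
  processor_set_name_array_t psets;
  processor_array_t procs;
  mach_msg_type_number_t npsets, nprocs, i;
  processor_set_t default_set;
  kern_return_t kr;
  error_t err;

  /* host_processors() and host_processor_set_priv() need the
     privileged host port.  */
  err = get_privileged_ports (&host_priv, &device_master);
  if (err)
    error (1, err, "get_privileged_ports");

  /* Look up the processor sets and get a control port for the
     default one (assumed here to be the first entry).  */
  kr = host_processor_sets (mach_host_self (), &psets, &npsets);
  if (kr != KERN_SUCCESS)
    error (1, 0, "host_processor_sets: %d", kr);
  kr = host_processor_set_priv (host_priv, psets[0], &default_set);
  if (kr != KERN_SUCCESS)
    error (1, 0, "host_processor_set_priv: %d", kr);

  /* Enumerate the processors and assign every AP (everything but
     processor 0, the BSP) to the default pset.  */
  kr = host_processors (host_priv, &procs, &nprocs);
  if (kr != KERN_SUCCESS)
    error (1, 0, "host_processors: %d", kr);

  for (i = 1; i < nprocs; i++)
    {
      kr = processor_assign (procs[i], default_set, FALSE);
      if (kr != KERN_SUCCESS)
        fprintf (stderr, "processor_assign (cpu %u): %d\n", i, kr);
    }

  return 0;
}

Alternatively, selected tasks could be assigned to the slave pset with
task_assign() to run them on the APs without touching the default pset;
which direction is intended is up to the userspace policy, not the patch.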