From 8170da230f0253f143b6b18e5349c9032a3324bb Mon Sep 17 00:00:00 2001
From: Pawel Wieczorkiewicz
Date: Thu, 29 Jun 2023 12:33:44 +0200
Subject: [PATCH 1/5] cpu: add complementary interfaces for CPU run state control

Signed-off-by: Pawel Wieczorkiewicz
---
 common/cpu.c  | 14 ++++++++++++++
 include/cpu.h | 10 ++++++++++
 2 files changed, 24 insertions(+)

diff --git a/common/cpu.c b/common/cpu.c
index 0a915878..4c63712f 100644
--- a/common/cpu.c
+++ b/common/cpu.c
@@ -113,6 +113,20 @@ void unblock_all_cpus(void) {
         set_cpu_unblocked(cpu);
 }
 
+void block_all_cpus(void) {
+    cpu_t *cpu;
+
+    list_for_each_entry (cpu, &cpus, list)
+        set_cpu_blocked(cpu);
+}
+
+void finish_all_cpus(void) {
+    cpu_t *cpu;
+
+    list_for_each_entry (cpu, &cpus, list)
+        set_cpu_finished(cpu);
+}
+
 void wait_for_all_cpus(void) {
     cpu_t *cpu, *safe;
 
diff --git a/include/cpu.h b/include/cpu.h
index 4b68761b..ed979bfb 100644
--- a/include/cpu.h
+++ b/include/cpu.h
@@ -60,6 +60,8 @@ extern cpu_t *get_bsp_cpu(void);
 extern unsigned int get_nr_cpus(void);
 extern void for_each_cpu(void (*func)(cpu_t *cpu));
 extern void unblock_all_cpus(void);
+extern void block_all_cpus(void);
+extern void finish_all_cpus(void);
 extern void wait_for_all_cpus(void);
 
 /* Static declarations */
@@ -76,6 +78,10 @@ static inline void set_cpu_finished(cpu_t *cpu) {
     atomic_test_and_set_bit(CPU_FINISHED, &cpu->run_state);
 }
 
+static inline void set_cpu_unfinished(cpu_t *cpu) {
+    atomic_test_and_reset_bit(CPU_FINISHED, &cpu->run_state);
+}
+
 static inline bool is_cpu_unblocked(cpu_t *cpu) {
     return atomic_test_bit(CPU_UNBLOCKED, &cpu->run_state);
 }
@@ -84,6 +90,10 @@ static inline void set_cpu_unblocked(cpu_t *cpu) {
     atomic_test_and_set_bit(CPU_UNBLOCKED, &cpu->run_state);
 }
 
+static inline void set_cpu_blocked(cpu_t *cpu) {
+    atomic_test_and_reset_bit(CPU_UNBLOCKED, &cpu->run_state);
+}
+
 static inline void wait_cpu_unblocked(cpu_t *cpu) {
     while (!is_cpu_unblocked(cpu))
         cpu_relax();

From 8541e035a8ea0c7b9b464fc7e5c28ba0c883309b Mon Sep 17 00:00:00 2001
From: Pawel Wieczorkiewicz
Date: Fri, 21 Jul 2023 12:45:43 +0200
Subject: [PATCH 2/5] kernel: don't run test_main() as a task

Running test_main() as a task on the BSP blocks scheduling and execution
of other tasks on the BSP (a chicken-and-egg problem), because those
tasks are created and scheduled from within test_main() itself.

Signed-off-by: Pawel Wieczorkiewicz
---
 common/kernel.c | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/common/kernel.c b/common/kernel.c
index 6758391b..e81675cc 100644
--- a/common/kernel.c
+++ b/common/kernel.c
@@ -43,9 +43,6 @@ static void __noreturn echo_loop(void) {
 }
 
 void kernel_main(void) {
-    task_t *tests_task;
-    cpu_t *cpu;
-
     printk("\nKTF - Kernel Test Framework!\n");
 
     zap_boot_mappings();
@@ -54,11 +51,7 @@ void kernel_main(void) {
         display_multiboot_mmap();
     }
 
-    tests_task = new_kernel_task("tests", test_main, NULL);
-    cpu = get_bsp_cpu();
-
-    schedule_task(tests_task, cpu);
-
+    test_main(NULL);
     run_tasks(cpu);
     unblock_all_cpus();
     wait_for_all_cpus();

From 21ed82966113b0a32329298c2a6dca67b7b8f1ea Mon Sep 17 00:00:00 2001
From: Pawel Wieczorkiewicz
Date: Fri, 21 Jul 2023 12:51:21 +0200
Subject: [PATCH 3/5] cpu: don't unlink CPUs after they are finished

In wait_for_all_cpus(), wait for all CPUs that are still unfinished
and/or have a non-empty task_queue. Do not wait for the BSP, as it is
typically the expected caller of this function.

Also, do not unlink the CPU structure from the cpus list; more tasks
may be coming.
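
For illustration, the BSP-side flow this wait loop is designed for looks
roughly like this (a sketch only; the last patch of this series wraps
exactly this sequence into the execute_tasks() helper):

    /* BSP side: drive one round of task execution (sketch) */
    unblock_all_cpus();        /* let the APs' run_tasks() proceed         */
    run_tasks(get_bsp_cpu());  /* execute the BSP's own task_queue         */
    wait_for_all_cpus();       /* spin until every AP is finished and idle */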

Signed-off-by: Pawel Wieczorkiewicz
---
 common/cpu.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/common/cpu.c b/common/cpu.c
index 4c63712f..7f01cca4 100644
--- a/common/cpu.c
+++ b/common/cpu.c
@@ -128,15 +128,13 @@ void finish_all_cpus(void) {
 }
 
 void wait_for_all_cpus(void) {
-    cpu_t *cpu, *safe;
-
-    do {
-        list_for_each_entry_safe (cpu, safe, &cpus, list) {
-            if (is_cpu_finished(cpu)) {
-                spin_lock(&cpu->lock);
-                list_unlink(&cpu->list);
-                spin_unlock(&cpu->lock);
-            }
-        }
-    } while (!list_is_empty(&cpus));
+    cpu_t *cpu;
+
+    list_for_each_entry (cpu, &cpus, list) {
+        if (cpu->bsp)
+            continue;
+
+        while (!is_cpu_finished(cpu) || !list_is_empty(&cpu->task_queue))
+            cpu_relax();
+    }
 }

From 8e8bbe1a07bfc0e26f79247ce874ad5508578dfb Mon Sep 17 00:00:00 2001
From: Pawel Wieczorkiewicz
Date: Fri, 21 Jul 2023 12:58:34 +0200
Subject: [PATCH 4/5] sched: add simple CPU task execution synchronization barrier

After emptying their task_queue, all AP CPUs enter the blocked state,
which makes run_tasks() wait on them until they get unblocked. The BSP
is never waited on, as it is the one expected to unblock the AP CPUs.

Once unblocked, run_tasks() puts the CPU into the unfinished state
before executing any tasks from the task_queue.

That way the BSP controls when all APs start executing their tasks and
when they are finished with them.

Signed-off-by: Pawel Wieczorkiewicz
---
 common/sched.c | 6 +++++-
 smp/smp.c      | 4 +---
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/common/sched.c b/common/sched.c
index 1da14b5e..4d8fa50f 100644
--- a/common/sched.c
+++ b/common/sched.c
@@ -217,7 +217,9 @@ void wait_for_task_group(const cpu_t *cpu, task_group_t group) {
 void run_tasks(cpu_t *cpu) {
     task_t *task, *safe;
 
-    wait_cpu_unblocked(cpu);
+    if (!cpu->bsp)
+        wait_cpu_unblocked(cpu);
+    set_cpu_unfinished(cpu);
 
     do {
         list_for_each_entry_safe (task, safe, &cpu->task_queue, list) {
@@ -238,5 +240,7 @@ void run_tasks(cpu_t *cpu) {
         }
     } while (!list_is_empty(&cpu->task_queue));
 
+    if (!cpu->bsp)
+        set_cpu_blocked(cpu);
     set_cpu_finished(cpu);
 }
diff --git a/smp/smp.c b/smp/smp.c
index 47457119..438ee89a 100644
--- a/smp/smp.c
+++ b/smp/smp.c
@@ -64,10 +64,8 @@ void __noreturn ap_startup(void) {
     if (opt_fpu)
         enable_fpu();
 
-    run_tasks(cpu);
-
     while (true)
-        halt();
+        run_tasks(cpu);
 
     UNREACHABLE();
 }

From 3dbbe7a6c9ab914a65cdbd64360de42e44acbd40 Mon Sep 17 00:00:00 2001
From: Pawel Wieczorkiewicz
Date: Fri, 21 Jul 2023 13:02:44 +0200
Subject: [PATCH 5/5] sched: add execute_tasks() helper function

The execute_tasks() helper is responsible for collectively unblocking
all CPUs, calling run_tasks() on the BSP and waiting for all AP CPUs
until they are finished.

In kernel_main() the execute_tasks() call before test_main() is there
just in case; currently no tasks are scheduled before test_main().

The execute_tasks() call after test_main() makes sure all APs and the
BSP get unblocked and their tasks executed, or that the CPU states are
set to finished.
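
For illustration only, a test that fans work out to every CPU could then
be driven roughly like this (a sketch; the demo task and the per-CPU
scheduling helper are hypothetical, while new_kernel_task(),
schedule_task(), for_each_cpu() and execute_tasks() are the interfaces
used and added by this series):

    /* Hypothetical demo task; same signature as test_main(). */
    static unsigned long demo_task(void *arg) {
        printk("demo task\n");
        return 0;
    }

    /* Hypothetical helper: queue one demo task on the given CPU. */
    static void schedule_demo(cpu_t *cpu) {
        schedule_task(new_kernel_task("demo", demo_task, NULL), cpu);
    }

    /* Inside a test: queue per-CPU tasks, then run them all. */
    for_each_cpu(schedule_demo);
    execute_tasks(); /* unblock APs, run the BSP's queue, wait for APs */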

Signed-off-by: Pawel Wieczorkiewicz
---
 common/kernel.c | 8 ++++----
 include/sched.h | 5 +++++
 tests/test.c    | 2 ++
 3 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/common/kernel.c b/common/kernel.c
index e81675cc..3ab05eb6 100644
--- a/common/kernel.c
+++ b/common/kernel.c
@@ -51,13 +51,13 @@ void kernel_main(void) {
         display_multiboot_mmap();
     }
 
-    test_main(NULL);
-    run_tasks(cpu);
-    unblock_all_cpus();
-    wait_for_all_cpus();
+    execute_tasks();
 
+    test_main(NULL);
     printk("All tasks done.\n");
 
+    execute_tasks();
+
 #ifdef KTF_PMU
     pfm_terminate();
 #endif
diff --git a/include/sched.h b/include/sched.h
index d40af72e..2e4af358 100644
--- a/include/sched.h
+++ b/include/sched.h
@@ -105,4 +105,9 @@ static inline task_t *new_user_task(const char *name, task_func_t func, void *ar
     return new_task(name, func, arg, TASK_TYPE_USER);
 }
 
+static inline void execute_tasks(void) {
+    unblock_all_cpus();
+    run_tasks(get_bsp_cpu());
+    wait_for_all_cpus();
+}
 #endif /* KTF_SCHED_H */
diff --git a/tests/test.c b/tests/test.c
index 31295511..90ac8d9f 100644
--- a/tests/test.c
+++ b/tests/test.c
@@ -71,6 +71,8 @@ unsigned long test_main(void *unused) {
         printk("Running test: %s\n", name);
         rc = fn(NULL);
 
+        execute_tasks();
+
         printk("Test %s returned: 0x%x\n", name, rc);
         n++;
     }