From 84708e9a6186d5e757d086d21fecdbb454e047be Mon Sep 17 00:00:00 2001
From: erlingrj
Date: Wed, 15 Feb 2023 20:27:42 +0100
Subject: [PATCH 1/6] If NUMBER_OF_WORKERS is not defined or 0, redefine to 1.

---
 core/platform/lf_zephyr_support.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/core/platform/lf_zephyr_support.c b/core/platform/lf_zephyr_support.c
index 070512e37..8230309aa 100644
--- a/core/platform/lf_zephyr_support.c
+++ b/core/platform/lf_zephyr_support.c
@@ -337,6 +337,13 @@ int lf_notify_of_event() {
 #define _LF_STACK_SIZE 1024
 // FIXME: What is an appropriate thread prio?
 #define _LF_THREAD_PRIORITY 5
+
+// If NUMBER_OF_WORKERS is not specified, or specified to 0. Then we default to 1
+#if !defined(NUMBER_OF_WORKERS) || NUMBER_OF_WORKERS==0
+#undef NUMBER_OF_WORKERS
+#define NUMBER_OF_WORKERS 1
+#endif
+
 static K_THREAD_STACK_ARRAY_DEFINE(stacks, NUMBER_OF_WORKERS, _LF_STACK_SIZE);
 static struct k_thread threads[NUMBER_OF_WORKERS];
@@ -367,7 +374,8 @@ int lf_available_cores() {
 int lf_thread_create(lf_thread_t* thread, void *(*lf_thread) (void *), void* arguments) {
     // Use static id to map each created thread to a
     static int tid = 0;
-
+
+    // Make sure we dont try to create too many threads
     if (tid > (NUMBER_OF_WORKERS-1)) {
         return -1;
     }

From 400c43b139449eab39d703ed2e9a59d08189f4c8 Mon Sep 17 00:00:00 2001
From: erlingrj
Date: Wed, 15 Feb 2023 20:27:56 +0100
Subject: [PATCH 2/6] Check return status from `lf_thread_create`

---
 core/threaded/reactor_threaded.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/core/threaded/reactor_threaded.c b/core/threaded/reactor_threaded.c
index 7ffad482b..f276652eb 100644
--- a/core/threaded/reactor_threaded.c
+++ b/core/threaded/reactor_threaded.c
@@ -1031,7 +1031,9 @@ void start_threads() {
     LF_PRINT_LOG("Starting %u worker threads.", _lf_number_of_workers);
     _lf_thread_ids = (lf_thread_t*)malloc(_lf_number_of_workers * sizeof(lf_thread_t));
     for (unsigned int i = 0; i < _lf_number_of_workers; i++) {
-        lf_thread_create(&_lf_thread_ids[i], worker, NULL);
+        if (lf_thread_create(&_lf_thread_ids[i], worker, NULL) != 0) {
+            lf_print_error_and_exit("Could not start thread-%u", i);
+        }
     }
 }

From 0bef657e9fa0acce63144f1f510895da1239d7d0 Mon Sep 17 00:00:00 2001
From: erlingrj
Date: Thu, 16 Feb 2023 20:48:32 +0100
Subject: [PATCH 3/6] Fix scheduler_adaptive.c and remove old debug code from lf_zephyr_support.c

---
 core/platform/lf_zephyr_support.c  | 6 +++---
 core/threaded/scheduler_adaptive.c | 1 +
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/core/platform/lf_zephyr_support.c b/core/platform/lf_zephyr_support.c
index 8230309aa..107d70656 100644
--- a/core/platform/lf_zephyr_support.c
+++ b/core/platform/lf_zephyr_support.c
@@ -144,7 +144,9 @@ void lf_initialize_clock() {
 // Zephyrs Counter API
 /**
- * Return the current time in nanoseconds
+ * Return the current time in nanoseconds. It gets the current value
+ * of the hi-res counter device and also keeps track of overflows
+ * to deliver a monotonically increasing clock.
  */
 int lf_clock_gettime(instant_t* t) {
     uint32_t now_cycles;
     uint64_t now_nsec;
     int res;
     res = counter_get_value(_lf_counter_dev, &now_cycles);
     now_nsec = counter_ticks_to_us(_lf_counter_dev, now_cycles)*1000ULL;
     *t = now_nsec + _lf_timer_last_epoch_nsec;
-    now_cycles = k_cycle_get_32();
-    *t = (SECOND(1)/CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC)*now_cycles;
     return 0;
 }

diff --git a/core/threaded/scheduler_adaptive.c b/core/threaded/scheduler_adaptive.c
index 538a70357..693a2740e 100644
--- a/core/threaded/scheduler_adaptive.c
+++ b/core/threaded/scheduler_adaptive.c
@@ -27,6 +27,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  * This is a non-priority-driven scheduler. See scheduler.h for documentation.
  * @author{Peter Donovan }
  */
+#include "lf_types.h"
 #if defined SCHEDULER && SCHEDULER == ADAPTIVE
 #ifndef NUMBER_OF_WORKERS
 #define NUMBER_OF_WORKERS 1

From 96d8dfdcaa03b0ec50bbc499ea6bcf86531fba7a Mon Sep 17 00:00:00 2001
From: erlingrj
Date: Thu, 16 Feb 2023 21:18:57 +0100
Subject: [PATCH 4/6] Handle overflows also in qemu emulation

---
 core/platform/lf_zephyr_support.c | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)

diff --git a/core/platform/lf_zephyr_support.c b/core/platform/lf_zephyr_support.c
index 107d70656..d89918119 100644
--- a/core/platform/lf_zephyr_support.c
+++ b/core/platform/lf_zephyr_support.c
@@ -45,6 +45,9 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 // Combine 2 32-bit words to a 64-bit word
 #define COMBINE_HI_LO(hi,lo) ((((uint64_t) hi) << 32) | ((uint64_t) lo))
+// Keep track of overflows to keep clocks monotonic
+static int64_t _lf_timer_epoch_duration_nsec;
+static volatile int64_t _lf_timer_last_epoch_nsec = 0;
 #if defined(LF_ZEPHYR_CLOCK_HI_RES)
 // Create semaphore for async wakeup from physical action
 const struct device *const _lf_counter_dev = DEVICE_DT_GET(LF_TIMER);
 static volatile bool _lf_alarm_fired;
 static uint32_t _lf_timer_freq;
-static int64_t _lf_timer_epoch_duration_usec;
-static volatile int64_t _lf_timer_last_epoch_nsec = 0;
 // Timer overflow callback
 static void _lf_timer_overflow_callback(const struct device *dev, void *user_data) {
-    _lf_timer_last_epoch_nsec += _lf_timer_epoch_duration_usec*1000LL;
+    _lf_timer_last_epoch_nsec += _lf_timer_epoch_duration_nsec;
 }
@@ -109,7 +110,7 @@ void lf_initialize_clock() {
     // Calculate the duration of an epoch
     counter_max_ticks = counter_get_max_top_value(_lf_counter_dev);
-    _lf_timer_epoch_duration_usec = counter_ticks_to_us(_lf_counter_dev, counter_max_ticks);
+    _lf_timer_epoch_duration_nsec = counter_ticks_to_us(_lf_counter_dev, counter_max_ticks) * 1000LL;
     // Set the max_top value to be the maximum
     counter_max_ticks = counter_get_max_top_value(_lf_counter_dev);
@@ -135,6 +136,9 @@ void lf_initialize_clock() {
 #else
     LF_PRINT_LOG("Using Low resolution zephyr kernel clock");
     LF_PRINT_LOG("Kernel Clock has frequency of %u Hz\n", CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);
+    _lf_timer_last_epoch_nsec = 0;
+    // Compute the duration of an
+    _lf_timer_epoch_duration_nsec = ((1LL << 32) * SECONDS(1))/CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
 #endif
 }
@@ -222,11 +226,19 @@ int lf_sleep_until_locked(instant_t wakeup) {
     }
 }
 #else
-// Clock and sleep implementation for LO_RES clock
-
+// Clock and sleep implementation for LO_RES clock. Handle wraps
+// by checking if two consecutive reads are monotonic
+static uint32_t last_read_cycles=0;
 int lf_clock_gettime(instant_t* t) {
     uint32_t now_cycles = k_cycle_get_32();
-    *t = (SECOND(1)/CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC)*now_cycles;
+
+    if (now_cycles < last_read_cycles) {
+        _lf_timer_last_epoch_nsec += _lf_timer_epoch_duration_nsec;
+    }
+
+    *t = (SECOND(1)/CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC)*now_cycles + _lf_timer_last_epoch_nsec;
+
+    last_read_cycles = now_cycles;
     return 0;
 }

From b6e1ee8b0198684e9a4d7bd7f3b6ea56c0836424 Mon Sep 17 00:00:00 2001
From: Marten Lohstroh
Date: Thu, 16 Feb 2023 12:46:46 -0800
Subject: [PATCH 5/6] Update core/platform/lf_zephyr_support.c

---
 core/platform/lf_zephyr_support.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/platform/lf_zephyr_support.c b/core/platform/lf_zephyr_support.c
index d89918119..365bfc9d7 100644
--- a/core/platform/lf_zephyr_support.c
+++ b/core/platform/lf_zephyr_support.c
@@ -350,7 +350,7 @@ int lf_notify_of_event() {
 // FIXME: What is an appropriate thread prio?
 #define _LF_THREAD_PRIORITY 5
-// If NUMBER_OF_WORKERS is not specified, or specified to 0. Then we default to 1
+// If NUMBER_OF_WORKERS is not specified, or set to 0, then we default to 1.
 #if !defined(NUMBER_OF_WORKERS) || NUMBER_OF_WORKERS==0
 #undef NUMBER_OF_WORKERS
 #define NUMBER_OF_WORKERS 1

From a369633ec07a693cc3120aa93787be3311a037c4 Mon Sep 17 00:00:00 2001
From: Erling Jellum
Date: Thu, 16 Feb 2023 23:45:38 +0100
Subject: [PATCH 6/6] Disable threading in Zephyr

---
 core/platform/lf_zephyr_support.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/core/platform/lf_zephyr_support.c b/core/platform/lf_zephyr_support.c
index 365bfc9d7..a14598a7f 100644
--- a/core/platform/lf_zephyr_support.c
+++ b/core/platform/lf_zephyr_support.c
@@ -343,7 +343,7 @@ int lf_notify_of_event() {
 #ifdef LF_THREADED
-#warning "Threaded support on Zephyr is still experimental"
+#error "Threaded support on Zephyr is not supported"
 // FIXME: What is an appropriate stack size?
 #define _LF_STACK_SIZE 1024
 // FIXME: What is an appropriate thread prio?
 #define _LF_THREAD_PRIORITY 5
@@ -381,7 +381,10 @@ int lf_available_cores() {
  * getting passed arguments. The new handle is stored in thread_id.
  *
  * @return 0 on success, platform-specific error number otherwise.
- *
+ * FIXME: As this function is currently part of the user-facing API,
+ * it should not care about the number of workers specified.
+ * If we want static allocation of workers, as implemented now,
+ * it must be removed from the API.
  */
 int lf_thread_create(lf_thread_t* thread, void *(*lf_thread) (void *), void* arguments) {
     // Use static id to map each created thread to a
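
Reviewer note (editorial, not part of any patch above): the clock changes in PATCH 3 and PATCH 4 rely on one wrap-handling idea: keep a running "epoch" offset in nanoseconds, and whenever the 32-bit counter is observed to have wrapped (via the counter device's overflow callback in the hi-res path, or because a new raw reading is smaller than the previous one in the lo-res path), credit one full counter period to that offset before converting the raw reading. Below is a minimal, self-contained sketch of that idea only; it is not the code in lf_zephyr_support.c. `read_cycle_counter_32` and `CYCLES_PER_SEC` are hypothetical stand-ins for `k_cycle_get_32()` and `CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC`, and the cycles-to-nanoseconds conversion is simplified relative to the patch.

#include <stdint.h>

/* Hypothetical platform hooks (stand-ins, not Zephyr APIs). */
extern uint32_t read_cycle_counter_32(void);
#define CYCLES_PER_SEC 10000000ULL
#define NSEC_PER_SEC   1000000000ULL

/* Nanoseconds covered by one full wrap (epoch) of the 32-bit counter. */
static const uint64_t EPOCH_DURATION_NSEC = ((1ULL << 32) * NSEC_PER_SEC) / CYCLES_PER_SEC;

static uint64_t epoch_base_nsec = 0;  /* time accumulated in completed epochs */
static uint32_t last_cycles = 0;      /* previous raw reading, used to detect wraps */

/* Returns a monotonically increasing time in nanoseconds. */
uint64_t monotonic_time_nsec(void) {
    uint32_t now_cycles = read_cycle_counter_32();
    if (now_cycles < last_cycles) {
        /* The counter went backwards, so it must have wrapped: credit one epoch. */
        epoch_base_nsec += EPOCH_DURATION_NSEC;
    }
    last_cycles = now_cycles;
    return epoch_base_nsec + ((uint64_t)now_cycles * NSEC_PER_SEC) / CYCLES_PER_SEC;
}

As with the patched lo-res path, this wrap detection assumes the clock is read at least once per counter period (roughly 430 seconds at 10 MHz for a 32-bit counter) and that reads are not concurrent, since last_cycles and epoch_base_nsec are plain statics.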