kernel/sched: Eliminate PRESTART thread state
Traditionally threads have been initialized with a PRESTART flag set,
which gets cleared when the thread runs for the first time via either
its timeout or the k_thread_start() API.

But if you think about it, this is no different, semantically, than
SUSPENDED: the thread is prevented from running until the flag is
cleared.

So unify the two.  Start threads in the SUSPENDED state, point
everyone looking at the PRESTART bit to the SUSPENDED flag, and make
k_thread_start() be a synonym for k_thread_resume().

There is some mild code size savings from the eliminated duplication,
but the real win here is that we make space in the thread flags byte,
which had run out.

Signed-off-by: Andy Ross <[email protected]>
andyross authored and nashif committed Nov 27, 2024
1 parent 6877b6d commit 7cdf405
Showing 13 changed files with 29 additions and 86 deletions.
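
Before the per-file diffs, a minimal caller-side sketch of the behavior the commit message describes may help. The thread object, stack, and entry-function names here are hypothetical illustration, not part of the commit, and the snippet assumes the post-commit kernel in which k_thread_start() is an inline wrapper around k_thread_resume():

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

#define DEMO_STACK_SIZE 1024
#define DEMO_PRIORITY   5

K_THREAD_STACK_DEFINE(demo_stack, DEMO_STACK_SIZE);
static struct k_thread demo_thread;   /* hypothetical example thread */

static void demo_entry(void *p1, void *p2, void *p3)
{
	printk("demo thread running\n");
}

void demo_start_later(void)
{
	/* A K_FOREVER delay creates the thread without scheduling it.
	 * Before this commit it waited with _THREAD_PRESTART set; now it
	 * is simply created in the _THREAD_SUSPENDED state.
	 */
	k_tid_t tid = k_thread_create(&demo_thread, demo_stack,
				      K_THREAD_STACK_SIZEOF(demo_stack),
				      demo_entry, NULL, NULL, NULL,
				      DEMO_PRIORITY, 0, K_FOREVER);

	/* Either call releases it; k_thread_start() now just forwards
	 * to k_thread_resume().
	 */
	k_thread_start(tid);
}

Existing callers keep working; only code that inspected _THREAD_PRESTART directly (as several of the diffs below did) needs to switch to the suspended-state helpers.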
30 changes: 18 additions & 12 deletions include/zephyr/kernel.h
@@ -694,18 +694,6 @@ static inline k_tid_t k_current_get(void)
  */
 __syscall void k_thread_abort(k_tid_t thread);
 
-
-/**
- * @brief Start an inactive thread
- *
- * If a thread was created with K_FOREVER in the delay parameter, it will
- * not be added to the scheduling queue until this function is called
- * on it.
- *
- * @param thread thread to start
- */
-__syscall void k_thread_start(k_tid_t thread);
-
 k_ticks_t z_timeout_expires(const struct _timeout *timeout);
 k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
 
@@ -1064,6 +1052,24 @@ __syscall void k_thread_suspend(k_tid_t thread);
  */
 __syscall void k_thread_resume(k_tid_t thread);
 
+/**
+ * @brief Start an inactive thread
+ *
+ * If a thread was created with K_FOREVER in the delay parameter, it will
+ * not be added to the scheduling queue until this function is called
+ * on it.
+ *
+ * @note This is a legacy API for compatibility. Modern Zephyr
+ * threads are initialized in the "suspended" state and do not need
+ * special handling for "start".
+ *
+ * @param thread thread to start
+ */
+static inline void k_thread_start(k_tid_t thread)
+{
+	k_thread_resume(thread);
+}
+
 /**
  * @brief Set time-slicing period and scope.
  *
3 changes: 0 additions & 3 deletions include/zephyr/kernel_structs.h
@@ -54,9 +54,6 @@ extern "C" {
 /* Thread is waiting on an object */
 #define _THREAD_PENDING (BIT(1))
 
-/* Thread has not yet started */
-#define _THREAD_PRESTART (BIT(2))
-
 /* Thread has terminated */
 #define _THREAD_DEAD (BIT(3))
 
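
The kernel_structs.h hunk above is where the flag-byte space mentioned in the commit message is reclaimed. As an illustrative sketch only — bit positions other than _THREAD_PENDING and _THREAD_DEAD are assumptions inferred from surrounding code, not part of this diff — the state byte now has BIT(2) free:

#include <zephyr/sys/util.h>   /* BIT() */

/* Illustrative layout of thread_state after this commit. */
#define _THREAD_DUMMY     (BIT(0))   /* assumed position */
#define _THREAD_PENDING   (BIT(1))   /* from the hunk above */
                                     /* BIT(2) freed (was _THREAD_PRESTART) */
#define _THREAD_DEAD      (BIT(3))   /* from the hunk above */
#define _THREAD_SUSPENDED (BIT(4))   /* assumed position */
#define _THREAD_ABORTING  (BIT(5))   /* assumed position */
#define _THREAD_QUEUED    (BIT(7))   /* assumed position */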
13 changes: 1 addition & 12 deletions kernel/include/kthread.h
@@ -15,7 +15,6 @@
 
 #define Z_STATE_STR_DUMMY "dummy"
 #define Z_STATE_STR_PENDING "pending"
-#define Z_STATE_STR_PRESTART "prestart"
 #define Z_STATE_STR_DEAD "dead"
 #define Z_STATE_STR_SUSPENDED "suspended"
 #define Z_STATE_STR_ABORTING "aborting"
@@ -97,7 +96,7 @@ static inline bool z_is_thread_prevented_from_running(struct k_thread *thread)
 {
 	uint8_t state = thread->base.thread_state;
 
-	return (state & (_THREAD_PENDING | _THREAD_PRESTART | _THREAD_DEAD |
+	return (state & (_THREAD_PENDING | _THREAD_DEAD |
 			 _THREAD_DUMMY | _THREAD_SUSPENDED)) != 0U;
 
 }
@@ -113,11 +112,6 @@ static inline bool z_is_thread_ready(struct k_thread *thread)
 		z_is_thread_timeout_active(thread));
 }
 
-static inline bool z_has_thread_started(struct k_thread *thread)
-{
-	return (thread->base.thread_state & _THREAD_PRESTART) == 0U;
-}
-
 static inline bool z_is_thread_state_set(struct k_thread *thread, uint32_t state)
 {
 	return (thread->base.thread_state & state) != 0U;
@@ -142,11 +136,6 @@ static inline void z_mark_thread_as_not_suspended(struct k_thread *thread)
 	SYS_PORT_TRACING_FUNC(k_thread, sched_resume, thread);
 }
 
-static inline void z_mark_thread_as_started(struct k_thread *thread)
-{
-	thread->base.thread_state &= ~_THREAD_PRESTART;
-}
-
 static inline void z_mark_thread_as_pending(struct k_thread *thread)
 {
 	thread->base.thread_state |= _THREAD_PENDING;
4 changes: 2 additions & 2 deletions kernel/init.c
@@ -598,7 +598,7 @@ static void init_idle_thread(int i)
 			stack_size, idle, &_kernel.cpus[i],
 			NULL, NULL, K_IDLE_PRIO, K_ESSENTIAL,
 			tname);
-	z_mark_thread_as_started(thread);
+	z_mark_thread_as_not_suspended(thread);
 
 #ifdef CONFIG_SMP
 	thread->base.is_idle = 1U;
@@ -675,7 +675,7 @@ static char *prepare_multithreading(void)
 			NULL, NULL, NULL,
 			CONFIG_MAIN_THREAD_PRIORITY,
 			K_ESSENTIAL, "main");
-	z_mark_thread_as_started(&z_main_thread);
+	z_mark_thread_as_not_suspended(&z_main_thread);
 	z_ready_thread(&z_main_thread);
 
 	z_init_cpu(0);
19 changes: 1 addition & 18 deletions kernel/sched.c
@@ -405,20 +405,6 @@ void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
 	}
 }
 
-void z_sched_start(struct k_thread *thread)
-{
-	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
-
-	if (z_has_thread_started(thread)) {
-		k_spin_unlock(&_sched_spinlock, key);
-		return;
-	}
-
-	z_mark_thread_as_started(thread);
-	ready_thread(thread);
-	z_reschedule(&_sched_spinlock, key);
-}
-
 /* Spins in ISR context, waiting for a thread known to be running on
  * another CPU to catch the IPI we sent and halt. Note that we check
  * for ourselves being asynchronously halted first to prevent simple
@@ -645,10 +631,7 @@ void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
 		if (thread->base.pended_on != NULL) {
 			unpend_thread_no_timeout(thread);
 		}
-		z_mark_thread_as_started(thread);
-		if (is_timeout) {
-			z_mark_thread_as_not_suspended(thread);
-		}
+		z_mark_thread_as_not_suspended(thread);
 		ready_thread(thread);
 	}
 }
19 changes: 1 addition & 18 deletions kernel/thread.c
@@ -236,7 +236,6 @@ const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size)
 	} state_string[] = {
 		SS_ENT(DUMMY),
 		SS_ENT(PENDING),
-		SS_ENT(PRESTART),
 		SS_ENT(DEAD),
 		SS_ENT(SUSPENDED),
 		SS_ENT(ABORTING),
@@ -347,22 +346,6 @@ void z_check_stack_sentinel(void)
 }
 #endif /* CONFIG_STACK_SENTINEL */
 
-void z_impl_k_thread_start(k_tid_t thread)
-{
-	SYS_PORT_TRACING_OBJ_FUNC(k_thread, start, thread);
-
-	z_sched_start(thread);
-}
-
-#ifdef CONFIG_USERSPACE
-static inline void z_vrfy_k_thread_start(k_tid_t thread)
-{
-	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
-	z_impl_k_thread_start(thread);
-}
-#include <zephyr/syscalls/k_thread_start_mrsh.c>
-#endif /* CONFIG_USERSPACE */
-
 #if defined(CONFIG_STACK_POINTER_RANDOM) && (CONFIG_STACK_POINTER_RANDOM != 0)
 int z_stack_adjust_initialized;
 
@@ -559,7 +542,7 @@ char *z_setup_new_thread(struct k_thread *new_thread,
 	z_waitq_init(&new_thread->join_queue);
 
 	/* Initialize various struct k_thread members */
-	z_init_thread_base(&new_thread->base, prio, _THREAD_PRESTART, options);
+	z_init_thread_base(&new_thread->base, prio, _THREAD_SUSPENDED, options);
 	stack_ptr = setup_thread_stack(new_thread, stack, stack_size);
 
 #ifdef CONFIG_KERNEL_COHERENCE
@@ -231,10 +231,6 @@ cy_rslt_t cy_rtos_get_thread_state(cy_thread_t *thread, cy_thread_state_t *state
 		*state = CY_THREAD_STATE_UNKNOWN;
 		break;
 
-	case _THREAD_PRESTART:
-		*state = CY_THREAD_STATE_INACTIVE;
-		break;
-
 	case _THREAD_SUSPENDED:
 	case _THREAD_PENDING:
 		*state = CY_THREAD_STATE_BLOCKED;
2 changes: 1 addition & 1 deletion subsys/mgmt/mcumgr/transport/src/smp_udp.c
@@ -41,9 +41,9 @@ BUILD_ASSERT(0, "Either IPv4 or IPv6 SMP must be enabled for the MCUmgr UDP SMP
 BUILD_ASSERT(sizeof(struct sockaddr) <= CONFIG_MCUMGR_TRANSPORT_NETBUF_USER_DATA_SIZE,
 	     "CONFIG_MCUMGR_TRANSPORT_NETBUF_USER_DATA_SIZE must be >= sizeof(struct sockaddr)");
 
+/* FIXME: dangerous logic, use a kernel API for this */
 #define IS_THREAD_RUNNING(thread) \
 	(thread.base.thread_state & (_THREAD_PENDING | \
-				     _THREAD_PRESTART | \
 				     _THREAD_SUSPENDED | \
 				     _THREAD_QUEUED) ? true : false)
 
2 changes: 1 addition & 1 deletion subsys/portability/cmsis_rtos_v1/cmsis_kernel.c
@@ -41,5 +41,5 @@ osStatus osKernelStart(void)
  */
 int32_t osKernelRunning(void)
 {
-	return z_has_thread_started(&z_main_thread);
+	return !z_is_thread_suspended(&z_main_thread);
 }
2 changes: 1 addition & 1 deletion subsys/portability/cmsis_rtos_v1/cmsis_thread.c
@@ -14,7 +14,7 @@ static inline int _is_thread_cmsis_inactive(struct k_thread *thread)
 {
 	uint8_t state = thread->base.thread_state;
 
-	return state & (_THREAD_PRESTART | _THREAD_DEAD);
+	return state & _THREAD_DEAD;
 }
 
 static inline int32_t zephyr_to_cmsis_priority(uint32_t z_prio)
5 changes: 1 addition & 4 deletions subsys/portability/cmsis_rtos_v2/thread.c
@@ -39,7 +39,7 @@ static inline int _is_thread_cmsis_inactive(struct k_thread *thread)
 {
 	uint8_t state = thread->base.thread_state;
 
-	return state & (_THREAD_PRESTART | _THREAD_DEAD);
+	return state & _THREAD_DEAD;
 }
 
 static inline uint32_t zephyr_to_cmsis_priority(uint32_t z_prio)
@@ -308,9 +308,6 @@ osThreadState_t osThreadGetState(osThreadId_t thread_id)
 	case _THREAD_DUMMY:
 		state = osThreadError;
 		break;
-	case _THREAD_PRESTART:
-		state = osThreadInactive;
-		break;
 	case _THREAD_DEAD:
 		state = osThreadTerminated;
 		break;
6 changes: 1 addition & 5 deletions tests/benchmarks/thread_metric/src/tm_porting_layer_zephyr.c
@@ -78,11 +78,7 @@ int tm_thread_create(int thread_id, int priority, void (*entry_function)(void *,
  */
 int tm_thread_resume(int thread_id)
 {
-	if (test_thread[thread_id].base.thread_state & _THREAD_PRESTART) {
-		k_thread_start(&test_thread[thread_id]);
-	} else {
-		k_thread_resume(&test_thread[thread_id]);
-	}
+	k_thread_resume(&test_thread[thread_id]);
 
 	return TM_SUCCESS;
 }
6 changes: 1 addition & 5 deletions tests/kernel/threads/thread_apis/src/test_kthread_for_each.c
@@ -213,7 +213,7 @@ ZTEST(threads_lifecycle_1cpu, test_k_thread_foreach_unlocked_null_cb)
  * @brief Test k_thread_state_str API with null callback
  *
  * @details It's impossible to sched a thread step by step manually to
- * experience each state from _THREAD_PRESTART to _THREAD_DEAD. To cover each
+ * experience each state from initialization to _THREAD_DEAD. To cover each
  * line of function k_thread_state_str(), set thread_state of tdata1 and check
  * the string this function returns
  *
@@ -245,10 +245,6 @@ ZTEST(threads_lifecycle_1cpu, test_k_thread_state_str)
 	str = k_thread_state_str(tid, state_str, sizeof(state_str));
 	zassert_str_equal(str, "pending");
 
-	tid->base.thread_state = _THREAD_PRESTART;
-	str = k_thread_state_str(tid, state_str, sizeof(state_str));
-	zassert_str_equal(str, "prestart");
-
 	tid->base.thread_state = _THREAD_DEAD;
 	str = k_thread_state_str(tid, state_str, sizeof(state_str));
 	zassert_str_equal(str, "dead");
