Skip to content

Commit

Permalink
Merge pull request #5731 from melshuber/master
Browse files Browse the repository at this point in the history
core: recursive mutex implementation
  • Loading branch information
kaspar030 authored Feb 21, 2017
2 parents a53c50a + 6cd3d34 commit 8d207ca
Show file tree
Hide file tree
Showing 6 changed files with 491 additions and 0 deletions.
110 changes: 110 additions & 0 deletions core/include/rmutex.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
/*
* Copyright (C) 2016 Theobroma Systems Design & Consulting GmbH
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/

/**
* @ingroup core_sync Synchronization
* @brief Recursive Mutex for thread synchronization
* @{
*
* @file
* @brief RIOT synchronization API
*
* @author Martin Elshuber <[email protected]>
*
*/

#ifndef RMUTEX_H_
#define RMUTEX_H_

#include <stdatomic.h>

#include "mutex.h"
#include "kernel_types.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
* @brief Mutex structure. Must never be modified by the user.
*/
typedef struct rmutex_t {
    /* Fields are managed by the rmutex functions; never touch them directly. */
    /**
     * @brief The non-recursive mutex used for the actual locking.
     *        **Must never be changed by the user.**
     * @internal
     */
    mutex_t mutex;

    /**
     * @brief   Number of locks currently held by the owner thread
     *          (recursion depth; 0 means unlocked)
     * @internal
     */
    uint16_t refcount;

    /**
     * @brief   PID of the thread currently owning the mutex
     *          (KERNEL_PID_UNDEF when unlocked).
     * @details The owner is written only by the mutex holder, but is read
     *          concurrently by other threads; atomic_int_least16_t is used
     *          so those reads see a consistent value. Note that
     *          @ref kernel_pid_t is a 16-bit integer, so it fits.
     * @internal
     */
    atomic_int_least16_t owner;
} rmutex_t;

/**
 * @brief Static initializer for rmutex_t (unlocked, refcount 0, no owner).
 * @details This initializer is preferable to rmutex_init().
 */
#define RMUTEX_INIT { MUTEX_INIT, 0, ATOMIC_VAR_INIT(KERNEL_PID_UNDEF) }

/**
 * @brief Initializes a recursive mutex object to the unlocked state.
 * @details For static initialization of variables prefer RMUTEX_INIT;
 *          call this function only for dynamically allocated mutexes.
 * @param[out] rmutex pre-allocated mutex structure, must not be NULL.
 */
static inline void rmutex_init(rmutex_t *rmutex)
{
    /* overwrite the caller's storage with a freshly initialized template */
    *rmutex = (rmutex_t)RMUTEX_INIT;
}

/**
 * @brief Tries to get a recursive mutex, non-blocking.
 *
 * If the calling thread already owns @p rmutex, the call succeeds and the
 * recursion depth is increased.
 *
 * @param[in] rmutex Recursive mutex object to lock. Has to be
 *                   initialized first. Must not be NULL.
 *
 * @return 1 if the mutex was obtained (it was unlocked, or already owned
 *           by the calling thread); the caller now holds it.
 * @return 0 if the mutex is currently locked by another thread.
 */
int rmutex_trylock(rmutex_t *rmutex);

/**
 * @brief Locks a recursive mutex, blocking.
 *
 * Blocks until the mutex is available; a thread that already owns the
 * mutex re-enters immediately (recursion depth is increased).
 *
 * @param[in] rmutex Recursive mutex object to lock. Has to be
 *                   initialized first. Must not be NULL.
 */
void rmutex_lock(rmutex_t *rmutex);

/**
 * @brief Unlocks the recursive mutex.
 *
 * Decreases the recursion depth; the underlying mutex is released only
 * when the depth reaches zero. Must be called by the owner thread.
 *
 * @param[in] rmutex Recursive mutex object to unlock, must not be NULL.
 */
void rmutex_unlock(rmutex_t *rmutex);

#ifdef __cplusplus
}
#endif

#endif /* RMUTEX_H_ */
/** @} */
150 changes: 150 additions & 0 deletions core/rmutex.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,150 @@
/*
* Copyright (C) 2016 Theobroma Systems Design & Consulting GmbH
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/

/**
* @ingroup core_sync Synchronization
* @brief Recursive Mutex for thread synchronization
* @{
*
* @file
* @brief RIOT synchronization API
*
* @author Martin Elshuber <[email protected]>
*
* The recursive mutex implementation is inspired by the implementation of
* Nick v. IJzendoorn <[email protected]>
* @see https://github.com/RIOT-OS/RIOT/pull/4529/files#diff-8f48e1b9ed7a0a48d0c686a87cc5084eR35
*
*/

#include <stdio.h>
#include <inttypes.h>

#include "rmutex.h"
#include "thread.h"
#include "assert.h"

#define ENABLE_DEBUG (0)
#include "debug.h"

/**
 * @brief   Acquire @p rmutex for the calling thread.
 *
 * @param[in] rmutex   Recursive mutex to acquire, must not be NULL.
 * @param[in] trylock  If non-zero, return 0 instead of blocking when the
 *                     mutex is held by another thread.
 *
 * @return 1 if the calling thread now holds the mutex (depth increased).
 * @return 0 if @p trylock was requested and another thread holds the mutex.
 */
static int _lock(rmutex_t *rmutex, int trylock)
{
    kernel_pid_t owner;

    /* try to lock the mutex */
    DEBUG("rmutex %" PRIi16" : trylock\n", thread_getpid());
    if (mutex_trylock(&rmutex->mutex) == 0) {
        DEBUG("rmutex %" PRIi16" : mutex already held\n", thread_getpid());
        /* Mutex is already held
         *
         * Case 1: Mutex is not held by me
         *     Condition 1: holds
         *     rmutex->owner != thread_getpid()
         *
         * Note for Case 1:
         *
         * As a consequence it is necessary to call
         * mutex_lock(). However the read access to owner is not
         * locked, and owner can be changed by a thread that is
         * holding the lock (e.g.: holder unlocks the mutex, new
         * holder acquires the lock). The atomic access strategy
         * 'relaxed' ensures that the value of rmutex->owner is read
         * consistently.
         *
         * It is not necessary to synchronize (make written values
         * visible) read/write with other threads, because every
         * write by other threads makes Condition 1 evaluate to
         * false. They all write either KERNEL_PID_UNDEF or the
         * pid of the other thread.
         *
         * Other threads never set rmutex->owner to the pid of the
         * current thread. Hence, it is guaranteed that mutex_lock
         * is eventually called.
         *
         * Case 2: Mutex is held by me (relock)
         *     Condition 2: holds
         *     rmutex->owner == thread_getpid()
         *
         * Note for Case 2:
         *
         * Because rmutex->owner is only written by the
         * owner (me), rmutex->owner stays constant throughout the
         * complete call and rmutex->refcount is protected
         * (read/write) by the mutex.
         */

        /* ensure that owner is read atomically, since I need a consistent value */
        owner = atomic_load_explicit(&rmutex->owner, memory_order_relaxed);
        DEBUG("rmutex %" PRIi16" : mutex held by %" PRIi16" \n", thread_getpid(), owner);

        /* Case 1: Mutex is not held by me */
        if (owner != thread_getpid()) {
            /* wait for the mutex */
            DEBUG("rmutex %" PRIi16" : locking mutex\n", thread_getpid());

            if (trylock) {
                return 0;
            }
            else {
                mutex_lock(&rmutex->mutex);
            }
        }
        /* Case 2: Mutex is held by me (relock) */
        /* Note: There is nothing to do for Case 2; refcount is incremented below */
    }

    DEBUG("rmutex %" PRIi16" : I am now holding the mutex\n", thread_getpid());

    /* I am holding the recursive mutex */
    DEBUG("rmutex %" PRIi16" : setting the owner\n", thread_getpid());

    /* ensure that owner is written atomically, since others need a consistent value */
    atomic_store_explicit(&rmutex->owner, thread_getpid(), memory_order_relaxed);

    DEBUG("rmutex %" PRIi16" : increasing refs\n", thread_getpid());

    /* increase the refcount */
    rmutex->refcount++;

    return 1;
}

void rmutex_lock(rmutex_t *rmutex)
{
    /* blocking acquisition: _lock() always returns 1 in this mode */
    (void)_lock(rmutex, 0);
}

int rmutex_trylock(rmutex_t *rmutex)
{
    /* non-blocking acquisition: report whether the lock was obtained */
    int locked = _lock(rmutex, 1);
    return locked;
}

void rmutex_unlock(rmutex_t *rmutex)
{
    /* Only the owner thread may unlock, and only while it holds at least
     * one lock; anything else indicates a caller bug. */
    assert(atomic_load_explicit(&rmutex->owner, memory_order_relaxed) == thread_getpid());
    assert(rmutex->refcount > 0);

    DEBUG("rmutex %" PRIi16" : decrementing refs\n", thread_getpid());

    /* decrement refcount; protected by the mutex since I am the holder */
    rmutex->refcount--;

    /* check if I should still hold the mutex */
    if (rmutex->refcount == 0) {
        /* if not, release the mutex */

        DEBUG("rmutex %" PRIi16" : resetting owner\n", thread_getpid());

        /* clear owner before unlocking, so that waiters never observe
         * themselves as owner; written atomically for concurrent readers */
        atomic_store_explicit(&rmutex->owner, KERNEL_PID_UNDEF, memory_order_relaxed);

        DEBUG("rmutex %" PRIi16" : releasing mutex\n", thread_getpid());

        mutex_unlock(&rmutex->mutex);
    }
}
6 changes: 6 additions & 0 deletions tests/rmutex/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
APPLICATION = rmutex
include ../Makefile.tests_common

BOARD_INSUFFICIENT_MEMORY := stm32f0discovery weio nucleo-f030 nucleo32-f042

include $(RIOTBASE)/Makefile.include
86 changes: 86 additions & 0 deletions tests/rmutex/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
Expected result
===============

When successful, you should see 5 different threads printing their
PID, priority and recursion depth. The thread with the lowest priority
should be able to lock (and unlock) the mutex first, followed by the
other threads in the order of their priority (highest next). If two
threads have the same priority the lower thread id should acquire the
lock. The output should look like the following:

```
main(): This is RIOT! (Version: xxx)
Recursive Mutex test
Please refer to the README.md for more information
Recursive Mutex test
Please refer to the README.md for more information
T3 (prio 6, depth 0): trying to lock rmutex now
T4 (prio 4, depth 0): trying to lock rmutex now
T5 (prio 5, depth 0): trying to lock rmutex now
T6 (prio 2, depth 0): trying to lock rmutex now
T7 (prio 3, depth 0): trying to lock rmutex now
main: unlocking recursive mutex
T6 (prio 2, depth 0): locked rmutex now
T6 (prio 2, depth 1): trying to lock rmutex now
T6 (prio 2, depth 1): locked rmutex now
T6 (prio 2, depth 2): trying to lock rmutex now
T6 (prio 2, depth 2): locked rmutex now
T6 (prio 2, depth 3): trying to lock rmutex now
T6 (prio 2, depth 3): locked rmutex now
T6 (prio 2, depth 3): unlocked rmutex
T6 (prio 2, depth 2): unlocked rmutex
T6 (prio 2, depth 1): unlocked rmutex
T6 (prio 2, depth 0): unlocked rmutex
T7 (prio 3, depth 0): locked rmutex now
T7 (prio 3, depth 1): trying to lock rmutex now
T7 (prio 3, depth 1): locked rmutex now
T7 (prio 3, depth 2): trying to lock rmutex now
T7 (prio 3, depth 2): locked rmutex now
T7 (prio 3, depth 3): trying to lock rmutex now
T7 (prio 3, depth 3): locked rmutex now
T7 (prio 3, depth 4): trying to lock rmutex now
T7 (prio 3, depth 4): locked rmutex now
T7 (prio 3, depth 4): unlocked rmutex
T7 (prio 3, depth 3): unlocked rmutex
T7 (prio 3, depth 2): unlocked rmutex
T7 (prio 3, depth 1): unlocked rmutex
T7 (prio 3, depth 0): unlocked rmutex
T4 (prio 4, depth 0): locked rmutex now
T4 (prio 4, depth 1): trying to lock rmutex now
T4 (prio 4, depth 1): locked rmutex now
T4 (prio 4, depth 2): trying to lock rmutex now
T4 (prio 4, depth 2): locked rmutex now
T4 (prio 4, depth 2): unlocked rmutex
T4 (prio 4, depth 1): unlocked rmutex
T4 (prio 4, depth 0): unlocked rmutex
T5 (prio 5, depth 0): locked rmutex now
T5 (prio 5, depth 1): trying to lock rmutex now
T5 (prio 5, depth 1): locked rmutex now
T5 (prio 5, depth 2): trying to lock rmutex now
T5 (prio 5, depth 2): locked rmutex now
T5 (prio 5, depth 2): unlocked rmutex
T5 (prio 5, depth 1): unlocked rmutex
T5 (prio 5, depth 0): unlocked rmutex
T3 (prio 6, depth 0): locked rmutex now
T3 (prio 6, depth 1): trying to lock rmutex now
T3 (prio 6, depth 1): locked rmutex now
T3 (prio 6, depth 2): trying to lock rmutex now
T3 (prio 6, depth 2): locked rmutex now
T3 (prio 6, depth 3): trying to lock rmutex now
T3 (prio 6, depth 3): locked rmutex now
T3 (prio 6, depth 4): trying to lock rmutex now
T3 (prio 6, depth 4): locked rmutex now
T3 (prio 6, depth 4): unlocked rmutex
T3 (prio 6, depth 3): unlocked rmutex
T3 (prio 6, depth 2): unlocked rmutex
T3 (prio 6, depth 1): unlocked rmutex
T3 (prio 6, depth 0): unlocked rmutex
Test END, check the order of priorities above.
```

Background
==========
This test application stresses a mutex with a number of threads waiting on it.
Loading

0 comments on commit 8d207ca

Please sign in to comment.