diff --git a/impeller/base/base_unittests.cc b/impeller/base/base_unittests.cc
index d869420f09aaf..a7c40d868df55 100644
--- a/impeller/base/base_unittests.cc
+++ b/impeller/base/base_unittests.cc
@@ -143,5 +143,95 @@ TEST(ConditionVariableTest, WaitForever) {
   ASSERT_EQ(test.rando_ivar, 12u);
 }
 
+TEST(ConditionVariableTest, TestsCriticalSectionAfterWaitForUntil) {
+  std::vector<std::thread> threads;
+  const auto kThreadCount = 10u;
+
+  Mutex mtx;
+  ConditionVariable cv;
+  size_t sum = 0u;
+
+  std::condition_variable start_cv;
+  std::mutex start_mtx;
+  bool start = false;
+  auto start_predicate = [&start]() { return start; };
+  auto thread_main = [&]() {
+    {
+      std::unique_lock start_lock(start_mtx);
+      start_cv.wait(start_lock, start_predicate);
+    }
+
+    mtx.Lock();
+    cv.WaitFor(mtx, std::chrono::milliseconds{0u}, []() { return true; });
+    auto old_val = sum;
+    std::this_thread::sleep_for(std::chrono::milliseconds{100u});
+    sum = old_val + 1u;
+    mtx.Unlock();
+  };
+  // Launch all threads. They will wait for the start CV to be signaled.
+  for (size_t i = 0; i < kThreadCount; i++) {
+    threads.emplace_back(thread_main);
+  }
+  // Notify all threads that the test may start.
+  {
+    {
+      std::scoped_lock start_lock(start_mtx);
+      start = true;
+    }
+    start_cv.notify_all();
+  }
+  // Join all threads.
+  ASSERT_EQ(threads.size(), kThreadCount);
+  for (size_t i = 0; i < kThreadCount; i++) {
+    threads[i].join();
+  }
+  ASSERT_EQ(sum, kThreadCount);
+}
+
+TEST(ConditionVariableTest, TestsCriticalSectionAfterWait) {
+  std::vector<std::thread> threads;
+  const auto kThreadCount = 10u;
+
+  Mutex mtx;
+  ConditionVariable cv;
+  size_t sum = 0u;
+
+  std::condition_variable start_cv;
+  std::mutex start_mtx;
+  bool start = false;
+  auto start_predicate = [&start]() { return start; };
+  auto thread_main = [&]() {
+    {
+      std::unique_lock start_lock(start_mtx);
+      start_cv.wait(start_lock, start_predicate);
+    }
+
+    mtx.Lock();
+    cv.Wait(mtx, []() { return true; });
+    auto old_val = sum;
+    std::this_thread::sleep_for(std::chrono::milliseconds{100u});
+    sum = old_val + 1u;
+    mtx.Unlock();
+  };
+  // Launch all threads. They will wait for the start CV to be signaled.
+  for (size_t i = 0; i < kThreadCount; i++) {
+    threads.emplace_back(thread_main);
+  }
+  // Notify all threads that the test may start.
+  {
+    {
+      std::scoped_lock start_lock(start_mtx);
+      start = true;
+    }
+    start_cv.notify_all();
+  }
+  // Join all threads.
+  ASSERT_EQ(threads.size(), kThreadCount);
+  for (size_t i = 0; i < kThreadCount; i++) {
+    threads[i].join();
+  }
+  ASSERT_EQ(sum, kThreadCount);
+}
+
 }  // namespace testing
 }  // namespace impeller
diff --git a/impeller/base/thread.h b/impeller/base/thread.h
index 0f8dfd20bbef1..d6b03573a2631 100644
--- a/impeller/base/thread.h
+++ b/impeller/base/thread.h
@@ -11,6 +11,7 @@
 #include
 #include
 
+#include "flutter/fml/logging.h"
 #include "flutter/fml/macros.h"
 #include "flutter/fml/synchronization/shared_mutex.h"
 #include "impeller/base/thread_safety.h"
@@ -159,11 +160,11 @@ class ConditionVariable {
 
   //----------------------------------------------------------------------------
   /// @brief      Atomically unlocks the mutex and waits on the condition
-  ///             variable up to a specified time point. Spurious wakes may
-  ///             happen before the time point is reached. In such cases the
-  ///             predicate is invoked and it must return `false` for the wait
-  ///             to continue. The predicate will be invoked with the mutex
-  ///             locked.
+  ///             variable up to a specified time point. Lock will be reacquired
+  ///             when the wait exits. Spurious wakes may happen before the time
+  ///             point is reached. In such cases the predicate is invoked and
+  ///             it must return `false` for the wait to continue. The predicate
+  ///             will be invoked with the mutex locked.
   ///
   /// @note       Since the predicate is invoked with the mutex locked, if it
   ///             accesses other guarded resources, the predicate itself must be
@@ -191,15 +192,18 @@ class ConditionVariable {
                  const std::chrono::time_point<Clock, Duration>& time_point,
                  const Predicate& should_stop_waiting) IPLR_REQUIRES(mutex) {
     std::unique_lock lock(mutex.mutex_, std::adopt_lock);
-    return cv_.wait_until(lock, time_point, should_stop_waiting);
+    const auto result = cv_.wait_until(lock, time_point, should_stop_waiting);
+    lock.release();
+    return result;
   }
 
   //----------------------------------------------------------------------------
   /// @brief      Atomically unlocks the mutex and waits on the condition
-  ///             variable for a designated duration. Spurious wakes may happen
-  ///             before the time point is reached. In such cases the predicate
-  ///             is invoked and it must return `false` for the wait to
-  ///             continue. The predicate will be invoked with the mutex locked.
+  ///             variable for a designated duration. Lock will be reacquired
+  ///             when the wait exits. Spurious wakes may happen before the time
+  ///             point is reached. In such cases the predicate is invoked and
+  ///             it must return `false` for the wait to continue. The predicate
+  ///             will be invoked with the mutex locked.
   ///
   /// @note       Since the predicate is invoked with the mutex locked, if it
   ///             accesses other guarded resources, the predicate itself must be
@@ -233,10 +237,11 @@ class ConditionVariable {
   //----------------------------------------------------------------------------
   /// @brief      Atomically unlocks the mutex and waits on the condition
   ///             variable indefinitely till the predicate determines that the
-  ///             wait must end. Spurious wakes may happen before the time point
-  ///             is reached. In such cases the predicate is invoked and it must
-  ///             return `false` for the wait to continue. The predicate will be
-  ///             invoked with the mutex locked.
+  ///             wait must end. Lock will be reacquired when the wait exits.
+  ///             Spurious wakes may happen before the time point is reached. In
+  ///             such cases the predicate is invoked and it must return `false`
+  ///             for the wait to continue. The predicate will be invoked with
+  ///             the mutex locked.
   ///
   /// @note       Since the predicate is invoked with the mutex locked, if it
   ///             accesses other guarded resources, the predicate itself must be
@@ -255,6 +260,7 @@ class ConditionVariable {
                  IPLR_REQUIRES(mutex) {
     std::unique_lock lock(mutex.mutex_, std::adopt_lock);
     cv_.wait(lock, should_stop_waiting);
+    lock.release();
   }
 
  private:
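The thread.h change rests on a small std::unique_lock idiom: the lock is constructed with std::adopt_lock so it takes over the Mutex the caller already holds, the condition variable wait reacquires the mutex on wake-up, and release() detaches the unique_lock so its destructor does not unlock the mutex on the way out. Below is a minimal standalone sketch of that idiom, assuming only the C++ standard library (plain std::mutex and std::condition_variable rather than the Impeller wrappers); it is an illustration, not code from the patch.

#include <condition_variable>
#include <iostream>
#include <mutex>

int main() {
  std::mutex mtx;
  std::condition_variable cv;

  mtx.lock();  // The caller already holds the mutex, as ConditionVariable::Wait expects.
  {
    std::unique_lock<std::mutex> lock(mtx, std::adopt_lock);
    cv.wait(lock, [] { return true; });  // Predicate is already true, so this returns at once.
    lock.release();  // Detach so the unique_lock destructor does not unlock the mutex.
  }
  std::cout << "mutex is still held after the wait\n";  // Critical section continues here.
  mtx.unlock();
  return 0;
}

The predicate being immediately true makes the wait itself trivial; the point is that mtx stays locked after the wait returns, which is the guarantee the new TestsCriticalSectionAfterWaitForUntil and TestsCriticalSectionAfterWait tests exercise by having each thread update sum in the critical section right after the wait.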