Skip to content

Commit

Permalink
Use acquire() and release() for spinlocks
Browse files Browse the repository at this point in the history
It is more idiomatic than lock() and unlock()

No functional change.
  • Loading branch information
mcostalba committed Mar 16, 2015
1 parent bae4679 commit 13d4df9
Show file tree
Hide file tree
Showing 4 changed files with 31 additions and 30 deletions.
6 changes: 3 additions & 3 deletions src/movepick.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -162,9 +162,9 @@ void MovePicker::score<CAPTURES>() {

template<>
void MovePicker::score<QUIETS>() {
Square prevMoveSq = to_sq((ss-1)->currentMove);
Piece prevMovePiece = pos.piece_on(prevMoveSq);
const HistoryStats &cmh = counterMovesHistory[prevMovePiece][prevMoveSq];

Square prevSq = to_sq((ss-1)->currentMove);
const HistoryStats& cmh = counterMovesHistory[pos.piece_on(prevSq)][prevSq];

for (auto& m : *this)
m.value = history[pos.moved_piece(m)][to_sq(m)]
Expand Down
26 changes: 13 additions & 13 deletions src/search.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -826,7 +826,7 @@ namespace {
continue;

moveCount = ++splitPoint->moveCount;
splitPoint->mutex.unlock();
splitPoint->spinlock.release();
}
else
++moveCount;
Expand Down Expand Up @@ -895,7 +895,7 @@ namespace {
&& moveCount >= FutilityMoveCounts[improving][depth])
{
if (SpNode)
splitPoint->mutex.lock();
splitPoint->spinlock.acquire();

continue;
}
Expand All @@ -914,7 +914,7 @@ namespace {

if (SpNode)
{
splitPoint->mutex.lock();
splitPoint->spinlock.acquire();
if (bestValue > splitPoint->bestValue)
splitPoint->bestValue = bestValue;
}
Expand All @@ -926,7 +926,7 @@ namespace {
if (predictedDepth < 4 * ONE_PLY && pos.see_sign(move) < VALUE_ZERO)
{
if (SpNode)
splitPoint->mutex.lock();
splitPoint->spinlock.acquire();

continue;
}
Expand Down Expand Up @@ -1026,7 +1026,7 @@ namespace {
// Step 18. Check for new best move
if (SpNode)
{
splitPoint->mutex.lock();
splitPoint->spinlock.acquire();
bestValue = splitPoint->bestValue;
alpha = splitPoint->alpha;
}
Expand Down Expand Up @@ -1630,7 +1630,7 @@ void Thread::idle_loop() {
std::memcpy(ss-2, sp->ss-2, 5 * sizeof(Stack));
ss->splitPoint = sp;

sp->mutex.lock();
sp->spinlock.acquire();

assert(activePosition == nullptr);

Expand Down Expand Up @@ -1659,7 +1659,7 @@ void Thread::idle_loop() {
// After releasing the lock we can't access any SplitPoint related data
// in a safe way because it could have been released under our feet by
// the sp master.
sp->mutex.unlock();
sp->spinlock.release();

// Try to late join to another split point if none of its slaves has
// already finished.
Expand Down Expand Up @@ -1699,12 +1699,12 @@ void Thread::idle_loop() {
sp = bestSp;

// Recheck the conditions under lock protection
sp->mutex.lock();
sp->spinlock.acquire();

if ( sp->allSlavesSearching
&& sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT)
{
allocMutex.lock();
spinlock.acquire();

if (can_join(sp))
{
Expand All @@ -1713,10 +1713,10 @@ void Thread::idle_loop() {
searching = true;
}

allocMutex.unlock();
spinlock.release();
}

sp->mutex.unlock();
sp->spinlock.release();
}
}
}
Expand Down Expand Up @@ -1767,15 +1767,15 @@ void check_time() {
{
SplitPoint& sp = th->splitPoints[i];

sp.mutex.lock();
sp.spinlock.acquire();

nodes += sp.nodes;

for (size_t idx = 0; idx < Threads.size(); ++idx)
if (sp.slavesMask.test(idx) && Threads[idx]->activePosition)
nodes += Threads[idx]->activePosition->nodes_searched();

sp.mutex.unlock();
sp.spinlock.release();
}

if (nodes >= Limits.nodes)
Expand Down
12 changes: 6 additions & 6 deletions src/thread.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,7 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
// Pick and init the next available split point
SplitPoint& sp = splitPoints[splitPointsSize];

sp.mutex.lock(); // No contention here until we don't increment splitPointsSize
sp.spinlock.acquire(); // No contention here until we don't increment splitPointsSize

sp.master = this;
sp.parentSplitPoint = activeSplitPoint;
Expand Down Expand Up @@ -174,7 +174,7 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
while ( sp.slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
&& (slave = Threads.available_slave(&sp)) != nullptr)
{
slave->allocMutex.lock();
slave->spinlock.acquire();

if (slave->can_join(activeSplitPoint))
{
Expand All @@ -183,14 +183,14 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
slave->searching = true;
}

slave->allocMutex.unlock();
slave->spinlock.release();
}

// Everything is set up. The master thread enters the idle loop, from which
// it will instantly launch a search, because its 'searching' flag is set.
// The thread will return from the idle loop when all slaves have finished
// their work at this split point.
sp.mutex.unlock();
sp.spinlock.release();

Thread::idle_loop(); // Force a call to base class idle_loop()

Expand All @@ -205,7 +205,7 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
// We have returned from the idle loop, which means that all threads are
// finished. Note that decreasing splitPointsSize must be done under lock
// protection to avoid a race with Thread::can_join().
sp.mutex.lock();
sp.spinlock.acquire();

--splitPointsSize;
activeSplitPoint = sp.parentSplitPoint;
Expand All @@ -214,7 +214,7 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
*bestMove = sp.bestMove;
*bestValue = sp.bestValue;

sp.mutex.unlock();
sp.spinlock.release();
}


Expand Down
17 changes: 9 additions & 8 deletions src/thread.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,16 +41,17 @@ const size_t MAX_SPLITPOINTS_PER_THREAD = 8;
const size_t MAX_SLAVES_PER_SPLITPOINT = 4;

/// Spinlock is a busy-wait mutual-exclusion lock built on an atomic counter.
/// The counter holds 1 when the lock is free. acquire() atomically decrements
/// it and owns the lock only if the previous value was 1; otherwise it spins
/// on cheap relaxed loads until the lock looks free and retries. release()
/// simply restores the counter to 1.
/// NOTE(review): this span rendered as an interleaved diff (both the old
/// _lock/lock()/unlock() members and the new ones appeared); consolidated
/// here to the post-commit state: member `lock`, methods acquire()/release().
class Spinlock {

  std::atomic_int lock;

public:
  Spinlock() { lock = 1; } // Init here to workaround a bug with MSVC 2013
  void acquire() {
      // fetch_sub returns the PREVIOUS value: 1 means the lock was free and
      // the decrement made it ours. On contention, spin reading with relaxed
      // ordering (no bus traffic beyond the cache line) until the holder
      // stores 1 again, then loop back and retry the decrement.
      while (lock.fetch_sub(1, std::memory_order_acquire) != 1)
          for (int cnt = 0; lock.load(std::memory_order_relaxed) <= 0; ++cnt)
              if (cnt >= 10000) std::this_thread::yield(); // Be nice to hyperthreading
  }
  void release() { lock.store(1, std::memory_order_release); }
};


Expand All @@ -73,7 +74,7 @@ struct SplitPoint {
SplitPoint* parentSplitPoint;

// Shared variable data
Spinlock mutex;
Spinlock spinlock;
std::bitset<MAX_THREADS> slavesMask;
volatile bool allSlavesSearching;
volatile uint64_t nodes;
Expand All @@ -97,6 +98,7 @@ struct ThreadBase {

std::thread nativeThread;
Mutex mutex;
Spinlock spinlock;
ConditionVariable sleepCondition;
volatile bool exit = false;
};
Expand Down Expand Up @@ -127,7 +129,6 @@ struct Thread : public ThreadBase {
SplitPoint* volatile activeSplitPoint;
volatile size_t splitPointsSize;
volatile bool searching;
Spinlock allocMutex;
};


Expand Down

0 comments on commit 13d4df9

Please sign in to comment.