libfoedus-core
FOEDUS Core Library
foedus::xct::McsImpl< ADAPTOR, McsRwSimpleBlock > Class Template Reference

The Simple MCS-RW lock.

Detailed Description

template<typename ADAPTOR>
class foedus::xct::McsImpl< ADAPTOR, McsRwSimpleBlock >

The Simple MCS-RW lock.

These methods need partial specialization on RW_BLOCK=McsRwSimpleBlock, but the C++ standard doesn't allow partial specialization of function templates. We thus express it as a class partial specialization, as below. Stupid? Totally agree!
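
As a standalone illustration (a minimal sketch with hypothetical names, not FOEDUS code), this is the language rule the workaround exploits:

template <typename ADAPTOR, typename RW_BLOCK>
struct McsImplLike;                      // primary template, declaration only

struct SimpleBlockLike {};               // stand-in for McsRwSimpleBlock

// Ill-formed if attempted as a function template:
//   template <typename ADAPTOR>
//   void acquire<ADAPTOR, SimpleBlockLike>(...);  // no partial specialization
//
// Legal as a class partial specialization, whose members carry the logic:
template <typename ADAPTOR>
struct McsImplLike<ADAPTOR, SimpleBlockLike> {
  void acquire() { /* simple-variant logic */ }
};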

Definition at line 348 of file xct_mcs_impl.cpp.

Public Member Functions

McsBlockIndex acquire_try_rw_writer (McsRwLock *lock)
 
McsBlockIndex acquire_try_rw_reader (McsRwLock *lock)
 
McsBlockIndex acquire_unconditional_rw_reader (McsRwLock *mcs_rw_lock)
 
void release_rw_reader (McsRwLock *mcs_rw_lock, McsBlockIndex block_index)
 
McsBlockIndex acquire_unconditional_rw_writer (McsRwLock *mcs_rw_lock)
 
void release_rw_writer (McsRwLock *mcs_rw_lock, McsBlockIndex block_index)
 
AcquireAsyncRet acquire_async_rw_reader (McsRwLock *lock)
 
AcquireAsyncRet acquire_async_rw_writer (McsRwLock *lock)
 
bool retry_async_rw_reader (McsRwLock *lock, McsBlockIndex block_index)
 
bool retry_async_rw_writer (McsRwLock *lock, McsBlockIndex block_index)
 
void cancel_async_rw_reader (McsRwLock *, McsBlockIndex)
 
void cancel_async_rw_writer (McsRwLock *, McsBlockIndex)
 

Static Public Member Functions

static bool does_support_try_rw_reader ()
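
Taken together, the members above offer three acquisition styles: unconditional (spin until granted), try (one shot, 0 on failure), and async (issue once, then retry or cancel; see the sketch after acquire_async_rw_writer()). A hypothetical caller-side sketch, with IMPL standing in for this class and LOCK for McsRwLock (not FOEDUS code):

template <typename IMPL, typename LOCK>
void acquisition_styles_sketch(IMPL* impl, LOCK* lock) {
  // Unconditional: spins until granted; always returns a valid block index.
  auto w = impl->acquire_unconditional_rw_writer(lock);
  impl->release_rw_writer(lock, w);

  // Try: single attempt; returns the granted block index, or 0 on failure.
  if (auto b = impl->acquire_try_rw_writer(lock)) {
    impl->release_rw_writer(lock, b);
  }
}

Note that does_support_try_rw_reader() returns false for this specialization: the reader-side try is best-effort only (see the comment block in retry_async_rw_reader()).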
 

Member Function Documentation

template<typename ADAPTOR >
AcquireAsyncRet foedus::xct::McsImpl< ADAPTOR, McsRwSimpleBlock >::acquire_async_rw_reader ( McsRwLock * lock )
inline

Definition at line 544 of file xct_mcs_impl.cpp.

References foedus::xct::McsImpl< ADAPTOR, RW_BLOCK >::retry_async_rw_reader().

544  {
545  // In simple version, no distinction between try/async/retry. Same logic.
546  McsBlockIndex block_index = adaptor_.issue_new_block();
547  bool success = retry_async_rw_reader(lock, block_index);
548  return {success, block_index};
549  }

template<typename ADAPTOR >
AcquireAsyncRet foedus::xct::McsImpl< ADAPTOR, McsRwSimpleBlock >::acquire_async_rw_writer ( McsRwLock * lock )
inline

Definition at line 550 of file xct_mcs_impl.cpp.

References foedus::xct::McsImpl< ADAPTOR, RW_BLOCK >::retry_async_rw_writer().

550  {
551  McsBlockIndex block_index = adaptor_.issue_new_block();
552  bool success = retry_async_rw_writer(lock, block_index);
553  return {success, block_index};
554  }
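
Because try/async/retry collapse to the same one-shot logic in this variant, the async API amounts to a poll loop. A hypothetical caller-side sketch (IMPL standing in for this class; the acquired_ and block_index_ field names are assumed from the {success, block_index} return shape in the listings above):

template <typename IMPL, typename LOCK>
bool acquire_writer_with_budget(IMPL* impl, LOCK* lock, int budget) {
  auto ret = impl->acquire_async_rw_writer(lock);
  while (!ret.acquired_ && --budget > 0) {
    // Each retry is an independent one-shot attempt in the simple variant.
    ret.acquired_ = impl->retry_async_rw_writer(lock, ret.block_index_);
  }
  if (!ret.acquired_) {
    // No queue state to undo; cancel is a no-op here, but calling it keeps
    // the caller portable to the extended (McsRwExtendedBlock) variant.
    impl->cancel_async_rw_writer(lock, ret.block_index_);
  }
  return ret.acquired_;
}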

template<typename ADAPTOR >
McsBlockIndex foedus::xct::McsImpl< ADAPTOR, McsRwSimpleBlock >::acquire_try_rw_reader ( McsRwLock * lock )
inline

Definition at line 363 of file xct_mcs_impl.cpp.

References ASSERT_ND, and foedus::xct::McsImpl< ADAPTOR, RW_BLOCK >::retry_async_rw_reader().

363  {
364  McsBlockIndex block_index = adaptor_.issue_new_block();
365  bool success = retry_async_rw_reader(lock, block_index);
366  if (success) {
367 #ifndef NDEBUG
368  auto* my_block = adaptor_.get_rw_my_block(block_index);
369  ASSERT_ND(my_block->is_finalized());
370  ASSERT_ND(my_block->is_granted());
371 #endif // NDEBUG
372  return block_index;
373  } else {
374  // The block is never observed. reuse
375  adaptor_.cancel_new_block(block_index);
376  return 0;
377  }
378  }

template<typename ADAPTOR >
McsBlockIndex foedus::xct::McsImpl< ADAPTOR, McsRwSimpleBlock >::acquire_try_rw_writer ( McsRwLock * lock )
inline

Definition at line 350 of file xct_mcs_impl.cpp.

References foedus::xct::McsImpl< ADAPTOR, RW_BLOCK >::retry_async_rw_writer().

350  {
351  McsBlockIndex block_index = adaptor_.issue_new_block();
352  bool success = retry_async_rw_writer(lock, block_index);
353  if (success) {
354  return block_index;
355  } else {
356  // The block is never observed. reuse
357  adaptor_.cancel_new_block(block_index);
358  return 0;
359  }
360  }

template<typename ADAPTOR >
McsBlockIndex foedus::xct::McsImpl< ADAPTOR, McsRwSimpleBlock >::acquire_unconditional_rw_reader ( McsRwLock * mcs_rw_lock )
inline

Definition at line 380 of file xct_mcs_impl.cpp.

References ASSERT_ND, foedus::xct::McsRwLock::increment_nreaders(), foedus::xct::spin_until(), foedus::xct::McsRwLock::tail_, and foedus::xct::McsRwLock::to_tail_int().

380  {
381  ASSERT_ND(adaptor_.get_cur_block() < 0xFFFFU);
382  const thread::ThreadId id = adaptor_.get_my_id();
383  const McsBlockIndex block_index = adaptor_.issue_new_block();
384  ASSERT_ND(block_index > 0);
385  ASSERT_ND(sizeof(McsRwSimpleBlock) == sizeof(McsWwBlock));
386  auto* my_block = adaptor_.get_rw_my_block(block_index);
387 
388  // So I'm a reader
389  my_block->init_reader();
390  ASSERT_ND(my_block->is_blocked() && my_block->is_reader());
391  ASSERT_ND(!my_block->has_successor());
392  ASSERT_ND(my_block->successor_block_index_ == 0);
393 
394  // Now ready to XCHG
395  uint32_t tail_desired = McsRwLock::to_tail_int(id, block_index);
396  uint32_t* tail_address = &(mcs_rw_lock->tail_);
397  uint32_t pred_tail_int = assorted::raw_atomic_exchange<uint32_t>(tail_address, tail_desired);
398 
399  if (pred_tail_int == 0) {
400  mcs_rw_lock->increment_nreaders();
401  my_block->unblock(); // reader successors will know they don't need to wait
402  } else {
403  // See if the predecessor is a reader; if so, if it already acquired the lock.
404  auto* pred_block = adaptor_.dereference_rw_tail_block(pred_tail_int);
405  uint16_t* pred_state_address = &pred_block->self_.data_;
406  uint16_t pred_state_expected = pred_block->make_blocked_with_no_successor_state();
407  uint16_t pred_state_desired = pred_block->make_blocked_with_reader_successor_state();
408  if (!pred_block->is_reader() || assorted::raw_atomic_compare_exchange_strong<uint16_t>(
409  pred_state_address,
410  &pred_state_expected,
411  pred_state_desired)) {
412  // Predecessor is a writer or a waiting reader. The successor class field and the
413  // blocked state in pred_block are separated, so we can blindly set_successor().
414  pred_block->set_successor_next_only(id, block_index);
415  spin_until([my_block]{ return my_block->is_granted(); });
416  } else {
417  // Join the active, reader predecessor
418  ASSERT_ND(!pred_block->is_blocked());
419  mcs_rw_lock->increment_nreaders();
420  pred_block->set_successor_next_only(id, block_index);
421  my_block->unblock();
422  }
423  }
424  finalize_acquire_reader_simple(mcs_rw_lock, my_block);
425  ASSERT_ND(my_block->is_finalized());
426  return block_index;
427  }
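
The pivot of this function is the unconditional exchange on the lock tail: it both publishes this reader as the new tail and reveals the predecessor in one atomic step. A minimal sketch with std::atomic standing in for assorted::raw_atomic_exchange (names hypothetical, not FOEDUS code):

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> tail{0};  // stand-in for McsRwLock::tail_

// Swap my packed (thread id, block index) into the tail. Zero means the
// queue was empty and the lock is immediately grantable; nonzero identifies
// the predecessor to link behind, as the code above does with
// set_successor_next_only() before spinning or joining.
uint32_t enqueue(uint32_t my_tail_int) {
  return tail.exchange(my_tail_int, std::memory_order_acq_rel);
}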

template<typename ADAPTOR >
McsBlockIndex foedus::xct::McsImpl< ADAPTOR, McsRwSimpleBlock >::acquire_unconditional_rw_writer ( McsRwLock * mcs_rw_lock )
inline

Definition at line 476 of file xct_mcs_impl.cpp.

References ASSERT_ND, foedus::xct::McsRwLock::get_next_writer(), foedus::xct::McsRwLock::kNextWriterNone, foedus::xct::McsRwLock::next_writer_, foedus::xct::McsRwLock::nreaders(), foedus::xct::spin_until(), foedus::xct::McsRwLock::tail_, and foedus::xct::McsRwLock::to_tail_int().

477  {
478  const thread::ThreadId id = adaptor_.get_my_id();
479  const McsBlockIndex block_index = adaptor_.issue_new_block();
480  ASSERT_ND(adaptor_.get_cur_block() < 0xFFFFU);
481  ASSERT_ND(block_index > 0);
482  ASSERT_ND(sizeof(McsRwSimpleBlock) == sizeof(McsWwBlock));
483  auto* my_block = adaptor_.get_rw_my_block(block_index);
484 
485  my_block->init_writer();
486  ASSERT_ND(my_block->is_blocked() && !my_block->is_reader());
487  ASSERT_ND(!my_block->has_successor());
488  ASSERT_ND(my_block->successor_block_index_ == 0);
489 
490  // Now ready to XCHG
491  uint32_t tail_desired = McsRwLock::to_tail_int(id, block_index);
492  uint32_t* tail_address = &(mcs_rw_lock->tail_);
493  uint32_t pred_tail_int = assorted::raw_atomic_exchange<uint32_t>(tail_address, tail_desired);
494  ASSERT_ND(pred_tail_int != tail_desired);
495  thread::ThreadId old_next_writer = 0xFFFFU;
496  if (pred_tail_int == 0) {
497  ASSERT_ND(mcs_rw_lock->get_next_writer() == McsRwLock::kNextWriterNone);
498  assorted::raw_atomic_exchange<thread::ThreadId>(&mcs_rw_lock->next_writer_, id);
499  if (mcs_rw_lock->nreaders() == 0) {
500  old_next_writer = assorted::raw_atomic_exchange<thread::ThreadId>(
501  &mcs_rw_lock->next_writer_,
502  McsRwLock::kNextWriterNone);
503  if (old_next_writer == id) {
504  my_block->unblock();
505  return block_index;
506  }
507  }
508  } else {
509  auto* pred_block = adaptor_.dereference_rw_tail_block(pred_tail_int);
510  pred_block->set_successor_class_writer();
511  pred_block->set_successor_next_only(id, block_index);
512  }
513  spin_until([my_block]{ return my_block->is_granted(); });
514  return block_index;
515  }
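
The subtle part above is the pred_tail_int == 0 branch: the writer advertises itself in next_writer_ and, only if no readers are active, takes the advertisement back; receiving its own id proves no exiting reader consumed it in between. A minimal std::atomic sketch of that handshake (stand-ins, not FOEDUS code; the 0xFFFFU sentinel mirrors kNextWriterNone):

#include <atomic>
#include <cstdint>

constexpr uint16_t kNone = 0xFFFFU;
std::atomic<uint16_t> next_writer{kNone};
std::atomic<uint16_t> nreaders{0};

// Returns true if the writer granted itself the lock without waiting.
bool writer_fast_path(uint16_t my_id) {
  next_writer.exchange(my_id);        // advertise myself to exiting readers
  if (nreaders.load() == 0) {
    // Win only if no reader consumed the advertisement in the meantime.
    return next_writer.exchange(kNone) == my_id;
  }
  return false;  // the last exiting reader hands over via next_writer_
}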

template<typename ADAPTOR >
void foedus::xct::McsImpl< ADAPTOR, McsRwSimpleBlock >::cancel_async_rw_reader ( McsRwLock * ,
McsBlockIndex   
)
inline

Definition at line 643 of file xct_mcs_impl.cpp.

643  {
644  // In simple version, we don't actually have any mechanism to retry.
645  // so, we don't have to do any cancel, either. No-op.
646  }
template<typename ADAPTOR >
void foedus::xct::McsImpl< ADAPTOR, McsRwSimpleBlock >::cancel_async_rw_writer ( McsRwLock * ,
McsBlockIndex   
)
inline

Definition at line 647 of file xct_mcs_impl.cpp.

647  {
648  }
template<typename ADAPTOR >
static bool foedus::xct::McsImpl< ADAPTOR, McsRwSimpleBlock >::does_support_try_rw_reader ( )
inline static

Definition at line 362 of file xct_mcs_impl.cpp.

362 { return false; }
template<typename ADAPTOR >
void foedus::xct::McsImpl< ADAPTOR, McsRwSimpleBlock >::release_rw_reader ( McsRwLock * mcs_rw_lock,
McsBlockIndex  block_index 
)
inline

Definition at line 429 of file xct_mcs_impl.cpp.

References ASSERT_ND, foedus::xct::McsRwLock::decrement_nreaders(), foedus::xct::McsRwSimpleBlock::is_blocked(), foedus::xct::McsRwSimpleBlock::is_reader(), foedus::xct::McsRwLock::kNextWriterNone, foedus::xct::McsRwLock::next_writer_, foedus::xct::McsRwLock::nreaders(), foedus::xct::spin_until(), foedus::xct::McsRwLock::tail_, foedus::xct::McsRwLock::to_tail_int(), and foedus::xct::McsRwSimpleBlock::unblock().

431  {
432  const thread::ThreadId id = adaptor_.get_my_id();
433  ASSERT_ND(block_index > 0);
434  ASSERT_ND(adaptor_.get_cur_block() >= block_index);
435  McsRwSimpleBlock* my_block = adaptor_.get_rw_my_block(block_index);
436  ASSERT_ND(my_block->is_finalized());
437  // Make sure there is really no successor or wait for it
438  uint32_t* tail_address = &mcs_rw_lock->tail_;
439  uint32_t expected = McsRwLock::to_tail_int(id, block_index);
440  if (my_block->successor_is_ready() ||
441  !assorted::raw_atomic_compare_exchange_strong<uint32_t>(tail_address, &expected, 0)) {
442  // Have to wait for the successor to install itself after me
443  // Don't check for curr_block->has_successor()! It only tells whether the state bit
444  // is set, not whether successor_thread_id_ and successor_block_index_ are set.
445  // But remember to skip trying readers who failed.
446  spin_until([my_block]{ return my_block->successor_is_ready(); });
447  if (my_block->has_writer_successor()) {
448  assorted::raw_atomic_exchange<thread::ThreadId>(
449  &mcs_rw_lock->next_writer_,
450  my_block->successor_thread_id_);
451  }
452  }
453 
454  if (mcs_rw_lock->decrement_nreaders() == 1) {
455  // I'm the last active reader
456  thread::ThreadId next_writer
457  = assorted::atomic_load_acquire<thread::ThreadId>(&mcs_rw_lock->next_writer_);
458  if (next_writer != McsRwLock::kNextWriterNone &&
459  mcs_rw_lock->nreaders() == 0 &&
460  assorted::raw_atomic_compare_exchange_strong<thread::ThreadId>(
461  &mcs_rw_lock->next_writer_,
462  &next_writer,
463  McsRwLock::kNextWriterNone)) {
464  // I have a waiting writer, wake it up
465  // Assuming a thread can wait for one and only one MCS lock at any instant
466  // before starting to acquire the next.
467  McsBlockIndex next_cur_block = adaptor_.get_other_cur_block(next_writer);
468  McsRwSimpleBlock *writer_block = adaptor_.get_rw_other_block(next_writer, next_cur_block);
469  ASSERT_ND(writer_block->is_blocked());
470  ASSERT_ND(!writer_block->is_reader());
471  writer_block->unblock();
472  }
473  }
474  }
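
The heart of the release is the reader-to-writer handoff: the reader that drops the count to zero competes for next_writer_ with a compare-and-swap, and the winner must unblock that writer. A minimal std::atomic sketch (stand-ins, not FOEDUS code; 0xFFFFU mirrors kNextWriterNone):

#include <atomic>
#include <cstdint>

constexpr uint16_t kNone = 0xFFFFU;
std::atomic<uint16_t> nreaders{0};
std::atomic<uint16_t> next_writer{kNone};

// Returns the id of the writer this reader must unblock, or kNone.
uint16_t reader_exit() {
  if (nreaders.fetch_sub(1) == 1) {   // I was the last active reader
    uint16_t w = next_writer.load(std::memory_order_acquire);
    if (w != kNone && nreaders.load() == 0 &&
        next_writer.compare_exchange_strong(w, kNone)) {
      return w;                       // I won the CAS: wake this writer
    }
  }
  return kNone;
}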

template<typename ADAPTOR >
void foedus::xct::McsImpl< ADAPTOR, McsRwSimpleBlock >::release_rw_writer ( McsRwLock * mcs_rw_lock,
McsBlockIndex  block_index 
)
inline

Definition at line 517 of file xct_mcs_impl.cpp.

References ASSERT_ND, foedus::xct::McsRwLock::increment_nreaders(), foedus::xct::spin_until(), foedus::xct::McsRwLock::tail_, foedus::xct::McsRwLock::to_tail_int(), and UNLIKELY.

519  {
520  const thread::ThreadId id = adaptor_.get_my_id();
521  ASSERT_ND(block_index > 0);
522  ASSERT_ND(adaptor_.get_cur_block() >= block_index);
523  auto* my_block = adaptor_.get_rw_my_block(block_index);
524  uint32_t expected = McsRwLock::to_tail_int(id, block_index);
525  uint32_t* tail_address = &mcs_rw_lock->tail_;
526  if (my_block->successor_is_ready() ||
527  !assorted::raw_atomic_compare_exchange_strong<uint32_t>(tail_address, &expected, 0)) {
528  if (UNLIKELY(!my_block->successor_is_ready())) {
529  spin_until([my_block]{ return my_block->successor_is_ready(); });
530  }
531  ASSERT_ND(my_block->successor_is_ready());
532  auto* successor_block = adaptor_.get_rw_other_block(
533  my_block->successor_thread_id_,
534  my_block->successor_block_index_);
535  ASSERT_ND(successor_block->is_blocked());
536  if (successor_block->is_reader()) {
537  mcs_rw_lock->increment_nreaders();
538  }
539  successor_block->unblock();
540  }
541  }

template<typename ADAPTOR >
bool foedus::xct::McsImpl< ADAPTOR, McsRwSimpleBlock >::retry_async_rw_reader ( McsRwLock * lock,
McsBlockIndex  block_index 
)
inline

Definition at line 556 of file xct_mcs_impl.cpp.

References foedus::xct::McsRwLock::increment_nreaders(), foedus::xct::McsRwLock::tail_, and foedus::xct::McsRwLock::to_tail_int().

558  {
559  /* The following turns out to be unsafe because there might be writers
560  who are not yet detected by the pred-readers. We are still trying
561  to find a way without resorting to extended version.. but for now disabled.
562  does_support_try_rw_reader() returns false for this reason.
563  const thread::ThreadId id = adaptor_.get_my_id();
564  // take a look at the whole lock word, and cas if it's a reader or null
565  uint64_t lock_word
566  = assorted::atomic_load_acquire<uint64_t>(reinterpret_cast<uint64_t*>(lock));
567  McsRwLock ll;
568  std::memcpy(&ll, &lock_word, sizeof(ll));
569  // Note: it's tempting to put this whole function under an infinite retry
570  // loop and only break when this condition is true. That works fine with
571  // a single lock, but might cause deadlocks and making this try version
572  // not really a try, consider this example with two locks A and B.
573  //
574  // Lock: requester 1 -> requester 2
575  //
576  // A: T1 holding as writer -> T2 waiting unconditionally as a writer in canonical mode
577  // B: T2 holding as writer -> T1 trying as a reader in non-canonical mode
578  //
579  // In this case, T1 always sees next_writer=none because T2 consumed it when it got the
580  // lock, and the below CAS fails because now B.tail is T2, a writer. T1 would stay in
581  // the loop forever...
582  if (ll.next_writer_ != McsRwLock::kNextWriterNone) {
583  return false;
584  }
585  McsRwSimpleBlock* block = nullptr;
586  if (ll.tail_) {
587  block = adaptor_.dereference_rw_tail_block(ll.tail_);
588  }
589  if (ll.tail_ == 0 || (block->is_granted() && block->is_reader())) {
590  ll.increment_nreaders();
591  ll.tail_ = McsRwLock::to_tail_int(id, block_index);
592  uint64_t desired = *reinterpret_cast<uint64_t*>(&ll);
593  auto* my_block = adaptor_.get_rw_my_block(block_index);
594  my_block->init_reader();
595 
596  if (assorted::raw_atomic_compare_exchange_weak<uint64_t>(
597  reinterpret_cast<uint64_t*>(lock), &lock_word, desired)) {
598  if (block) {
599  block->set_successor_next_only(id, block_index);
600  }
601  my_block->unblock();
602  finalize_acquire_reader_simple(lock, my_block);
603  return true;
604  }
605  }
606  */
607 
608  // Instead, a best-effort impl here, which is basically same as the writer-case below.
609  // This returns false even if there only are readers.
610  const thread::ThreadId id = adaptor_.get_my_id();
611  auto* my_block = adaptor_.get_rw_my_block(block_index);
612  my_block->init_reader();
613 
614  McsRwLock tmp;
615  uint64_t expected = *reinterpret_cast<uint64_t*>(&tmp);
616  McsRwLock tmp2;
617  tmp2.increment_nreaders();
618  tmp2.tail_ = McsRwLock::to_tail_int(id, block_index);
619  uint64_t desired = *reinterpret_cast<uint64_t*>(&tmp2);
620  if (assorted::raw_atomic_compare_exchange_weak<uint64_t>(
621  reinterpret_cast<uint64_t*>(lock), &expected, desired)) {
622  my_block->unblock();
623  finalize_acquire_reader_simple(lock, my_block);
624  return true;
625  }
626  return false;
627  }
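
The live code after the comment block is a whole-word compare-and-swap: build the post-acquire image in a temporary McsRwLock and install it only against the pristine free image, which is why the attempt fails even when the current holders are all readers (and why does_support_try_rw_reader() returns false). A minimal sketch; the field layout below is a hypothetical stand-in for McsRwLock, not its real definition:

#include <atomic>
#include <cstdint>
#include <cstring>

struct TinyRwLockImage {            // hypothetical 8-byte stand-in
  uint32_t tail_ = 0;               // packed (thread id, block index)
  uint16_t nreaders_ = 0;
  uint16_t next_writer_ = 0xFFFFU;  // "none"
};

bool try_reader_once(std::atomic<uint64_t>* lock_word, uint32_t my_tail_int) {
  TinyRwLockImage free_image;       // expected: a completely free lock
  TinyRwLockImage mine = free_image;
  mine.nreaders_ = 1;               // become the sole reader...
  mine.tail_ = my_tail_int;         // ...and the queue tail, in one shot
  uint64_t expected;
  uint64_t desired;
  std::memcpy(&expected, &free_image, sizeof(expected));
  std::memcpy(&desired, &mine, sizeof(desired));
  // Fails whenever any field differs from the free image.
  return lock_word->compare_exchange_weak(expected, desired);
}

The writer path (retry_async_rw_writer() below) is the same single CAS, just without the reader count.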

template<typename ADAPTOR >
bool foedus::xct::McsImpl< ADAPTOR, McsRwSimpleBlock >::retry_async_rw_writer ( McsRwLock * lock,
McsBlockIndex  block_index 
)
inline

Definition at line 628 of file xct_mcs_impl.cpp.

References foedus::xct::McsRwLock::tail_, and foedus::xct::McsRwLock::to_tail_int().

628  {
629  const thread::ThreadId id = adaptor_.get_my_id();
630  auto* my_block = adaptor_.get_rw_my_block(block_index);
631  my_block->init_writer();
632 
633  McsRwLock tmp;
634  uint64_t expected = *reinterpret_cast<uint64_t*>(&tmp);
635  McsRwLock tmp2;
636  tmp2.tail_ = McsRwLock::to_tail_int(id, block_index);
637  uint64_t desired = *reinterpret_cast<uint64_t*>(&tmp2);
638  my_block->unblock();
639  return assorted::raw_atomic_compare_exchange_weak<uint64_t>(
640  reinterpret_cast<uint64_t*>(lock), &expected, desired);
641  }


The documentation for this class was generated from the following file:
xct_mcs_impl.cpp