#ifndef FOEDUS_XCT_XCT_MCS_ADAPTER_IMPL_HPP_
#define FOEDUS_XCT_XCT_MCS_ADAPTER_IMPL_HPP_

template<typename RW_BLOCK>

template<typename RW_BLOCK>
  void init(uint32_t max_block_count) {

      if (m.lock_id_ == ulockid) {
        return m.block_index_;

    page_id.set(node_id, in_node_index);

template<typename RW_BLOCK>
    uint32_t threads_per_node,
    uint32_t max_block_count,
    uint32_t pages_per_node) {
    for (uint32_t t = 0; t < threads_per_node; ++t) {
    for (uint32_t i = 0; i < pages_per_node; ++i) {
      pages_[i].init(dummy_storage_id, node_id, i);

template<typename RW_BLOCK>
    uint32_t threads_per_node,
    uint32_t max_block_count,
    uint32_t max_lock_count) {
    for (uint32_t n = 0; n < nodes; ++n) {

    return page->tid_ + lock_in_page_index;

    return page->ww_ + lock_in_page_index;

  std::vector< McsMockNode<RW_BLOCK> > nodes_;

template<typename RW_BLOCK>
    me_(context->nodes_[numa_node_].threads_.data() + local_ordinal_) {}

    ASSERT_ND(me_->mcs_block_current_ < 0xFFFFU);
    return ++me_->mcs_block_current_;
    ASSERT_ND(me_->mcs_block_current_ == the_block);
    --me_->mcs_block_current_;

  std::atomic<bool>* me_waiting() { return &me_->mcs_waiting_; }

    ASSERT_ND(index <= me_->mcs_block_current_);
    return me_->mcs_ww_blocks_.data() + index;
    ASSERT_ND(index <= me_->mcs_block_current_);
    return me_->mcs_rw_blocks_.data() + index;

    ASSERT_ND(node < context_->nodes_.size());
    ASSERT_ND(ordinal < context_->nodes_[node].threads_.size());
    return context_->nodes_[node].threads_.data() + ordinal;

    ASSERT_ND(index <= other->mcs_block_current_);
    ASSERT_ND(index <= other->mcs_block_current_);
    ASSERT_ND(index <= other->mcs_block_current_);

    tail_tmp.tail_ = tail_int;

    for (uint32_t i = 0; i < me_->mcs_rw_async_mapping_current_; ++i) {
      if (me_->mcs_rw_async_mappings_[i].lock_id_ == ulockid) {

    auto index = me_->mcs_rw_async_mapping_current_++;
    me_->mcs_rw_async_mappings_[index].lock_id_ = ulockid;
    me_->mcs_rw_async_mappings_[index].block_index_ = block_index;

    ASSERT_ND(me_->mcs_rw_async_mapping_current_);
    for (uint32_t i = 0; i < me_->mcs_rw_async_mapping_current_; ++i) {
      if (me_->mcs_rw_async_mappings_[i].lock_id_ == lock_id) {
        --me_->mcs_rw_async_mapping_current_;

static_assert(
  sizeof(McsMockDataPage) == storage::kPageSize,
  "McsMockDataPage not in kPageSize?");

#endif  // FOEDUS_XCT_XCT_MCS_ADAPTER_IMPL_HPP_
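
The fragments above only excerpt the mock plumbing; the signatures they belong to are listed below. To make the intended flow concrete, here is a minimal usage sketch written against those signatures rather than taken from the FOEDUS sources: the include path, the choice of McsRwExtendedBlock as the RW_BLOCK parameter, and the ThreadId packing (NUMA node in the high byte, local ordinal in the low byte) are assumptions.

// Minimal sketch, not from the FOEDUS repository. Assumptions: the include
// path below, McsRwExtendedBlock as a valid RW_BLOCK parameter, and a ThreadId
// layout of (node << 8) | ordinal, consistent with decompose_numa_node() /
// decompose_numa_local_ordinal().
#include "foedus/xct/xct_mcs_adapter_impl.hpp"

void mock_adaptor_smoke_test() {
  foedus::xct::McsMockContext<foedus::xct::McsRwExtendedBlock> context;
  // init(dummy_storage_id, nodes, threads_per_node, max_block_count, max_lock_count)
  context.init(1U, 2U, 4U, 1U << 10, 1U << 12);

  const foedus::thread::ThreadId tid = (0U << 8) | 1U;  // node 0, ordinal 1 (assumed layout)
  foedus::xct::McsMockAdaptor<foedus::xct::McsRwExtendedBlock> adaptor(tid, &context);

  // Each lock acquisition takes a fresh thread-local queue-node block ...
  foedus::xct::McsBlockIndex block = adaptor.issue_new_block();
  foedus::xct::McsRwExtendedBlock* my_block = adaptor.get_rw_my_block(block);
  (void)my_block;
  // ... and an aborted acquisition can hand back the most recent one.
  adaptor.cancel_new_block(block);

  // The lock words themselves live in the McsMockDataPage arrays owned by each node.
  foedus::xct::RwLockableXctId* lock_tid = context.get_rw_lock_address(0, 0);
  (void)lock_tid;
}

The lock classes under test never touch McsMockContext directly; they only go through the McsAdaptorConcept surface (issue_new_block(), get_rw_my_block(), me_waiting(), and so on), which is what lets the same lock code run against either this mock or the engine's real thread implementation.
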
void reset() __attribute__((always_inline))
Used only during page initialization.
std::vector< RW_BLOCK > mcs_rw_blocks_
void remove_rw_async_mapping(xct::McsRwLock *lock)
char header_pad_[kMcsMockDataPageHeaderPad]
uint8_t ThreadLocalOrdinal
Typedef for a local ID of Thread (core), which is NOT unique across NUMA nodes.
RW_BLOCK * dereference_rw_tail_block(uint32_t tail_int)
Same as above, but receives a combined tail int. For McsRwLock; see the packing sketch at the end of this listing.
Definitions of IDs in this package and a few related constant values.
void cancel_new_block(McsBlockIndex the_block)
Cancels the most recent issue_new_block() call, decrementing the counter.
uint32_t mcs_block_current_
memory::AlignedMemory page_memory_
RW_BLOCK ThisRwBlock
ThisRwBlock shall indicate the block type.
RW_BLOCK * get_rw_other_block(thread::ThreadId id, McsBlockIndex index)
Dereference other thread's block index for reader-writer locks.
memory::GlobalVolatilePageResolver page_memory_resolver_
All locks managed by this object are placed in these memory regions.
uint32_t StorageId
Unique ID for storage.
Root package of FOEDUS (Fast Optimistic Engine for Data Unification Services).
void init(storage::StorageId dummy_storage_id, uint16_t node_id, uint32_t in_node_index)
Analogous to one thread-group/socket/node.
Typedefs of ID types used in thread package.
std::vector< McsWwBlock > mcs_ww_blocks_
McsBlockIndex get_other_cur_block(thread::ThreadId id)
McsRwExtendedBlock * get_rw_other_async_block(thread::ThreadId id, xct::McsRwLock *lock)
Dereference other thread's block index for extended rwlock.
Defines an adapter template interface for our MCS lock classes.
ThreadLocalOrdinal decompose_numa_local_ordinal(ThreadId global_id)
Extracts local ordinal from the given globally unique ID of Thread (core).
Represents a pointer to a volatile page with modification count for preventing ABA.
RW_BLOCK * get_rw_other_block(uint32_t block_int)
constexpr uint32_t kMcsMockDataPageLocksPerPage
constexpr uint32_t kMcsMockDataPageHeaderPad
Implements McsAdaptorConcept.
void add_rw_async_mapping(xct::McsRwLock *lock, xct::McsBlockIndex block_index)
std::atomic< bool > * other_waiting(thread::ThreadId id)
Returns the atomic bool on whether the specified thread is waiting for some lock.
void init(storage::StorageId dummy_storage_id, uint16_t node_id, uint32_t threads_per_node, uint32_t max_block_count, uint32_t pages_per_node)
McsWwBlock * get_ww_my_block(McsBlockIndex index)
Dereference my block index for exclusive locks.
McsBlockIndex get_tail_waiter_block() const
UniversalLockId rw_lock_to_universal_lock_id(const memory::GlobalVolatilePageResolver &resolver, McsRwLock *lock)
McsBlockIndex get_cur_block() const
void add_rw_async_mapping(xct::McsRwLock *lock, xct::McsBlockIndex block_index)
The MCS reader-writer lock variant of LockableXctId.
std::vector< xct::McsRwAsyncMapping > mcs_rw_async_mappings_
char filler_[kMcsMockDataPageFiller]
thread::ThreadGroupId get_my_numa_node() const
Returns group-Id of this thread.
McsMockThread< RW_BLOCK > * get_other_thread(thread::ThreadId id)
uint32_t max_block_count_
Pre-allocated MCS block for WW-locks.
Definitions of IDs in this package and a few related constant values.
An exclusive-only (WW) MCS lock data structure.
Pre-allocated MCS block for extended version of RW-locks.
RW_BLOCK * get_rw_my_block(McsBlockIndex index)
Dereference my block index for reader-writer locks.
McsMockThread(McsMockThread &&rhs)
RwLockableXctId * get_rw_lock_address(uint16_t node_id, uint64_t lock_index)
constexpr uint32_t kMcsMockDataPageFiller
Just a marker to denote that the memory region represents a data page.
constexpr uint32_t kMcsMockDataPageHeaderSize
McsWwLock ww_[kMcsMockDataPageLocksPerPage]
void set(uint8_t numa_node, memory::PagePoolOffset offset)
std::vector< McsMockNode< RW_BLOCK > > nodes_
ThreadGroupId decompose_numa_node(ThreadId global_id)
Extracts NUMA node ID from the given globally unique ID of Thread (core).
thread::ThreadId get_my_id() const
Returns thread-Id of this thread.
void * get_block() const
Returns the memory block.
A dummy implementation that provides McsAdaptorConcept for testing.
void init(uint32_t max_block_count)
std::vector< McsMockThread< RW_BLOCK > > threads_
uint32_t mcs_rw_async_mapping_current_
void init(storage::StorageId dummy_storage_id, uint32_t nodes, uint32_t threads_per_node, uint32_t max_block_count, uint32_t max_lock_count)
McsMockDataPage * pages_
Locks assigned to this node are stored in this memory.
uint32_t McsBlockIndex
Index in thread-local MCS block.
RwLockableXctId tid_[kMcsMockDataPageLocksPerPage]
uint16_t ThreadId
Typedef for a global ID of Thread (core), which is unique across NUMA nodes.
Represents one memory block aligned to actual OS/hardware pages.
std::atomic< bool > mcs_waiting_
McsBlockIndex get_cur_block() const
A dummy page layout to store RwLockableXctId.
McsWwBlock * get_ww_other_block(thread::ThreadId id, McsBlockIndex index)
Dereference other thread's block index for exclusive locks.
xct::McsBlockIndex get_mcs_rw_async_block_index(const memory::GlobalVolatilePageResolver &resolver, xct::McsRwLock *lock)
Analogous to the entire engine.
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
uint32_t max_block_count_
storage::PageHeader header_
McsBlockIndex get_other_cur_block(thread::ThreadId id)
uint8_t ThreadGroupId
Typedef for an ID of ThreadGroup (NUMA node).
void alloc_onnode(uint64_t size, uint64_t alignment, int numa_node) noexcept
Short for alloc(kNumaAllocOnnode)
const uint16_t kPageSize
A constant defining the page size (in bytes) of both snapshot pages and volatile pages.
McsWwLock * get_ww_lock_address(uint16_t node_id, uint64_t lock_index)
An MCS reader-writer lock data structure.
McsBlockIndex issue_new_block()
Issues a new queue node of this thread and returns its block index.
McsMockAdaptor(thread::ThreadId id, McsMockContext< RW_BLOCK > *context)
std::atomic< bool > * me_waiting()
Returns the atomic bool on whether the current thread is waiting for some lock.
const UniversalLockId kNullUniversalLockId
This never points to a valid lock, and also evaluates less than any valid lock IDs. ...
thread::ThreadId get_tail_waiter() const
bool is_null() const
Returns if this object doesn't hold a valid memory block.
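
Several of the IDs in this listing are simple bit-packings. The helpers below spell out the layouts implied by the listed typedef widths (uint16_t ThreadId, uint8_t ThreadGroupId and ThreadLocalOrdinal, uint32_t McsBlockIndex) and by the 0xFFFFU bound asserted in issue_new_block(); they are an illustrative sketch of the assumed encodings, not functions copied from the repository.

// Illustrative sketch of the assumed encodings; not code from the repository.
#include <cassert>
#include <cstdint>

// Assumed: a global ThreadId packs the NUMA node in the high byte and the local
// ordinal in the low byte, which is what decompose_numa_node() and
// decompose_numa_local_ordinal() undo.
inline uint8_t decompose_node(uint16_t thread_id) {
  return static_cast<uint8_t>(thread_id >> 8);
}
inline uint8_t decompose_ordinal(uint16_t thread_id) {
  return static_cast<uint8_t>(thread_id & 0xFFU);
}

// Assumed: the combined tail int received by dereference_rw_tail_block() packs
// the tail waiter's ThreadId in the high 16 bits and its McsBlockIndex in the
// low 16 bits; block indexes stay below 0xFFFFU, matching the ASSERT_ND in
// issue_new_block().
inline uint32_t to_tail_int(uint16_t thread_id, uint32_t block_index) {
  assert(block_index < 0xFFFFU);
  return (static_cast<uint32_t>(thread_id) << 16) | block_index;
}
inline uint16_t tail_waiter(uint32_t tail_int) {
  return static_cast<uint16_t>(tail_int >> 16);
}
inline uint32_t tail_waiter_block(uint32_t tail_int) {
  return tail_int & 0xFFFFU;
}
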