#ifndef FOEDUS_XCT_RETROSPECTIVE_LOCK_LIST_HPP_
#define FOEDUS_XCT_RETROSPECTIVE_LOCK_LIST_HPP_
universal_lock_id_ = id;  // LockEntry::set()
lock_ = lock;
preferred_mode_ = preferred_mode;
taken_mode_ = taken_mode;
mcs_block_ = 0;  // 0 means the lock not taken
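// The predicates used throughout the locking methods below are not in this
// excerpt. A minimal sketch of them, assuming LockMode orders
// kNoLock < kReadLock < kWriteLock (an assumption of this sketch):
bool is_locked() const { return taken_mode_ != kNoLock; }
bool is_enough() const { return taken_mode_ >= preferred_mode_; }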
return volatile_page_resolver_;  // get_volatile_page_resolver()
// ...
// Reserving the next list position (positions are 1-based; 0 is invalid):
++last_active_entry_;
ASSERT_ND(last_active_entry_ < capacity_);
return last_active_entry_;
// ...
return volatile_page_resolver_;  // CurrentLockList has the same accessor
// ...
if (array_[pos].is_locked()) {  // in calculate_last_locked_entry_from()
// ...
ASSERT_ND(correct == last_locked_entry_);  // in assert_last_locked_entry()
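// The is_locked() test above belongs to calculate_last_locked_entry_from(),
// which "only searches among entries at or before from" (member reference
// below). A minimal sketch of that backward scan, assuming 1-based positions
// with 0 as kLockListPositionInvalid:
LockListPosition calculate_last_locked_entry_from(LockListPosition from) const {
  for (LockListPosition pos = from; pos > kLockListPositionInvalid; --pos) {
    if (array_[pos].is_locked()) {
      return pos;  // the highest locked position at or below "from"
    }
  }
  return kLockListPositionInvalid;  // nothing locked yet
}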
// Methods below take or release locks, so they receive MCS_RW_IMPL, a
// template param. Signatures follow the member reference at the end of
// this page:
template<typename MCS_RW_IMPL>
ErrorCode try_or_acquire_single_lock(LockListPosition pos, MCS_RW_IMPL* mcs_rw_impl);

template<typename MCS_RW_IMPL>
ErrorCode try_or_acquire_multiple_locks(LockListPosition upto_pos, MCS_RW_IMPL* mcs_rw_impl);

template<typename MCS_RW_IMPL>
void try_async_single_lock(LockListPosition pos, MCS_RW_IMPL* mcs_rw_impl);

template<typename MCS_RW_IMPL>
bool retry_async_single_lock(LockListPosition pos, MCS_RW_IMPL* mcs_rw_impl);

template<typename MCS_RW_IMPL>
void cancel_async_single_lock(LockListPosition pos, MCS_RW_IMPL* mcs_rw_impl);

template<typename MCS_RW_IMPL>
void try_async_multiple_locks(LockListPosition upto_pos, MCS_RW_IMPL* mcs_rw_impl);

template<typename MCS_RW_IMPL>
void release_all_locks(MCS_RW_IMPL* mcs_rw_impl);

template<typename MCS_RW_IMPL>
void release_all_after(UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl);

template<typename MCS_RW_IMPL>
void release_all_at_and_after(UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl);

template<typename MCS_RW_IMPL>
void giveup_all_after(UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl);

template<typename MCS_RW_IMPL>
void giveup_all_at_and_after(UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl);
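// MCS_RW_IMPL is deliberately a template parameter. The following is a sketch,
// inferred from the calls this header makes, of the interface such an
// implementation must provide; the name McsRwImplConcept is illustrative and
// not part of FOEDUS:
struct McsRwImplConcept {
  McsBlockIndex acquire_unconditional_rw_writer(McsRwLock* lock);
  McsBlockIndex acquire_unconditional_rw_reader(McsRwLock* lock);
  McsBlockIndex acquire_try_rw_writer(McsRwLock* lock);  // 0 when not taken
  McsBlockIndex acquire_try_rw_reader(McsRwLock* lock);
  AcquireAsyncRet acquire_async_rw_writer(McsRwLock* lock);
  AcquireAsyncRet acquire_async_rw_reader(McsRwLock* lock);
  bool retry_async_rw_writer(McsRwLock* lock, McsBlockIndex block);
  bool retry_async_rw_reader(McsRwLock* lock, McsBlockIndex block);
  void cancel_async_rw_writer(McsRwLock* lock, McsBlockIndex block);
  void cancel_async_rw_reader(McsRwLock* lock, McsBlockIndex block);
  void release_rw_writer(McsRwLock* lock, McsBlockIndex block);
  void release_rw_reader(McsRwLock* lock, McsBlockIndex block);
};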
++last_active_entry_;  // same position-issuance pattern as above
ASSERT_ND(last_active_entry_ < capacity_);
return last_active_entry_;
void release_all_after_debuglog(
  uint32_t released_read_locks,
  uint32_t released_write_locks,
  uint32_t already_released_locks,
  uint32_t canceled_async_read_locks,
  uint32_t canceled_async_write_locks) const;

void giveup_all_after_debuglog(
  uint32_t givenup_read_locks,
  uint32_t givenup_write_locks,
  uint32_t givenup_upgrades,
  uint32_t already_enough_locks,
  uint32_t canceled_async_read_locks,
  uint32_t canceled_async_write_locks) const;
template<typename MCS_RW_IMPL>
inline ErrorCode CurrentLockList::try_or_acquire_single_lock(
  LockListPosition pos,
  MCS_RW_IMPL* mcs_rw_impl) {
  LockEntry* lock_entry = get_entry(pos);
  // ...
  McsRwLock* lock_addr = lock_entry->lock_->get_key_lock();
  // ...
  // Read-to-write upgrade: release the read-lock here, then take the
  // write-lock below as if starting fresh.
  mcs_rw_impl->release_rw_reader(lock_addr, lock_entry->mcs_block_);
  // ...
  // In canonical mode (nothing locked beyond pos), acquire unconditionally:
  lock_entry->mcs_block_ = mcs_rw_impl->acquire_unconditional_rw_writer(lock_addr);
  // ...
  lock_entry->mcs_block_ = mcs_rw_impl->acquire_unconditional_rw_reader(lock_addr);
  // ...
  last_locked_entry_ = pos;
  // ...
  // Out of canonical order: only *try*, and let the caller abort on failure
  // rather than risking deadlock.
  lock_entry->mcs_block_ = mcs_rw_impl->acquire_try_rw_writer(lock_addr);
  // ...
  lock_entry->mcs_block_ = mcs_rw_impl->acquire_try_rw_reader(lock_addr);
  // ...
}
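// A hypothetical call site (not from this header) for the method above, using
// CHECK_ERROR_CODE to propagate the 0x0AA1 "Lock acquire failed" error when an
// out-of-canonical-order try fails:
template<typename MCS_RW_IMPL>
ErrorCode lock_one_for_write(CurrentLockList* cll, UniversalLockId id,
                             RwLockableXctId* tid, MCS_RW_IMPL* impl) {
  LockListPosition pos = cll->get_or_add_entry(id, tid, kWriteLock);
  CHECK_ERROR_CODE(cll->try_or_acquire_single_lock(pos, impl));
  return kErrorCodeOk;
}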
template<typename MCS_RW_IMPL>
inline ErrorCode CurrentLockList::try_or_acquire_multiple_locks(
  LockListPosition upto_pos, MCS_RW_IMPL* mcs_rw_impl) {
  ASSERT_ND(upto_pos <= last_active_entry_);
  for (LockListPosition pos = 1U; pos <= upto_pos; ++pos) {
    CHECK_ERROR_CODE(try_or_acquire_single_lock(pos, mcs_rw_impl));
  }
  return kErrorCodeOk;
}
template<typename MCS_RW_IMPL>
inline void CurrentLockList::try_async_single_lock(
  LockListPosition pos, MCS_RW_IMPL* mcs_rw_impl) {
  // ... (on an upgrade, the read-lock is released before queueing for write)
  mcs_rw_impl->release_rw_reader(lock_addr, lock_entry->mcs_block_);
  // ...
  AcquireAsyncRet async_ret;
  if (lock_entry->preferred_mode_ == kWriteLock) {
    async_ret = mcs_rw_impl->acquire_async_rw_writer(lock_addr);
  } else {
    async_ret = mcs_rw_impl->acquire_async_rw_reader(lock_addr);
  }
  // ...
}
template<typename MCS_RW_IMPL>
inline bool CurrentLockList::retry_async_single_lock(
  LockListPosition pos, MCS_RW_IMPL* mcs_rw_impl) {
  // ...
  bool acquired = false;
  if (lock_entry->preferred_mode_ == kWriteLock) {
    acquired = mcs_rw_impl->retry_async_rw_writer(lock_addr, lock_entry->mcs_block_);
  } else {
    acquired = mcs_rw_impl->retry_async_rw_reader(lock_addr, lock_entry->mcs_block_);
  }
  if (acquired) {
    lock_entry->taken_mode_ = lock_entry->preferred_mode_;
    last_locked_entry_ = std::max(last_locked_entry_, pos);
  }
  // ...
}
template<typename MCS_RW_IMPL>
inline void CurrentLockList::cancel_async_single_lock(
  LockListPosition pos, MCS_RW_IMPL* mcs_rw_impl) {
  // ...
  if (lock_entry->preferred_mode_ == kReadLock) {
    mcs_rw_impl->cancel_async_rw_reader(lock_addr, lock_entry->mcs_block_);
  } else {
    mcs_rw_impl->cancel_async_rw_writer(lock_addr, lock_entry->mcs_block_);
  }
  // ...
}
template<typename MCS_RW_IMPL>
inline void CurrentLockList::try_async_multiple_locks(
  LockListPosition upto_pos, MCS_RW_IMPL* mcs_rw_impl) {
  ASSERT_ND(upto_pos <= last_active_entry_);
  for (LockListPosition pos = 1U; pos <= upto_pos; ++pos) {
    try_async_single_lock(pos, mcs_rw_impl);
  }
}
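// A hypothetical caller (not from this header) sketching the asynchronous
// protocol implied by the three single-lock methods above: queue a request,
// poll for the grant, and withdraw the queue node if we give up.
template<typename MCS_RW_IMPL>
bool async_lock_example(CurrentLockList* cll, LockListPosition pos,
                        MCS_RW_IMPL* impl, int max_polls) {
  cll->try_async_single_lock(pos, impl);  // push a queue node; may acquire at once
  for (int i = 0; i < max_polls; ++i) {
    if (cll->get_entry(pos)->is_enough()) {
      return true;  // already granted (possibly by try_async itself)
    }
    if (cll->retry_async_single_lock(pos, impl)) {
      return true;  // granted on this poll
    }
  }
  cll->cancel_async_single_lock(pos, impl);  // give up; withdraw the queue node
  return false;
}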
template<typename MCS_RW_IMPL>
inline void CurrentLockList::release_all_after(
  UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl) {
  uint32_t released_read_locks = 0;
  uint32_t released_write_locks = 0;
  uint32_t already_released_locks = 0;
  uint32_t canceled_async_read_locks = 0;
  uint32_t canceled_async_write_locks = 0;

  LockListPosition new_last_locked_entry = kLockListPositionInvalid;
  for (LockEntry* entry = array_ + 1U; entry <= array_ + last_active_entry_; ++entry) {
    if (entry->universal_lock_id_ <= address) {
      // Entries at or before the boundary are kept; just track the last locked one.
      if (entry->is_locked()) {
        new_last_locked_entry = entry - array_;
      }
      continue;
    }
    if (entry->is_locked()) {
      if (entry->taken_mode_ == kReadLock) {
        mcs_rw_impl->release_rw_reader(entry->lock_->get_key_lock(), entry->mcs_block_);
        ++released_read_locks;
      } else {
        mcs_rw_impl->release_rw_writer(entry->lock_->get_key_lock(), entry->mcs_block_);
        ++released_write_locks;
      }
      entry->mcs_block_ = 0;
      entry->taken_mode_ = kNoLock;
    } else if (entry->mcs_block_) {
      // Not taken, but an async request is pending: cancel the queue node.
      if (entry->preferred_mode_ == kReadLock) {
        mcs_rw_impl->cancel_async_rw_reader(entry->lock_->get_key_lock(), entry->mcs_block_);
        ++canceled_async_read_locks;
      } else {
        mcs_rw_impl->cancel_async_rw_writer(entry->lock_->get_key_lock(), entry->mcs_block_);
        ++canceled_async_write_locks;
      }
      entry->mcs_block_ = 0;
    } else {
      ++already_released_locks;
    }
  }

  last_locked_entry_ = new_last_locked_entry;
  // ...
  release_all_after_debuglog(
    released_read_locks,
    released_write_locks,
    already_released_locks,
    canceled_async_read_locks,
    canceled_async_write_locks);
}
template<typename MCS_RW_IMPL>
inline void CurrentLockList::release_all_at_and_after(
  UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl) {
  // ... (same as release_all_after(address - 1), per the member reference)
  release_all_after<MCS_RW_IMPL>(address - 1U, mcs_rw_impl);
}
template<typename MCS_RW_IMPL>
inline void CurrentLockList::giveup_all_after(
  UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl) {
  uint32_t givenup_read_locks = 0;
  uint32_t givenup_write_locks = 0;
  uint32_t givenup_upgrades = 0;
  uint32_t already_enough_locks = 0;
  uint32_t canceled_async_read_locks = 0;
  uint32_t canceled_async_write_locks = 0;

  for (LockEntry* entry = array_ + 1U; entry <= array_ + last_active_entry_; ++entry) {
    if (entry->universal_lock_id_ <= address) {
      continue;
    }
    if (entry->preferred_mode_ == kNoLock) {
      continue;  // nothing to give up
    }
    if (entry->is_enough()) {
      ++already_enough_locks;
      continue;  // already took the lock; give-up doesn't release it
    }
    if (entry->is_locked()) {
      // Read-locked but preferring write: give up only the upgrade intent.
      ++givenup_upgrades;
      entry->preferred_mode_ = entry->taken_mode_;
    } else if (entry->mcs_block_) {
      // Async request pending: cancel the queue node.
      if (entry->preferred_mode_ == kReadLock) {
        mcs_rw_impl->cancel_async_rw_reader(entry->lock_->get_key_lock(), entry->mcs_block_);
        ++canceled_async_read_locks;
      } else {
        mcs_rw_impl->cancel_async_rw_writer(entry->lock_->get_key_lock(), entry->mcs_block_);
        ++canceled_async_write_locks;
      }
      entry->mcs_block_ = 0;
      entry->preferred_mode_ = kNoLock;
    } else {
      if (entry->preferred_mode_ == kReadLock) {
        ++givenup_read_locks;
      } else {
        ++givenup_write_locks;
      }
      entry->preferred_mode_ = kNoLock;
    }
  }

  giveup_all_after_debuglog(
    givenup_read_locks,
    givenup_write_locks,
    givenup_upgrades,
    already_enough_locks,
    canceled_async_read_locks,
    canceled_async_write_locks);
}
template<typename MCS_RW_IMPL>
inline void CurrentLockList::giveup_all_at_and_after(
  UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl) {
  // ...
  giveup_all_after<MCS_RW_IMPL>(address - 1U, mcs_rw_impl);
}
template<typename LOCK_LIST, typename LOCK_ENTRY>
inline LockListPosition lock_lower_bound(
  const LOCK_LIST& list,
  UniversalLockId lock) {
  LockListPosition last_active_entry = list.get_last_active_entry();
  if (last_active_entry == kLockListPositionInvalid) {
    return kLockListPositionInvalid;
  }
  // Cheap checks against the tail entry first:
  const LOCK_ENTRY* array = list.get_array();
  if (array[last_active_entry].universal_lock_id_ == lock) {
    return last_active_entry;
  } else if (array[last_active_entry].universal_lock_id_ < lock) {
    return last_active_entry + 1U;
  }
  LockListPosition pos
    = std::lower_bound(
        array + 1U,  // position 0 is a dummy entry
        array + last_active_entry + 1U,
        lock,
        typename LOCK_ENTRY::LessThan())
      - array;
  ASSERT_ND(array[pos].universal_lock_id_ >= lock);
  ASSERT_ND(pos == 1U || array[pos - 1U].universal_lock_id_ < lock);
  return pos;
}
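// Worked example (illustrative, consistent with the code above): with entries
// [dummy, 3, 5, 7] (last_active_entry = 3), lock=7 returns 3 and lock=8
// returns 4 via the tail checks; lock=6 returns 3, lock=4 or 5 returns 2,
// and lock=2 returns 1 via std::lower_bound.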
template<typename LOCK_LIST, typename LOCK_ENTRY>
inline LockListPosition lock_binary_search(
  const LOCK_LIST& list,
  UniversalLockId lock) {
  LockListPosition pos = lock_lower_bound<LOCK_LIST, LOCK_ENTRY>(list, lock);
  if (pos != kLockListPositionInvalid && pos <= list.get_last_active_entry()) {
    const LOCK_ENTRY* array = list.get_array();
    if (array[pos].universal_lock_id_ == lock) { return pos; }
  }
  return kLockListPositionInvalid;
}
#endif  // FOEDUS_XCT_RETROSPECTIVE_LOCK_LIST_HPP_
Member reference:

typedef uintptr_t UniversalLockId
Universally ordered identifier of each lock.

typedef uint32_t LockListPosition
Index in a lock-list, either RLL or CLL.

typedef uint32_t McsBlockIndex
Index in thread-local MCS block.

const LockListPosition kLockListPositionInvalid
The invalid (zero) list position.

const UniversalLockId kNullUniversalLockId
This never points to a valid lock, and also evaluates less than any valid lock...

enum LockMode
Represents a mode of lock. As taken_mode_: kNoLock means not taken the lock yet; kReadLock means we took a read-lock, not write-lock yet; kWriteLock means we took a write-lock.

enum ErrorCode
Enum of error codes defined in error_code.xmacro. 0x0AA1: "XCTION: Lock acquire failed."

struct LockEntry
An entry in CLL and RLL, representing a lock that is taken or will be taken.
  UniversalLockId universal_lock_id_ : used to order locks in canonical order.
  RwLockableXctId* lock_ : virtual address of the lock.
  LockMode preferred_mode_ : which lock mode we should take according to RLL.
  LockMode taken_mode_ : which lock mode we have taken during the current run (of course initially kNoLock)...
  McsBlockIndex mcs_block_ : 0 means the lock not taken.
  void set(UniversalLockId id, RwLockableXctId* lock, LockMode preferred_mode, LockMode taken_mode)
  bool operator<(const LockEntry& rhs) const
  friend std::ostream& operator<<(std::ostream& o, const LockEntry& v) : debugging.
  struct LessThan : for std::binary_search() etc. without creating the object.
    bool operator()(const LockEntry& lhs, UniversalLockId rhs) const
    bool operator()(UniversalLockId lhs, const LockEntry& rhs) const

struct AcquireAsyncRet
Return value of acquire_async_rw.
  bool acquired_ : whether we immediately acquired the lock or not.
  McsBlockIndex block_index_ : the queue node we pushed.

class CurrentLockList
Sorted list of all locks, either read-lock or write-lock, taken in the current run.
  void init(LockEntry* array, uint32_t capacity, const memory::GlobalVolatilePageResolver& resolver)
  LockEntry* get_entry(LockListPosition pos)
  const LockEntry* get_entry(LockListPosition pos) const
  const LockEntry* get_array() const
  const LockEntry* cbegin() const
  const LockEntry* cend() const
  uint32_t get_capacity() const
  LockListPosition get_last_active_entry() const
  bool is_valid_entry(LockListPosition pos) const
  LockListPosition get_last_locked_entry() const
  UniversalLockId get_max_locked_id() const
  LockListPosition calculate_last_locked_entry() const : calculates last_locked_entry_ by really checking the whole list.
  LockListPosition calculate_last_locked_entry_from(LockListPosition from) const : only searches among entries at or before "from".
  void assert_last_locked_entry() const
  const memory::GlobalVolatilePageResolver& get_volatile_page_resolver() const
  LockListPosition binary_search(UniversalLockId lock) const : analogous to std::binary_search() for the given lock.
  LockListPosition lower_bound(UniversalLockId lock) const : analogous to std::lower_bound() for the given lock.
  LockListPosition get_or_add_entry(UniversalLockId lock_id, RwLockableXctId* lock, LockMode preferred_mode) : adds an entry to this list, re-sorting part of the list if necessary to keep the sortedness.
  void batch_insert_write_placeholders(const WriteXctAccess* write_set, uint32_t write_set_size) : creates entries for all write-sets in one shot.
  void prepopulate_for_retrospective_lock_list(const RetrospectiveLockList& rll) : another batch-insert method used at the beginning of a transaction.
  void assert_sorted() const __attribute__((always_inline))
  void assert_sorted_impl() const
  friend std::ostream& operator<<(std::ostream& o, const CurrentLockList& v)
  Methods below take or release locks, so they receive MCS_RW_IMPL, a template param:
    ErrorCode try_or_acquire_single_lock(LockListPosition pos, MCS_RW_IMPL* mcs_rw_impl)
    ErrorCode try_or_acquire_multiple_locks(LockListPosition upto_pos, MCS_RW_IMPL* mcs_rw_impl) : acquires multiple locks up to the given position in canonical order.
    void try_async_single_lock(LockListPosition pos, MCS_RW_IMPL* mcs_rw_impl)
    bool retry_async_single_lock(LockListPosition pos, MCS_RW_IMPL* mcs_rw_impl)
    void cancel_async_single_lock(LockListPosition pos, MCS_RW_IMPL* mcs_rw_impl)
    void try_async_multiple_locks(LockListPosition upto_pos, MCS_RW_IMPL* mcs_rw_impl)
    void release_all_locks(MCS_RW_IMPL* mcs_rw_impl)
    void release_all_after(UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl) : releases all locks in CLL whose addresses are canonically ordered after the parameter.
    void release_all_at_and_after(UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl) : same as release_all_after(address - 1).
    void giveup_all_after(UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl) : gives up locks in CLL that are not yet taken.
    void giveup_all_at_and_after(UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl)

class RetrospectiveLockList
Sorted list of locks to take in the next run of the given transaction. It offers the same accessors as CurrentLockList (init, get_entry, get_array, cbegin/cend, get_capacity, get_last_active_entry, is_valid_entry, binary_search, lower_bound, get_volatile_page_resolver, assert_sorted, assert_sorted_impl).
  RetrospectiveLockList() : init/uninit.
  void construct(thread::Thread* context, uint32_t read_lock_threshold) : fills out this retrospective lock list for the next run of the given transaction.
  friend std::ostream& operator<<(std::ostream& o, const RetrospectiveLockList& v)

Free functions (general lower_bound/binary_search logic for any kind of LockList/LockEntry):
  LockListPosition lock_lower_bound(const LOCK_LIST& list, UniversalLockId lock)
  LockListPosition lock_binary_search(const LOCK_LIST& list, UniversalLockId lock)

Related symbols referenced on this page:
  RwLockableXctId : the MCS reader-writer lock variant of LockableXctId; McsRwLock* get_key_lock() __attribute__((always_inline)).
  McsRwLock : an MCS reader-writer lock data structure.
  WriteXctAccess : represents a record of write-access during a transaction.
  thread::Thread : represents one thread running on one NUMA core.
  ASSERT_ND(x) : a warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
  CHECK_ERROR_CODE(x) : this macro calls x and checks its returned error code.
  ALWAYS_INLINE : a function suffix to hint that the function should always be inlined.
  foedus : root package of FOEDUS (Fast Optimistic Engine for Data Unification Services).
  Also referenced: definitions of IDs in this package and a few related constant values; forward declarations of classes in the transaction and thread packages.
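Taken together, the intended flow is: a run that aborts constructs an RLL, and the next run of the same transaction pre-populates its CLL from that RLL, then takes the locks in canonical (sorted) order. A hypothetical end-to-end sketch; the function and its error handling are illustrative, not part of this header:

template<typename MCS_RW_IMPL>
ErrorCode run_with_rll(CurrentLockList* cll, const RetrospectiveLockList& rll,
                       MCS_RW_IMPL* impl) {
  // Batch-insert placeholders for the locks the previous run wanted:
  cll->prepopulate_for_retrospective_lock_list(rll);
  // Acquire everything up to the last entry in canonical order; acquiring in
  // one global sorted order is what rules out deadlock between threads.
  CHECK_ERROR_CODE(cll->try_or_acquire_multiple_locks(
      cll->get_last_active_entry(), impl));
  // ... transaction logic and commit protocol would run here ...
  cll->release_all_locks(impl);
  return kErrorCodeOk;
}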