#include <glog/logging.h>
// In both RetrospectiveLockList::init() and CurrentLockList::init(): each list
// remembers the global page resolver, which maps lock addresses to universal lock IDs.
  volatile_page_resolver_ = resolver;
// In operator<<(std::ostream&, const LockEntry&): XML-style debug dump of one entry.
  // ... (other LockEntry fields elided in this excerpt)
  o << "<TakenMode>" << v.taken_mode_ << "</TakenMode>";
  if (v.lock_) {
    // ... (dump the lock itself)
  } else {
    o << "<Lock>nullptr</Lock>";
  }
// In operator<<(std::ostream&, const CurrentLockList&):
  o << "<CurrentLockList>"
    << "<Capacity>" << v.capacity_ << "</Capacity>"
    << "<LastActiveEntry>" << v.last_active_entry_ << "</LastActiveEntry>"
    << "<LastLockedEntry>" << v.last_locked_entry_ << "</LastLockedEntry>";
  const uint32_t kMaxShown = 32U;
  for (auto i = 1U; i <= std::min(v.last_active_entry_, kMaxShown); ++i) {
    // ... (dump v.array_[i])
  }
  o << "</CurrentLockList>";
// In operator<<(std::ostream&, const RetrospectiveLockList&):
  o << "<RetrospectiveLockList>"
    << "<Capacity>" << v.capacity_ << "</Capacity>"
    << "<LastActiveEntry>" << v.last_active_entry_ << "</LastActiveEntry>";
  const uint32_t kMaxShown = 32U;
  for (auto i = 1U; i <= std::min(v.last_active_entry_, kMaxShown); ++i) {
    // ... (dump v.array_[i])
  }
  o << "</RetrospectiveLockList>";
// Debug-only helper: verifies that a lock list is sorted by universal lock ID.
template<typename LOCK_LIST>
void lock_assert_sorted(const LOCK_LIST& list) {
  const LockEntry* array = list.get_array();
  // ...
  for (LockListPosition pos = 2U; pos <= list.get_last_active_entry(); ++pos) {
    ASSERT_ND(array[pos - 1U].universal_lock_id_ < array[pos].universal_lock_id_);
    ASSERT_ND(array[pos].universal_lock_id_ != 0);
    // ...
    uintptr_t lock_addr = reinterpret_cast<uintptr_t>(array[pos].lock_);
    // ... (presumably validates lock_addr against the stored universal lock ID)
  }
}
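As a cross-check, the same invariant can be stated with std::is_sorted. This is a sketch, not part of the original file; it assumes only the get_array()/get_last_active_entry() accessors seen above, and that slot 0 of the array is the unused invalid position:

#include <algorithm>

template <typename LOCK_LIST>
bool sketch_is_sorted(const LOCK_LIST& list) {
  const auto* array = list.get_array();
  return std::is_sorted(
    array + 1,  // slot 0 is the unused sentinel
    array + list.get_last_active_entry() + 1,
    [](const auto& a, const auto& b) { return a.universal_lock_id_ < b.universal_lock_id_; });
}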
LockListPosition CurrentLockList::binary_search(UniversalLockId lock) const {
  return lock_binary_search<CurrentLockList, LockEntry>(*this, lock);
}
LockListPosition RetrospectiveLockList::binary_search(UniversalLockId lock) const {
  return lock_binary_search<RetrospectiveLockList, LockEntry>(*this, lock);
}
LockListPosition CurrentLockList::lower_bound(UniversalLockId lock) const {
  return lock_lower_bound<CurrentLockList, LockEntry>(*this, lock);
}
LockListPosition RetrospectiveLockList::lower_bound(UniversalLockId lock) const {
  return lock_lower_bound<RetrospectiveLockList, LockEntry>(*this, lock);
}
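A sketch of how the shared lock_lower_bound helper could look, assuming the 1-based array layout visible above; Entry is a hypothetical stand-in for LockEntry, and the real helper may differ:

#include <algorithm>
#include <cstdint>

using UniversalLockId = uintptr_t;
using LockListPosition = uint32_t;
struct Entry { UniversalLockId universal_lock_id_; };

template <typename LIST>
LockListPosition sketch_lower_bound(const LIST& list, UniversalLockId lock) {
  const Entry* array = list.get_array();
  const Entry* begin = array + 1;  // position 0 is the invalid sentinel
  const Entry* end = array + list.get_last_active_entry() + 1;
  const Entry* it = std::lower_bound(
    begin, end, lock,
    [](const Entry& e, UniversalLockId id) { return e.universal_lock_id_ < id; });
  // Returns last_active_entry_ + 1 when every existing ID is smaller,
  // matching the insert position used by get_or_add_entry() below.
  return static_cast<LockListPosition>(it - array);
}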
// In CurrentLockList::get_or_add_entry(id, lock, preferred_mode):
  // Easy case 1: the new entry belongs at the tail. Just append.
  if (insert_pos > last_active_entry_) {
    ASSERT_ND(insert_pos == last_active_entry_ + 1U);
    LockListPosition new_pos = issue_new_position();
    array_[new_pos].set(id, lock, preferred_mode, kNoLock);
    return new_pos;
  }

  // Easy case 2: the lock already has an entry. Just strengthen its mode if needed.
  ASSERT_ND(array_[insert_pos].universal_lock_id_ >= id);
  if (array_[insert_pos].universal_lock_id_ == id) {
    if (array_[insert_pos].preferred_mode_ < preferred_mode) {
      array_[insert_pos].preferred_mode_ = preferred_mode;
    }
    return insert_pos;
  }

  // Hard case: insert in the middle, shifting the tail one slot to the right.
  DVLOG(1) << "not an easy case. We need to adjust the order. This is costly!";
  ASSERT_ND(insert_pos <= last_active_entry_);
  ASSERT_ND(array_[insert_pos].universal_lock_id_ > id);
  ASSERT_ND(insert_pos == 1U || array_[insert_pos - 1U].universal_lock_id_ < id);
  LockListPosition new_last_pos = issue_new_position();
  uint64_t moved_bytes = sizeof(LockEntry) * (new_last_pos - insert_pos);
  std::memmove(array_ + insert_pos + 1U, array_ + insert_pos, moved_bytes);
  DVLOG(1) << "Re-sorted. hope this won't happen often";
  array_[insert_pos].set(id, lock, preferred_mode, kNoLock);
  if (last_locked_entry_ >= insert_pos) {
    ++last_locked_entry_;
  }
  return insert_pos;
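The shifting step above is the classic insert into a sorted array. A compact, self-contained sketch of the same technique with plain integers (the real method additionally tracks lock modes and last_locked_entry_); the caller guarantees room for one more slot:

#include <cstdint>
#include <cstring>

uint32_t sorted_insert(uint64_t* arr, uint32_t count, uint64_t value) {
  uint32_t pos = 1;  // 1-based, like the lock lists
  while (pos <= count && arr[pos] < value) {
    ++pos;
  }
  if (pos > count) {  // easy case: belongs at the tail
    arr[pos] = value;
    return pos;
  }
  if (arr[pos] == value) {  // easy case: already present
    return pos;
  }
  // Costly case: shift arr[pos..count] one slot right, then claim slot pos.
  std::memmove(arr + pos + 1, arr + pos, sizeof(uint64_t) * (count - pos + 1));
  arr[pos] = value;
  return pos;
}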
// In RetrospectiveLockList::construct(): turn the aborted run's read/write sets
// into a sorted, deduplicated list of locks to take in the next run.
  ASSERT_ND(capacity_ >= read_set_size + write_set_size);
  if (read_set_size == 0 && write_set_size == 0) {
    return;
  }

  // Read-locks: only for records on hot pages or records that failed
  // verification (the observed TID no longer matches).
  for (uint32_t i = 0; i < read_set_size; ++i) {
    RwLockableXctId* lock = read_set[i].owner_id_address_;
    storage::Page* page = storage::to_page(lock);
    if (page->get_header().hotness_.value_ < read_lock_threshold
      && lock->xct_id_ == read_set[i].observed_owner_id_) {
      continue;  // cold and successfully verified: no read-lock needed
    }
    auto pos = issue_new_position();
    ASSERT_ND(
      read_set[i].owner_lock_id_ ==
      xct_id_to_universal_lock_id(volatile_page_resolver_, lock));
    array_[pos].set(read_set[i].owner_lock_id_, lock, kReadLock, kNoLock);
  }
  DVLOG(1) << "Added " << last_active_entry_ << " to RLL for read-locks";

  // Write-locks: always added.
  for (uint32_t i = 0; i < write_set_size; ++i) {
    auto pos = issue_new_position();
    ASSERT_ND(
      write_set[i].owner_lock_id_ ==
      xct_id_to_universal_lock_id(volatile_page_resolver_, write_set[i].owner_id_address_));
    array_[pos].set(
      write_set[i].owner_lock_id_,
      write_set[i].owner_id_address_,
      kWriteLock,
      kNoLock);
  }

  // The entries are unsorted and may contain duplicates. Sort, then merge
  // duplicates in place, upgrading to kWriteLock when both modes appear.
  std::sort(array_ + 1U, array_ + last_active_entry_ + 1U);
  LockListPosition prev_pos = 1U;
  uint32_t merged_count = 0U;
  for (LockListPosition pos = 2U; pos <= last_active_entry_; ++pos) {
    ASSERT_ND(array_[prev_pos].universal_lock_id_ <= array_[pos].universal_lock_id_);
    if (array_[prev_pos].universal_lock_id_ == array_[pos].universal_lock_id_) {
      if (array_[pos].preferred_mode_ == kWriteLock) {
        array_[prev_pos].preferred_mode_ = kWriteLock;  // merge: keep strongest mode
      }
      ++merged_count;
    } else {
      if (prev_pos + 1U < pos) {
        std::memcpy(array_ + prev_pos + 1U, array_ + pos, sizeof(LockEntry));  // compact leftward
      }
      ++prev_pos;
    }
  }
  ASSERT_ND(prev_pos + merged_count == last_active_entry_);
  last_active_entry_ = prev_pos;
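The sort-then-merge pass above is an in-place deduplication over a sorted range that keeps the strongest requested mode per lock. A self-contained sketch of just that technique, with a simplified Req type standing in for LockEntry:

#include <algorithm>
#include <cstdint>
#include <vector>

enum Mode : uint8_t { kNoLock = 0, kReadLock = 1, kWriteLock = 2 };
struct Req {
  uint64_t id;
  Mode mode;
  bool operator<(const Req& o) const { return id < o.id; }
};

// Sorts reqs by lock ID and collapses duplicates, upgrading to the
// strongest mode seen for each ID (write beats read).
void sort_and_merge(std::vector<Req>* reqs) {
  std::sort(reqs->begin(), reqs->end());
  size_t prev = 0;
  for (size_t pos = 1; pos < reqs->size(); ++pos) {
    if ((*reqs)[prev].id == (*reqs)[pos].id) {
      if ((*reqs)[pos].mode > (*reqs)[prev].mode) {
        (*reqs)[prev].mode = (*reqs)[pos].mode;  // merge: keep strongest
      }
    } else {
      ++prev;
      (*reqs)[prev] = (*reqs)[pos];  // compact leftward
    }
  }
  if (!reqs->empty()) {
    reqs->resize(prev + 1);
  }
}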
void CurrentLockList::batch_insert_write_placeholders(
  const WriteXctAccess* write_set,
  uint32_t write_set_size) {
  // Both write_set and array_ are already sorted, so we can merge in one pass.
  if (write_set_size == 0) {
    return;
  }
  // Debug-only sanity check: the write set is sorted by lock ID, ties broken by ordinal.
  for (uint32_t i = 1; i < write_set_size; ++i) {
    ASSERT_ND(write_set[i - 1].ordinal_ != write_set[i].ordinal_);
    if (write_set[i].owner_lock_id_ == write_set[i - 1].owner_lock_id_) {
      ASSERT_ND(write_set[i - 1].ordinal_ < write_set[i].ordinal_);
    } else {
      ASSERT_ND(write_set[i - 1].owner_lock_id_ < write_set[i].owner_lock_id_);
    }
  }

  uint32_t added = 0;
  if (last_active_entry_ == kLockListPositionInvalid) {
    // Easy case: the list is empty. Append each distinct write-set lock in order.
    for (uint32_t write_pos = 0; write_pos < write_set_size; ++write_pos) {
      // ... (array_[++added].set(..., kWriteLock, kNoLock), skipping duplicate IDs)
    }
    last_active_entry_ = added;
    return;
  }

  // General case: merge-join the two sorted sequences. Entries for locks that
  // are missing from the list are appended past the tail, temporarily breaking sortedness.
  uint32_t write_pos = 0;
  for (LockListPosition pos = 1U; pos <= last_active_entry_ && write_pos < write_set_size;) {
    LockEntry* existing = array_ + pos;
    const UniversalLockId write_lock_id = write_set[write_pos].owner_lock_id_;
    if (existing->universal_lock_id_ < write_lock_id) {
      ++pos;  // existing entry for a smaller lock; skip
      continue;
    }
    if (existing->universal_lock_id_ == write_lock_id) {
      existing->preferred_mode_ = kWriteLock;  // already listed; demand write mode
    } else {
      ++added;  // missing; append a placeholder past the current tail
      LockEntry* new_entry = array_ + last_active_entry_ + added;
      new_entry->set(write_lock_id, write_set[write_pos].owner_id_address_, kWriteLock, kNoLock);
    }
    // Skip any remaining write-set entries for the same record.
    for (++write_pos; write_pos < write_set_size; ++write_pos) {
      const UniversalLockId next_write_id = write_set[write_pos].owner_lock_id_;
      ASSERT_ND(next_write_id >= write_lock_id);
      if (next_write_id > write_lock_id) {
        break;
      }
    }
  }
  // Remaining writes have IDs beyond every existing entry; append them too.
  while (write_pos < write_set_size) {
    const UniversalLockId write_lock_id = write_set[write_pos].owner_lock_id_;
    ASSERT_ND(array_[last_active_entry_].universal_lock_id_ < write_lock_id);
    ++added;
    LockEntry* new_entry = array_ + last_active_entry_ + added;
    new_entry->set(write_lock_id, write_set[write_pos].owner_id_address_, kWriteLock, kNoLock);
    ++write_pos;
    // ... (advance write_pos past duplicate IDs, as above)
  }

  // One sort at the end restores sortedness over old and appended entries.
  last_active_entry_ += added;
  std::sort(array_ + 1U, array_ + 1U + last_active_entry_);

  // Debug-only verification: every write-set lock now has an entry in the list.
  for (uint32_t i = 0; i < write_set_size; ++i) {
    // ... (ASSERT_ND(binary_search(write_set[i].owner_lock_id_) != kLockListPositionInvalid))
  }
}
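A condensed sketch of the same strategy with simplified types: walk the two sorted sequences, append missing write IDs past the tail, then restore sortedness with a single sort. Appending and sorting once avoids shifting the array for every insert:

#include <algorithm>
#include <cstdint>
#include <vector>

// list: sorted, unique lock IDs already present (0-based here for brevity;
// the real code is 1-based). writes: sorted write-set IDs, possibly duplicated.
void batch_insert_sketch(std::vector<uint64_t>* list,
                         const std::vector<uint64_t>& writes) {
  const size_t original = list->size();
  size_t pos = 0;
  for (size_t w = 0; w < writes.size(); ++w) {
    if (w > 0 && writes[w] == writes[w - 1]) {
      continue;  // skip duplicate write-set entries for the same record
    }
    while (pos < original && (*list)[pos] < writes[w]) {
      ++pos;
    }
    if (pos == original || (*list)[pos] != writes[w]) {
      list->push_back(writes[w]);  // temporarily breaks sortedness at the tail
    }
  }
  std::sort(list->begin(), list->end());  // one sort restores the invariant
}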
void CurrentLockList::release_all_after_debuglog(
  uint32_t released_read_locks,
  uint32_t released_write_locks,
  uint32_t already_released_locks,
  uint32_t canceled_async_read_locks,
  uint32_t canceled_async_write_locks) const {
  DVLOG(1) << " Unlocked " << released_read_locks << " read locks and"
    << " " << released_write_locks << " write locks."
    << " Also cancelled " << canceled_async_read_locks << " async-waiting read locks,"
    << " " << canceled_async_write_locks << " async-waiting write locks."
    << " " << already_released_locks << " were already unlocked";
}
void CurrentLockList::giveup_all_after_debuglog(
  uint32_t givenup_read_locks,
  uint32_t givenup_write_locks,
  uint32_t givenup_upgrades,
  uint32_t already_enough_locks,
  uint32_t canceled_async_read_locks,
  uint32_t canceled_async_write_locks) const {
  DVLOG(1) << " Gave up " << givenup_read_locks << " read locks,"
    << " " << givenup_write_locks << " write locks, and " << givenup_upgrades << " upgrades."
    << " Also cancelled " << canceled_async_read_locks << " async-waiting read locks,"
    << " " << canceled_async_write_locks << " async-waiting write locks."
    << " " << already_enough_locks << " already had a sufficient lock mode";
}
Symbols referenced in this listing (brief descriptions from their headers):

xct::Xct & get_current_xct()
Returns the transaction that is currently running on this thread.
taken_mode_: we took a read-lock, but not a write-lock yet.
UniversalLockId universal_lock_id_
Used to order locks in canonical order.
std::ostream & operator<<(std::ostream &o, const LockEntry &v)
Debugging.
LockListPosition binary_search(UniversalLockId lock) const
Analogous to std::binary_search() for the given lock.
Page * to_page(const void *address)
A super-dirty way to obtain the Page that the address belongs to.
void set(UniversalLockId id, RwLockableXctId *lock, LockMode preferred_mode, LockMode taken_mode)
uint32_t get_write_set_size() const
uint32_t ordinal_
Indicates the ordinal among ReadXctAccess/WriteXctAccess of this transaction.
void construct(thread::Thread *context, uint32_t read_lock_threshold)
Fill out this retrospective lock list for the next run of the given transaction.
Root package of FOEDUS (Fast Optimistic Engine for Data Unification Services).
Represents a record of write-access during a transaction.
Represents one thread running on one NUMA core.
uint8_t value_
Logarithmic counter of aborts.
Represents a user transaction.
LockListPosition get_last_active_entry() const
LockListPosition lower_bound(UniversalLockId lock) const
Analogous to std::lower_bound() for the given lock.
const LockListPosition kLockListPositionInvalid
Represents a record of read-access during a transaction.
bool is_active() const
Returns whether the object is an active transaction.
XctId xct_id_
The second 64 bits: the persistent status part of the TID.
An entry in CLL and RLL, representing a lock that is taken or will be taken.
void prepopulate_for_retrospective_lock_list(const RetrospectiveLockList &rll)
Another batch-insert method used at the beginning of a transaction.
ReadXctAccess * get_read_set()
taken_mode_: we have not taken the lock yet.
uintptr_t UniversalLockId
Universally ordered identifier of each lock.
The MCS reader-writer lock variant of LockableXctId.
void assert_sorted_impl() const
UniversalLockId to_universal_lock_id(storage::VolatilePagePointer page_id, uintptr_t addr)
VolatilePagePointer get_volatile_page_id() const
UniversalLockId xct_id_to_universal_lock_id(const memory::GlobalVolatilePageResolver &resolver, RwLockableXctId *lock)
uint32_t get_read_set_size() const
RwLockableXctId * owner_id_address_
Pointer to the accessed record.
uint32_t LockListPosition
Index in a lock-list, either RLL or CLL.
LockListPosition binary_search(UniversalLockId lock) const
Analogous to std::binary_search() for the given lock.
LockListPosition calculate_last_locked_entry() const
Calculate last_locked_entry_ by really checking the whole list.
LockMode preferred_mode_
Which lock mode we should take according to the RLL.
taken_mode_: we took a write-lock.
LockListPosition lower_bound(UniversalLockId lock) const
Analogous to std::lower_bound() for the given lock.
void assert_last_locked_entry() const
LockMode taken_mode_
Which lock mode we have taken during the current run (of course initially kNoLock) ...
Just a marker to denote that the memory region represents a data page.
void init(LockEntry *array, uint32_t capacity, const memory::GlobalVolatilePageResolver &resolver)
RwLockableXctId * lock_
Virtual address of the lock.
const LockEntry * get_array() const
WriteXctAccess * get_write_set()
void lock_assert_sorted(const LOCK_LIST &list)
RetrospectiveLockList()
Init/Uninit.
void batch_insert_write_placeholders(const WriteXctAccess *write_set, uint32_t write_set_size)
Create entries for all write-sets in one-shot.
LockMode
Represents a mode of lock.
void init(LockEntry *array, uint32_t capacity, const memory::GlobalVolatilePageResolver &resolver)
PageHeader & get_header()
At least the basic header exists in all pages.
Sorted list of all locks, either read-lock or write-lock, taken in the current run.
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
void assert_sorted() const __attribute__((always_inline))
UniversalLockId owner_lock_id_
Universal Lock ID of the lock in the record.
void assert_sorted_impl() const
void assert_sorted() const __attribute__((always_inline))
LockListPosition get_or_add_entry(UniversalLockId lock_id, RwLockableXctId *lock, LockMode preferred_mode)
Adds an entry to this list, re-sorting part of the list if necessary to keep the sortedness.
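A closing note on why both lists keep entries sorted by UniversalLockId (the "canonical order" mentioned above): if every transaction acquires locks in ascending universal-ID order, a waiting transaction never already holds a lock that orders after the one it waits for, so wait-for cycles, and hence deadlocks, cannot form. A toy illustration of the discipline, with a stubbed acquire() standing in for the real MCS reader-writer lock calls:

#include <algorithm>
#include <cstdint>
#include <vector>

using UniversalLockId = uintptr_t;

// Stub: the real code takes an MCS reader-writer lock here.
void acquire(UniversalLockId /*id*/) {}

void acquire_in_canonical_order(std::vector<UniversalLockId> locks) {
  std::sort(locks.begin(), locks.end());
  locks.erase(std::unique(locks.begin(), locks.end()), locks.end());
  for (UniversalLockId id : locks) {
    acquire(id);  // strictly ascending: no wait-for cycle is possible
  }
}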