#include <glog/logging.h>

#include <algorithm>  // std::min, std::sort, std::is_sorted
#include <cstring>    // std::memmove
#include <ostream>

std::ostream& operator<<(std::ostream& o, const SysxctLockEntry& v) {
  o << "<SysxctLockEntry>";
  // ... (other members elided in this excerpt)
  o << "<mcs_block_>" << v.mcs_block_ << "</mcs_block_>";
  // ...
  o << "</SysxctLockEntry>";
  return o;
}

std::ostream& operator<<(std::ostream& o, const SysxctLockList& v) {
  o << "<SysxctLockList>";
  // ... (summary fields elided in this excerpt)
  const uint32_t kMaxShown = 32U;
  for (auto i = 1U; i <= std::min(v.last_active_entry_, kMaxShown); ++i) {
    o << std::endl << v.array_[i];
  }
  if (v.last_active_entry_ > kMaxShown) {
    o << std::endl << "<too_many />";
  }
  o << "</SysxctLockList>";
  return o;
}

std::ostream& operator<<(std::ostream& o, const SysxctWorkspace& v) {
  o << "<SysxctWorkspace>"
    // ... (running_sysxct_ and the lock list elided in this excerpt)
    << "</SysxctWorkspace>";
  return o;
}

void SysxctLockList::assert_sorted_impl() const {
  const SysxctLockEntry* array = get_array();
  // ...
  for (LockListPosition pos = 2U; pos <= last_active_entry_; ++pos) {
    // Entries must be in strictly increasing canonical order, and an
    // active entry never carries the null (0) lock id.
    ASSERT_ND(array[pos - 1U].universal_lock_id_ < array[pos].universal_lock_id_);
    ASSERT_ND(array[pos].universal_lock_id_ != 0);
  }
}

LockListPosition SysxctLockList::lower_bound(UniversalLockId lock) const {
  return lock_lower_bound<SysxctLockList, SysxctLockEntry>(*this, lock);
}
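
// The following is a minimal standalone sketch (sketch_lower_bound and Entry
// are illustrative stand-ins, not FOEDUS API) of what a 1-based lower_bound
// over the sorted lock array computes: the first position whose universal
// lock id is >= the probe.
#include <algorithm>
#include <cassert>
#include <cstdint>

struct Entry { uintptr_t universal_lock_id_; };

uint32_t sketch_lower_bound(const Entry* array, uint32_t last_active, uintptr_t lock) {
  const Entry* begin = array + 1;              // position 0 is an unused sentinel
  const Entry* end = array + last_active + 1;
  const Entry* it = std::lower_bound(
    begin, end, lock,
    [](const Entry& e, uintptr_t v) { return e.universal_lock_id_ < v; });
  return static_cast<uint32_t>(it - array);    // last_active + 1 if none is >=
}

int main() {
  Entry a[4] = {{0}, {10}, {20}, {30}};
  assert(sketch_lower_bound(a, 3, 20) == 2);   // exact hit
  assert(sketch_lower_bound(a, 3, 25) == 3);   // first larger entry
  assert(sketch_lower_bound(a, 3, 40) == 4);   // past the end: append position
  return 0;
}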

LockListPosition SysxctLockList::get_or_add_entry(
  storage::VolatilePagePointer page_id, uintptr_t lock_addr, bool page_lock) {
  if (UNLIKELY(is_full())) {
    LOG(FATAL) << "SysxctLockList full. This must not happen";
  }
  const UniversalLockId id = to_universal_lock_id(page_id, lock_addr);
  const LockListPosition insert_pos = lower_bound(id);

  // Easiest case: every existing entry is smaller. Just append to the tail.
  if (insert_pos > last_active_entry_) {
    ASSERT_ND(insert_pos == last_active_entry_ + 1U);
    const LockListPosition new_pos = issue_new_position();
    array_[new_pos].set(id, lock_addr, page_lock);
    return new_pos;
  }

  // Next-easiest case: the entry already exists.
  ASSERT_ND(array_[insert_pos].universal_lock_id_ >= id);
  if (array_[insert_pos].universal_lock_id_ == id) {
    return insert_pos;
  }

  DVLOG(1) << "not an easy case. We need to adjust the order. This is costly!";
  ASSERT_ND(insert_pos <= last_active_entry_);
  ASSERT_ND(array_[insert_pos].universal_lock_id_ > id);
  ASSERT_ND(insert_pos == 1U || array_[insert_pos - 1U].universal_lock_id_ < id);

  // Shift the tail one slot to the right to make room at insert_pos.
  const LockListPosition new_last_pos = issue_new_position();
  const uint64_t moved_bytes = sizeof(SysxctLockEntry) * (new_last_pos - insert_pos);
  std::memmove(array_ + insert_pos + 1U, array_ + insert_pos, moved_bytes);
  DVLOG(1) << "Re-sorted. hope this won't happen often";
  array_[insert_pos].set(id, lock_addr, page_lock);
  if (last_locked_entry_ >= insert_pos) {
    ++last_locked_entry_;
  }
  return insert_pos;
}
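
// A minimal standalone sketch of the three insertion cases above (append,
// exact hit, shift-insert). MiniList, MiniEntry, and get_or_add are
// simplified illustrative stand-ins, not the real SysxctLockList.
#include <cassert>
#include <cstdint>
#include <cstring>

struct MiniEntry { uintptr_t id_; };

struct MiniList {
  MiniEntry array_[16];
  uint32_t last_active_ = 0;  // 1-based; slot 0 is an unused sentinel

  uint32_t get_or_add(uintptr_t id) {
    uint32_t pos = 1;
    while (pos <= last_active_ && array_[pos].id_ < id) { ++pos; }
    if (pos > last_active_) {        // case 1: larger than all entries: append
      array_[++last_active_] = {id};
      return pos;
    }
    if (array_[pos].id_ == id) {     // case 2: already tracked: reuse the entry
      return pos;
    }
    // case 3: shift the tail right one slot to keep the list sorted
    std::memmove(array_ + pos + 1, array_ + pos,
                 sizeof(MiniEntry) * (last_active_ + 1 - pos));
    array_[pos] = {id};
    ++last_active_;
    return pos;
  }
};

int main() {
  MiniList l;
  assert(l.get_or_add(20) == 1);  // append
  assert(l.get_or_add(40) == 2);  // append
  assert(l.get_or_add(20) == 1);  // exact hit
  assert(l.get_or_add(30) == 2);  // shift-insert between 20 and 40
  assert(l.array_[3].id_ == 40);  // 40 slid one slot to the right
  return 0;
}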

LockListPosition SysxctLockList::batch_get_or_add_entries(
  storage::VolatilePagePointer page_id,
  uint32_t lock_count,
  uintptr_t* lock_addr,
  bool page_lock) {
  if (UNLIKELY(last_active_entry_ + lock_count > get_capacity())) {
    LOG(FATAL) << "SysxctLockList full. This must not happen";
  }
  if (lock_count == 0) {
    return kLockListPositionInvalid;
  }

  // The algorithm below requires the addresses in sorted order. Within the
  // same page, address order implies universal-lock-id order.
  if (std::is_sorted(lock_addr, lock_addr + lock_count)) {
    DVLOG(3) << "Okay, sorted.";
  } else {
    LOG(INFO) << "Given array is not sorted. We have to sort it here. Is it intended??";
    std::sort(lock_addr, lock_addr + lock_count);
  }

  const UniversalLockId min_id = to_universal_lock_id(page_id, lock_addr[0]);
  const UniversalLockId max_id = to_universal_lock_id(page_id, lock_addr[lock_count - 1U]);
  const LockListPosition insert_pos = lower_bound(min_id);
  if (insert_pos > last_active_entry_) {
    ASSERT_ND(insert_pos == last_active_entry_ + 1U);
    // Easiest case: we are appending at the tail. Nothing can overlap.
  } else {
    const UniversalLockId next_id = array_[insert_pos].universal_lock_id_;
    if (next_id > max_id) {
      DVLOG(3) << "Okay, no overlap. Much easier.";
    } else {
      DVLOG(0) << "Overlapped entry for a large batch. Well, unfortunate. but it happens.";
      // Slow path: fall back to inserting one entry at a time.
      for (uint32_t i = 0; i < lock_count; ++i) {
        get_or_add_entry(page_id, lock_addr[i], page_lock);
      }
      return lower_bound(min_id);
    }
  }

  // Fast path: [min_id, max_id] overlaps no existing entry, so we shift the
  // tail once and fill the new entries in one tight loop.
  ASSERT_ND(insert_pos > last_active_entry_ || array_[insert_pos].universal_lock_id_ >= max_id);
  ASSERT_ND(last_active_entry_ + 1U >= insert_pos);
  const uint32_t move_count = last_active_entry_ + 1U - insert_pos;
  if (move_count > 0) {
    const uint64_t moved_bytes = sizeof(SysxctLockEntry) * move_count;
    std::memmove(array_ + insert_pos + lock_count, array_ + insert_pos, moved_bytes);
  }
  if (last_locked_entry_ >= insert_pos) {
    last_locked_entry_ += lock_count;
  }
  for (uint32_t i = 0; i < lock_count; ++i) {
    const UniversalLockId id = to_universal_lock_id(page_id, lock_addr[i]);
    array_[insert_pos + i].set(id, lock_addr[i], page_lock);
  }
  last_active_entry_ += lock_count;
  return insert_pos;
}
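
// A minimal standalone sketch of the fast-path test above (BEntry and
// batch_overlaps are illustrative stand-ins): after sorting the batch, one
// lower_bound on the smallest id decides whether the whole batch can be
// inserted as a single contiguous block.
#include <cassert>
#include <cstdint>

struct BEntry { uintptr_t id_; };

// True when some existing entry falls inside [min_id, max_id], forcing the
// one-at-a-time slow path.
bool batch_overlaps(const BEntry* array, uint32_t last_active,
                    uintptr_t min_id, uintptr_t max_id) {
  uint32_t pos = 1;  // 1-based, like the lock list above
  while (pos <= last_active && array[pos].id_ < min_id) { ++pos; }
  return pos <= last_active && array[pos].id_ <= max_id;
}

int main() {
  BEntry a[3] = {{0}, {10}, {50}};
  assert(!batch_overlaps(a, 2, 20, 40));  // gap between 10 and 50: fast path
  assert(batch_overlaps(a, 2, 20, 60));   // 50 falls inside the range: slow path
  return 0;
}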

void SysxctLockList::compress_entries(UniversalLockId enclosing_max_lock_id) {
  // Keep only the entries that were actually requested in this run,
  // sliding them to the front while preserving their sorted order.
  LockListPosition next_compressed_pos = 1U;
  for (LockListPosition pos = 1U; pos <= last_active_entry_; ++pos) {
    if (array_[pos].used_in_this_run_) {
      if (pos != next_compressed_pos) {
        array_[next_compressed_pos] = array_[pos];
      }
      ++next_compressed_pos;
    }
  }
  last_active_entry_ = next_compressed_pos - 1U;
  enclosing_max_lock_id_ = enclosing_max_lock_id;
}
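
// A minimal standalone sketch of the compression above (UEntry and compress
// are illustrative stand-ins): used entries keep their relative order while
// unused ones are dropped, so the list stays sorted after a retry.
#include <cassert>
#include <cstdint>

struct UEntry { uintptr_t id_; bool used_; };

// Returns the new number of active entries (positions are 1-based).
uint32_t compress(UEntry* array, uint32_t last_active) {
  uint32_t next = 1;
  for (uint32_t pos = 1; pos <= last_active; ++pos) {
    if (array[pos].used_) {
      if (pos != next) { array[next] = array[pos]; }
      ++next;
    }
  }
  return next - 1;
}

int main() {
  UEntry a[5] = {{0, false}, {10, true}, {20, false}, {30, true}, {40, false}};
  assert(compress(a, 4) == 2);
  assert(a[1].id_ == 10 && a[2].id_ == 30);  // gaps removed, order preserved
  return 0;
}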