libfoedus-core
FOEDUS Core Library
xct_id.cpp
/*
 * Copyright (c) 2014-2015, Hewlett-Packard Development Company, LP.
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details. You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * HP designates this particular file as subject to the "Classpath" exception
 * as provided by HP in the LICENSE.txt file that accompanied this code.
 */
#include "foedus/xct/xct_id.hpp"

#include <ostream>

#include "foedus/engine.hpp"
#include "foedus/storage/page.hpp"
#include "foedus/thread/thread.hpp"

namespace foedus {
namespace xct {

UniversalLockId to_universal_lock_id(
  const memory::GlobalVolatilePageResolver& resolver,
  uintptr_t lock_ptr) {
  storage::assert_within_valid_volatile_page(resolver, reinterpret_cast<void*>(lock_ptr));
  const storage::Page* page = storage::to_page(reinterpret_cast<void*>(lock_ptr));
  const auto& page_header = page->get_header();
  ASSERT_ND(!page_header.snapshot_);
  storage::VolatilePagePointer vpp(storage::construct_volatile_page_pointer(page_header.page_id_));
  const uint64_t node = vpp.get_numa_node();
  const uint64_t page_index = vpp.get_offset();

  // See assert_within_valid_volatile_page() for why we can't do these assertions.
  // ASSERT_ND(lock_ptr >= base + vpp.components.offset * storage::kPageSize);
  // ASSERT_ND(lock_ptr < base + (vpp.components.offset + 1U) * storage::kPageSize);

  // Although we have the addresses in resolver, we can NOT use it to calculate the offset
  // because the base might be a different VA (though pointing to the same physical address).
  // We thus calculate UniversalLockId purely from PageId in the page header and in_page_offset.
  // Thus, actually this function uses resolver only for assertions (so far)!
  ASSERT_ND(node < resolver.numa_node_count_);
  ASSERT_ND(vpp.get_offset() >= resolver.begin_);
  ASSERT_ND(vpp.get_offset() < resolver.end_);
  return to_universal_lock_id(node, page_index, lock_ptr);
}

RwLockableXctId* from_universal_lock_id(
  const memory::GlobalVolatilePageResolver& resolver,
  UniversalLockId universal_lock_id) {
  uint16_t node = universal_lock_id >> 48;
  uint64_t offset = universal_lock_id & ((1ULL << 48) - 1ULL);
  uintptr_t base = reinterpret_cast<uintptr_t>(resolver.bases_[node]);
  return reinterpret_cast<RwLockableXctId*>(base + offset);
}

bool RwLockableXctId::is_hot(thread::Thread* context) const {
  return storage::to_page(this)->get_header().contains_hot_records(context);
}

void RwLockableXctId::hotter(thread::Thread* context) const {
  storage::to_page(this)->get_header().hotness_.increment(&context->get_lock_rnd());
}

void McsWwLock::ownerless_acquire_lock() {
  McsWwImpl::ownerless_acquire_unconditional(this);
}

void McsWwLock::ownerless_release_lock() {
  McsWwImpl::ownerless_release(this);
}

void McsWwLock::ownerless_initial_lock() {
  McsWwImpl::ownerless_initial(this);
}

McsOwnerlessLockScope::McsOwnerlessLockScope() : lock_(nullptr), locked_by_me_(false) {}
McsOwnerlessLockScope::McsOwnerlessLockScope(
  McsWwLock* lock,
  bool acquire_now,
  bool non_racy_acquire)
  : lock_(lock), locked_by_me_(false) {
  if (acquire_now) {
    acquire(non_racy_acquire);
  }
}
McsOwnerlessLockScope::~McsOwnerlessLockScope() {
  release();
  lock_ = nullptr;
  locked_by_me_ = false;
}

void McsOwnerlessLockScope::acquire(bool non_racy_acquire) {
  if (is_valid()) {
    if (!is_locked_by_me()) {
      if (non_racy_acquire) {
        lock_->ownerless_initial_lock();
      } else {
        lock_->ownerless_acquire_lock();
      }
      locked_by_me_ = true;
    }
  }
}

void McsOwnerlessLockScope::release() {
  if (is_valid()) {
    if (is_locked_by_me()) {
      lock_->ownerless_release_lock();
      locked_by_me_ = false;
    }
  }
}

bool McsRwSimpleBlock::timeout_granted(int32_t timeout) {
  if (timeout == kTimeoutNever) {
    assorted::spin_until([this]{ return this->is_granted(); });
    return true;
  } else {
    while (--timeout) {
      if (is_granted()) {
        return true;
      }
      assorted::yield_if_valgrind();
    }
    return is_granted();
  }
}

bool McsRwExtendedBlock::timeout_granted(int32_t timeout) {
  if (timeout == kTimeoutZero) {
    return pred_flag_is_granted();
  } else if (timeout == kTimeoutNever) {
    assorted::spin_until([this]{ return this->pred_flag_is_granted(); });
    return true;
  } else {
    int32_t cycles = 0;
    do {
      if (pred_flag_is_granted()) {
        return true;
      }
      assorted::yield_if_valgrind();
    } while (++cycles < timeout);
  }
  return pred_flag_is_granted();
}

std::ostream& operator<<(std::ostream& o, const McsWwLock& v) {
  o << "<McsWwLock><locked>" << v.is_locked() << "</locked><tail_waiter>"
    << v.get_tail_waiter() << "</tail_waiter><tail_block>" << v.get_tail_waiter_block()
    << "</tail_block></McsWwLock>";
  return o;
}

std::ostream& operator<<(std::ostream& o, const XctId& v) {
  o << "<XctId epoch=\"" << v.get_epoch()
    << "\" ordinal=\"" << v.get_ordinal()
    << "\" status=\""
    << (v.is_deleted() ? "D" : " ")
    << (v.is_moved() ? "M" : " ")
    << (v.is_being_written() ? "W" : " ")
    << (v.is_next_layer() ? "N" : " ")
    << "\" />";
  return o;
}

std::ostream& operator<<(std::ostream& o, const LockableXctId& v) {
  o << "<LockableXctId>" << v.xct_id_ << v.lock_ << "</LockableXctId>";
  return o;
}

std::ostream& operator<<(std::ostream& o, const McsRwLock& v) {
  o << "<McsRwLock><locked>" << v.is_locked() << "</locked><tail_waiter>"
    << v.get_tail_waiter() << "</tail_waiter><tail_block>" << v.get_tail_waiter_block()
    << "</tail_block></McsRwLock>";
  return o;
}

std::ostream& operator<<(std::ostream& o, const RwLockableXctId& v) {
  o << "<RwLockableXctId>" << v.xct_id_ << v.lock_ << "</RwLockableXctId>";
  return o;
}

static_assert(storage::kPageSize == kLockPageSize, "kLockPageSize incorrect");

}  // namespace xct
}  // namespace foedus
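
The conversion pair above fixes the layout of UniversalLockId: the NUMA node sits in the top 16 bits and the byte offset within that node's volatile pool sits in the low 48 bits, exactly as from_universal_lock_id() decodes it. The standalone sketch below is not part of xct_id.cpp; the encode_lock_id helper and the 4 KB page size are assumptions used only to illustrate the round trip.

#include <cassert>
#include <cstdint>

// Assumed to match storage::kPageSize / kLockPageSize (4 KB pages).
constexpr uint64_t kPageSize = 1ULL << 12;

// Hypothetical helper mirroring the arithmetic implied above:
// node in bits 63..48, pool-relative byte offset in bits 47..0.
uint64_t encode_lock_id(uint64_t node, uint64_t page_index, uint64_t in_page_offset) {
  return (node << 48) | (page_index * kPageSize + in_page_offset);
}

int main() {
  const uint64_t id = encode_lock_id(/*node=*/2, /*page_index=*/12345, /*in_page_offset=*/0x40);
  // Decode exactly as from_universal_lock_id() does above.
  assert((id >> 48) == 2);
  assert((id & ((1ULL << 48) - 1ULL)) == 12345 * kPageSize + 0x40);
  return 0;
}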
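
McsOwnerlessLockScope, implemented above, is an RAII guard over the ownerless ("guest") exclusive lock: the constructor can acquire immediately, and the destructor releases only if this scope did the acquiring. Below is a minimal usage sketch, assuming the FOEDUS headers are on the include path and that a default-constructed McsWwLock starts in the unlocked state.

#include "foedus/xct/xct_id.hpp"

void guest_lock_example() {
  foedus::xct::McsWwLock lock;  // assumption: default construction yields an unlocked lock
  {
    // acquire_now=true takes the guest lock in the constructor; non_racy_acquire=false
    // uses the regular contended path (ownerless_acquire_lock()). Passing true instead
    // would use ownerless_initial_lock(), which assumes no concurrent access is possible.
    foedus::xct::McsOwnerlessLockScope scope(&lock, true, false);
    // ... critical section: the exclusive guest lock is held here ...
  }  // destructor calls release(), which unlocks because this scope acquired the lock
}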
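
Both timeout_granted() implementations above follow the same contract: kTimeoutZero polls the grant flag once, kTimeoutNever spins until the lock is granted, and any other value spins for roughly that many iterations before reporting the current state. The caller sketch below is illustrative only; wait_for_grant and the 4096-iteration budget are not FOEDUS APIs, and it assumes kTimeoutNever is accessible as the static member its use in timeout_granted() suggests.

#include <cstdint>
#include "foedus/xct/xct_id.hpp"

// Illustrative wrapper: spin indefinitely when blocking is acceptable, otherwise give up
// after a bounded number of polls and let the caller fall back (e.g., abort and retry).
bool wait_for_grant(foedus::xct::McsRwSimpleBlock* block, bool can_block) {
  const int32_t timeout = can_block
      ? foedus::xct::McsRwSimpleBlock::kTimeoutNever
      : 4096;  // arbitrary spin budget chosen for illustration
  return block->timeout_granted(timeout);
}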