sysxct_impl.cpp (libfoedus-core: FOEDUS Core Library)
/*
 * Copyright (c) 2014-2015, Hewlett-Packard Development Company, LP.
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details. You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * HP designates this particular file as subject to the "Classpath" exception
 * as provided by HP in the LICENSE.txt file that accompanied this code.
 */
#include "foedus/xct/sysxct_impl.hpp"

#include <glog/logging.h>

#include <algorithm>
#include <cstring>
#include <ostream>

#include "foedus/storage/page.hpp"
namespace foedus {
namespace xct {

std::ostream& operator<<(std::ostream& o, const SysxctLockEntry& v) {
  o << "<SysxctLockEntry>"
    << "<LockId>" << v.universal_lock_id_ << "</LockId>"
    << "<used>" << v.used_in_this_run_ << "</used>";
  if (v.mcs_block_) {
    o << "<mcs_block_>" << v.mcs_block_ << "</mcs_block_>";
    if (v.page_lock_) {
      o << v.get_as_page_lock()->get_header();
    } else {
      o << *(v.get_as_record_lock());
    }
  } else {
    o << "<NotLocked />";
  }
  o << "</SysxctLockEntry>";
  return o;
}

std::ostream& operator<<(std::ostream& o, const SysxctLockList& v) {
  o << "<SysxctLockList>"
    << "<Capacity>" << v.get_capacity() << "</Capacity>"
    << "<LastActiveEntry>" << v.get_last_active_entry() << "</LastActiveEntry>"
    << "<LastLockedEntry>" << v.get_last_locked_entry() << "</LastLockedEntry>"
    << "<EnclosingMaxLockId>" << v.get_enclosing_max_lock_id() << "</EnclosingMaxLockId>";
  const uint32_t kMaxShown = 32U;
  for (auto i = 1U; i <= std::min(v.last_active_entry_, kMaxShown); ++i) {
    o << std::endl << v.array_[i];
  }
  if (v.last_active_entry_ > kMaxShown) {
    o << std::endl << "<too_many />";
  }
  o << "</SysxctLockList>";
  return o;
}

std::ostream& operator<<(std::ostream& o, const SysxctWorkspace& v) {
  o << "<SysxctWorkspace>"
    << "<running_sysxct_>" << v.running_sysxct_ << "</running_sysxct_>"
    << v.lock_list_
    << "</SysxctWorkspace>";
  return o;
}

void SysxctLockList::assert_sorted_impl() const {
  assert_last_locked_entry();
  const SysxctLockEntry* array = get_array();
  ASSERT_ND(array[kLockListPositionInvalid].universal_lock_id_ == 0);
  ASSERT_ND(array[kLockListPositionInvalid].lock_ == kNullUniversalLockId);
  ASSERT_ND(array[kLockListPositionInvalid].mcs_block_ == 0);
  const LockListPosition last_active_entry = get_last_active_entry();
  for (LockListPosition pos = 2U; pos <= last_active_entry; ++pos) {
    ASSERT_ND(array[pos - 1U].universal_lock_id_ < array[pos].universal_lock_id_);
    ASSERT_ND(array[pos].universal_lock_id_ != 0);
    ASSERT_ND(array[pos].lock_ != kNullUniversalLockId);
    const storage::Page* page = storage::to_page(reinterpret_cast<void*>(array[pos].lock_));
    auto page_id = page->get_volatile_page_id();
    ASSERT_ND(array[pos].universal_lock_id_ == to_universal_lock_id(page_id, array[pos].lock_));
  }
}
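
// [Illustrative sketch; not part of the original file.] The loop above checks
// that universal_lock_id_ is strictly increasing. Over a plain array, the same
// invariant can be stated with std::adjacent_find, as in this hypothetical
// helper (uses only <algorithm>, which is already included):
inline bool sketch_is_strictly_sorted(const uint64_t* ids, uint32_t count) {
  // A strictly increasing sequence has no adjacent pair with left >= right.
  const uint64_t* end = ids + count;
  return std::adjacent_find(
      ids, end, [](uint64_t left, uint64_t right) { return left >= right; }) == end;
}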

LockListPosition SysxctLockList::lower_bound(UniversalLockId lock) const {
  return lock_lower_bound<SysxctLockList, SysxctLockEntry>(*this, lock);
}
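
// [Illustrative sketch; not part of the original file.] lock_lower_bound() is
// defined in the header. Its contract over the 1-based entry array (position 0
// is the kLockListPositionInvalid sentinel) boils down to std::lower_bound, as
// in this hypothetical standalone equivalent:
inline uint32_t sketch_lower_bound(const uint64_t* ids, uint32_t last_active, uint64_t key) {
  // Searches positions [1, last_active]; returns last_active + 1 when every
  // existing entry is smaller than key. get_or_add_entry() below relies on
  // exactly this contract.
  return static_cast<uint32_t>(
      std::lower_bound(ids + 1U, ids + last_active + 1U, key) - ids);
}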

LockListPosition SysxctLockList::get_or_add_entry(
  storage::VolatilePagePointer page_id,
  uintptr_t lock_addr,
  bool page_lock) {
  ASSERT_ND(!is_full());
  if (UNLIKELY(is_full())) {
    LOG(FATAL) << "SysxctLockList full. This must not happen";
  }
  const UniversalLockId id = to_universal_lock_id(page_id, lock_addr);

  // Easy case? (lock >= the last entry)
  LockListPosition insert_pos = lower_bound(id);
  ASSERT_ND(insert_pos != kLockListPositionInvalid);

  // Larger than all existing entries? Append to the last!
  if (insert_pos > last_active_entry_) {
    ASSERT_ND(insert_pos == last_active_entry_ + 1U);
    LockListPosition new_pos = issue_new_position();
    array_[new_pos].set(id, lock_addr, page_lock);
    ASSERT_ND(new_pos == insert_pos);
    return new_pos;
  }

  // lower_bound() returns the first entry that is NOT less than the key.
  // Is it exactly equal?
  ASSERT_ND(array_[insert_pos].universal_lock_id_ >= id);
  if (array_[insert_pos].universal_lock_id_ == id) {
    // Found an existing entry!
    return insert_pos;
  }

  DVLOG(1) << "not an easy case. We need to adjust the order. This is costly!";
  ASSERT_ND(insert_pos <= last_active_entry_);  // otherwise we went into the 1st branch
  ASSERT_ND(array_[insert_pos].universal_lock_id_ > id);  // if ==, we went into the 2nd branch
  ASSERT_ND(insert_pos == 1U || array_[insert_pos - 1U].universal_lock_id_ < id);

  LockListPosition new_last_pos = issue_new_position();
  ASSERT_ND(new_last_pos > insert_pos);
  uint64_t moved_bytes = sizeof(SysxctLockEntry) * (new_last_pos - insert_pos);
  std::memmove(array_ + insert_pos + 1U, array_ + insert_pos, moved_bytes);
  DVLOG(1) << "Re-sorted. hope this won't happen often";
  array_[insert_pos].set(id, lock_addr, page_lock);
  if (last_locked_entry_ >= insert_pos) {
    ++last_locked_entry_;
  }

  assert_sorted();
  return insert_pos;
}
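
// [Illustrative sketch; not part of the original file.] get_or_add_entry() is
// a classic sorted insert: binary-search the position, shift the tail right by
// one slot with a single memmove, and write the new entry into the hole. A
// minimal standalone version over a plain array, with hypothetical names:
inline uint32_t sketch_sorted_insert(uint64_t* ids, uint32_t* count, uint64_t id) {
  // Caller guarantees capacity for one more entry, as is_full() does above.
  uint64_t* pos = std::lower_bound(ids, ids + *count, id);
  if (pos != ids + *count && *pos == id) {
    return static_cast<uint32_t>(pos - ids);  // already in the list; reuse it
  }
  std::memmove(pos + 1U, pos, sizeof(uint64_t) * (ids + *count - pos));
  *pos = id;
  ++*count;
  return static_cast<uint32_t>(pos - ids);
}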

LockListPosition SysxctLockList::batch_get_or_add_entries(
  storage::VolatilePagePointer page_id,
  uint32_t lock_count,
  uintptr_t* lock_addr,
  bool page_lock) {
  ASSERT_ND(lock_count);
  ASSERT_ND(get_last_active_entry() + lock_count <= kMaxSysxctLocks);
  if (UNLIKELY(get_last_active_entry() + lock_count > kMaxSysxctLocks)) {
    LOG(FATAL) << "SysxctLockList full. This must not happen";
  }
  if (lock_count == 0) {
    return kLockListPositionInvalid;
  }

  // Address order within the same page guarantees the universal lock ID order.
  if (std::is_sorted(lock_addr, lock_addr + lock_count)) {
    DVLOG(3) << "Okay, sorted.";
  } else {
    // Most likely we hit this case for page locks (just 2 or 3 locks).
    // Otherwise, something is wrong...
    if (UNLIKELY(lock_count > 5U)) {
      LOG(INFO) << "Given array is not sorted. We have to sort it here. Is it intended??";
    }
    std::sort(lock_addr, lock_addr + lock_count);
    // If this branch were the majority case, the std::is_sorted() check above
    // would be a wasted cost (std::sort often does something similar
    // internally). But this branch should almost never be taken, and in that
    // case std::is_sorted() alone is faster.
  }

  const UniversalLockId min_id = to_universal_lock_id(page_id, lock_addr[0]);
  const UniversalLockId max_id = to_universal_lock_id(page_id, lock_addr[lock_count - 1]);
  ASSERT_ND(min_id <= max_id);

  // We optimize for the case where this list has nothing in [min_id, max_id].
  // When that is not the case, we just call get_or_add_entry() in a for loop.
  // A large lock_count should mean a page-split sysxct, so the overlapping
  // case should be rare.

  const LockListPosition insert_pos = lower_bound(min_id);
  ASSERT_ND(insert_pos != kLockListPositionInvalid);
  if (insert_pos > last_active_entry_) {
    ASSERT_ND(insert_pos == last_active_entry_ + 1U);
    // Larger than all existing entries. This is easy. We just append all to the end.
  } else {
    const UniversalLockId next_id = array_[insert_pos].universal_lock_id_;
    ASSERT_ND(next_id >= min_id);  // lower_bound()'s contract
    if (next_id > max_id) {
      DVLOG(3) << "Okay, no overlap. Much easier.";
    } else {
      // This is unfortunate, but it happens... especially with pessimistic locks.
      if (UNLIKELY(lock_count > 5U)) {
        DVLOG(0) << "Overlapped entry for a large batch. Well, unfortunate, but it happens.";
      }

      // Fall back to the slow path.
      for (uint32_t i = 0; i < lock_count; ++i) {
        LockListPosition pos = get_or_add_entry(page_id, lock_addr[i], page_lock);
        ASSERT_ND(pos >= insert_pos);
      }
      return insert_pos;
    }
  }

  ASSERT_ND(insert_pos > last_active_entry_ || array_[insert_pos].universal_lock_id_ >= max_id);

  // Move the existing entries at once, then set() in a tight for loop.
  ASSERT_ND(last_active_entry_ + 1U >= insert_pos);
  const uint32_t move_count = last_active_entry_ + 1U - insert_pos;
  if (move_count > 0) {
    const uint64_t moved_bytes = sizeof(SysxctLockEntry) * move_count;
    std::memmove(array_ + insert_pos + lock_count, array_ + insert_pos, moved_bytes);
  }
  if (last_locked_entry_ >= insert_pos) {
    last_locked_entry_ += lock_count;
  }

  for (uint32_t i = 0; i < lock_count; ++i) {
    const UniversalLockId id = to_universal_lock_id(page_id, lock_addr[i]);
    array_[insert_pos + i].set(id, lock_addr[i], page_lock);
  }

  last_active_entry_ += lock_count;
  assert_sorted();
  return insert_pos;
}
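
// [Illustrative sketch; not part of the original file.] The fast path above
// shifts the tail once and then fills the resulting hole in a tight loop,
// instead of paying one memmove per entry as the slow path does. Standalone,
// with hypothetical names:
inline void sketch_batch_insert(
    uint64_t* ids, uint32_t* count, const uint64_t* batch, uint32_t batch_count) {
  // Preconditions, checked by the caller above: batch is sorted, the list has
  // spare capacity, and no existing entry falls inside [batch[0], batch[last]].
  uint64_t* pos = std::lower_bound(ids, ids + *count, batch[0]);
  std::memmove(pos + batch_count, pos, sizeof(uint64_t) * (ids + *count - pos));
  for (uint32_t i = 0; i < batch_count; ++i) {
    pos[i] = batch[i];
  }
  *count += batch_count;
}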

void SysxctLockList::compress_entries(UniversalLockId enclosing_max_lock_id) {
  assert_sorted();
  ASSERT_ND(last_locked_entry_ == kLockListPositionInvalid);
  LockListPosition next_compressed_pos = 1U;
  for (LockListPosition pos = 1U; pos <= last_active_entry_; ++pos) {
    ASSERT_ND(!array_[pos].is_locked());
    ASSERT_ND(pos >= next_compressed_pos);
    if (array_[pos].used_in_this_run_) {
      // Okay, let's inherit this entry.
      if (pos != next_compressed_pos) {
        array_[next_compressed_pos] = array_[pos];
      }
      array_[next_compressed_pos].used_in_this_run_ = false;
      ++next_compressed_pos;
    }
  }
  last_active_entry_ = next_compressed_pos - 1U;
  enclosing_max_lock_id_ = enclosing_max_lock_id;
  assert_sorted();
}
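
// [Illustrative sketch; not part of the original file.] compress_entries() is
// a stable in-place filter, the same shape as std::remove_if with the
// predicate inverted: used entries slide left, relative order is preserved,
// and positions at or beyond the returned count become inactive. A standalone
// equivalent over parallel arrays, with hypothetical names:
inline uint32_t sketch_compress(uint64_t* ids, bool* used, uint32_t count) {
  uint32_t kept = 0;
  for (uint32_t i = 0; i < count; ++i) {
    if (used[i]) {
      ids[kept] = ids[i];  // no-op when kept == i, like the guard above
      used[kept] = false;  // reset the flag for the next run
      ++kept;
    }
  }
  return kept;  // the new entry count
}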

}  // namespace xct
}  // namespace foedus