libfoedus-core
FOEDUS Core Library
xct.cpp
/*
 * Copyright (c) 2014-2015, Hewlett-Packard Development Company, LP.
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details. You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * HP designates this particular file as subject to the "Classpath" exception
 * as provided by HP in the LICENSE.txt file that accompanied this code.
 */
#include "foedus/xct/xct.hpp"

#include <glog/logging.h>

#include <cstring>
#include <ostream>

#include "foedus/engine.hpp"
#include "foedus/assorted/atomic_fences.hpp"
#include "foedus/engine_options.hpp"
#include "foedus/log/log_type_invoke.hpp"
#include "foedus/memory/engine_memory.hpp"
#include "foedus/memory/numa_core_memory.hpp"
#include "foedus/savepoint/savepoint_manager.hpp"
#include "foedus/storage/page.hpp"
#include "foedus/thread/thread.hpp"
#include "foedus/xct/sysxct_impl.hpp"
#include "foedus/xct/xct_access.hpp"
#include "foedus/xct/xct_manager.hpp"
#include "foedus/xct/xct_options.hpp"

namespace foedus {
namespace xct {
Xct::Xct(Engine* engine, thread::Thread* context, thread::ThreadId thread_id)
  : engine_(engine), context_(context), thread_id_(thread_id) {
  id_ = XctId();
  active_ = false;

  default_rll_for_this_xct_ = false;
  enable_rll_for_this_xct_ = default_rll_for_this_xct_;
  default_hot_threshold_for_this_xct_ = storage::StorageOptions::kDefaultHotThreshold;
  hot_threshold_for_this_xct_ = default_hot_threshold_for_this_xct_;
  default_rll_threshold_for_this_xct_ = XctOptions::kDefaultHotThreshold;
  rll_threshold_for_this_xct_ = default_rll_threshold_for_this_xct_;

  sysxct_workspace_ = nullptr;

  read_set_ = nullptr;
  read_set_size_ = 0;
  max_read_set_size_ = 0;
  write_set_ = nullptr;
  write_set_size_ = 0;
  max_write_set_size_ = 0;
  lock_free_read_set_ = nullptr;
  lock_free_read_set_size_ = 0;
  max_lock_free_read_set_size_ = 0;
  lock_free_write_set_ = nullptr;
  lock_free_write_set_size_ = 0;
  max_lock_free_write_set_size_ = 0;
  pointer_set_size_ = 0;
  page_version_set_size_ = 0;
  isolation_level_ = kSerializable;
  mcs_block_current_ = nullptr;
  mcs_rw_async_mapping_current_ = nullptr;
  local_work_memory_ = nullptr;
  local_work_memory_size_ = 0;
  local_work_memory_cur_ = 0;
}
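
// Note: the constructor above only null-initializes fields. No memory is
// wired up until initialize() below runs on the owning worker thread, so an
// Xct must not run transactions before that second step completes.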

void Xct::initialize(
  memory::NumaCoreMemory* core_memory,
  uint32_t* mcs_block_current,
  uint32_t* mcs_rw_async_mapping_current) {
  id_.set_epoch(engine_->get_savepoint_manager()->get_initial_current_epoch());
  id_.set_ordinal(0);  // ordinal 0 is possible only as a dummy "latest" XctId
  ASSERT_ND(id_.is_valid());
  const memory::SmallThreadLocalMemoryPieces& pieces
    = core_memory->get_small_thread_local_memory_pieces();
  const XctOptions& xct_opt = engine_->get_options().xct_;

  default_rll_for_this_xct_ = xct_opt.enable_retrospective_lock_list_;
  enable_rll_for_this_xct_ = default_rll_for_this_xct_;
  default_hot_threshold_for_this_xct_ = engine_->get_options().storage_.hot_threshold_;
  hot_threshold_for_this_xct_ = default_hot_threshold_for_this_xct_;
  default_rll_threshold_for_this_xct_ = xct_opt.hot_threshold_for_retrospective_lock_list_;
  rll_threshold_for_this_xct_ = default_rll_threshold_for_this_xct_;

  sysxct_workspace_ = reinterpret_cast<SysxctWorkspace*>(pieces.sysxct_workspace_memory_);

  read_set_ = reinterpret_cast<ReadXctAccess*>(pieces.xct_read_access_memory_);
  read_set_size_ = 0;
  max_read_set_size_ = xct_opt.max_read_set_size_;
  write_set_ = reinterpret_cast<WriteXctAccess*>(pieces.xct_write_access_memory_);
  write_set_size_ = 0;
  max_write_set_size_ = xct_opt.max_write_set_size_;
  lock_free_read_set_ = reinterpret_cast<LockFreeReadXctAccess*>(
    pieces.xct_lock_free_read_access_memory_);
  lock_free_read_set_size_ = 0;
  max_lock_free_read_set_size_ = xct_opt.max_lock_free_read_set_size_;
  lock_free_write_set_ = reinterpret_cast<LockFreeWriteXctAccess*>(
    pieces.xct_lock_free_write_access_memory_);
  lock_free_write_set_size_ = 0;
  max_lock_free_write_set_size_ = xct_opt.max_lock_free_write_set_size_;
  pointer_set_ = reinterpret_cast<PointerAccess*>(pieces.xct_pointer_access_memory_);
  pointer_set_size_ = 0;
  page_version_set_ = reinterpret_cast<PageVersionAccess*>(pieces.xct_page_version_memory_);
  page_version_set_size_ = 0;
  mcs_block_current_ = mcs_block_current;
  *mcs_block_current_ = 0;
  mcs_rw_async_mapping_current_ = mcs_rw_async_mapping_current;
  *mcs_rw_async_mapping_current_ = 0;
  local_work_memory_ = core_memory->get_local_work_memory();
  local_work_memory_size_ = core_memory->get_local_work_memory_size();
  local_work_memory_cur_ = 0;

  sysxct_workspace_->init(context_);
  current_lock_list_.init(
    core_memory->get_current_lock_list_memory(),
    core_memory->get_current_lock_list_capacity(),
    engine_->get_memory_manager()->get_global_volatile_page_resolver());
  retrospective_lock_list_.init(
    core_memory->get_retrospective_lock_list_memory(),
    core_memory->get_retrospective_lock_list_capacity(),
    engine_->get_memory_manager()->get_global_volatile_page_resolver());
}
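
// A minimal usage sketch (illustrative only; the variable names below are
// hypothetical, not code from this file): each worker thread performs the
// two-step setup once, then reuses the same Xct object for every transaction.
//
//   Xct xct(engine, thread, thread_id);            // cheap, null-initialized
//   xct.initialize(core_memory, &mcs_block, &mcs_mapping);  // wires NUMA-local buffers
//   // per transaction: activate, fill read/write sets, precommit or abort.
//
// Because all read/write/lock buffers come from pre-allocated thread-local
// memory, the per-record paths below never allocate dynamically.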

void Xct::issue_next_id(XctId max_xct_id, Epoch *epoch) {
  ASSERT_ND(id_.is_valid());

  while (true) {
    // invariant 1: Larger than latest XctId of this thread.
    XctId new_id = id_;
    // invariant 2: Larger than every XctId of any record read or written by this transaction.
    new_id.store_max(max_xct_id);
    // invariant 3: in the epoch
    if (new_id.get_epoch().before(*epoch)) {
      new_id.set_epoch(*epoch);
      new_id.set_ordinal(0);
    }
    ASSERT_ND(new_id.get_epoch() == *epoch);

    // Now, is it possible to get an ordinal one larger than this one?
    if (UNLIKELY(new_id.get_ordinal() >= kMaxXctOrdinal)) {
      // oh, that's rare.
      LOG(WARNING) << "Reached the maximum ordinal in this epoch. Advancing current epoch"
        << " just for this reason. It's rare, but not an error.";
      engine_->get_xct_manager()->advance_current_global_epoch();
      ASSERT_ND(epoch->before(engine_->get_xct_manager()->get_current_global_epoch()));
      // we have already issued fence by now, so we can use nonatomic version.
      *epoch = engine_->get_xct_manager()->get_current_global_epoch_weak();
      continue;  // try again with this epoch.
    }

    ASSERT_ND(new_id.get_ordinal() < kMaxXctOrdinal);
    new_id.set_ordinal(new_id.get_ordinal() + 1U);
    remember_previous_xct_id(new_id);
    break;
  }
}
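
// Worked example of the three invariants above: if this thread last committed
// with id_ = (epoch 5, ordinal 7), the largest TID observed in this
// transaction is (epoch 5, ordinal 9), and the commit epoch is 5, then
// store_max() lifts the candidate to (5, 9) and the increment issues (5, 10):
// strictly above the thread's own history and above every record it touched.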

std::ostream& operator<<(std::ostream& o, const Xct& v) {
  o << "<Xct>"
    << "<active_>" << v.is_active() << "</active_>";
  o << "<enable_rll_for_this_xct_>" << v.is_enable_rll_for_this_xct()
    << "</enable_rll_for_this_xct_>";
  o << "<default_rll_for_this_xct_>" << v.is_default_rll_for_this_xct()
    << "</default_rll_for_this_xct_>";
  o << "<hot_threshold>" << v.get_hot_threshold_for_this_xct() << "</hot_threshold>";
  o << "<default_hot_threshold>" << v.get_default_hot_threshold_for_this_xct()
    << "</default_hot_threshold>";
  o << "<rll_threshold>" << v.get_rll_threshold_for_this_xct() << "</rll_threshold>";
  o << "<default_rll_threshold>" << v.get_default_rll_threshold_for_this_xct()
    << "</default_rll_threshold>";
  if (v.is_active()) {
    o << "<id_>" << v.get_id() << "</id_>"
      << "<read_set_size>" << v.get_read_set_size() << "</read_set_size>"
      << "<write_set_size>" << v.get_write_set_size() << "</write_set_size>"
      << "<pointer_set_size>" << v.get_pointer_set_size() << "</pointer_set_size>"
      << "<page_version_set_size>" << v.get_page_version_set_size() << "</page_version_set_size>"
      << "<lock_free_read_set_size>" << v.get_lock_free_read_set_size()
      << "</lock_free_read_set_size>"
      << "<lock_free_write_set_size>" << v.get_lock_free_write_set_size()
      << "</lock_free_write_set_size>";
    const SysxctWorkspace* sysxct_workspace = v.get_sysxct_workspace();
    o << *sysxct_workspace;
  }
  o << "</Xct>";
  return o;
}

ErrorCode Xct::add_to_pointer_set(
  const storage::VolatilePagePointer* pointer_address,
  storage::VolatilePagePointer observed) {
  ASSERT_ND(pointer_address);
  if (isolation_level_ != kSerializable) {
    return kErrorCodeOk;
  }

  // TASK(Hideaki) even though the pointer set should be small, we don't want a sequential
  // search every time. But insertion sort requires shifting. mmm.
  for (uint32_t i = 0; i < pointer_set_size_; ++i) {
    if (pointer_set_[i].address_ == pointer_address) {
      pointer_set_[i].observed_ = observed;
      return kErrorCodeOk;
    }
  }

  if (UNLIKELY(pointer_set_size_ >= kMaxPointerSets)) {
    return kErrorCodeXctPointerSetOverflow;  // 0x0A07: too large pointer-set
  }

  // no need for fence. the observed pointer itself is the only data to verify
  pointer_set_[pointer_set_size_].address_ = pointer_address;
  pointer_set_[pointer_set_size_].observed_ = observed;
  ++pointer_set_size_;
  return kErrorCodeOk;
}
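
// The pointer set is the structural half of optimistic verification: at
// precommit, each recorded pointer is re-read and compared against observed_;
// a mismatch (e.g., a volatile page installed or moved since the read) fails
// the transaction. That is also why non-serializable levels skip it above.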

void Xct::overwrite_to_pointer_set(
  const storage::VolatilePagePointer* pointer_address,
  storage::VolatilePagePointer observed) {
  ASSERT_ND(pointer_address);
  if (isolation_level_ != kSerializable) {
    return;
  }

  for (uint32_t i = 0; i < pointer_set_size_; ++i) {
    if (pointer_set_[i].address_ == pointer_address) {
      pointer_set_[i].observed_ = observed;
      return;
    }
  }
}

ErrorCode Xct::add_to_page_version_set(
  const storage::PageVersion* version_address,
  storage::PageVersionStatus observed) {
  ASSERT_ND(version_address);
  if (isolation_level_ != kSerializable) {
    return kErrorCodeOk;
  } else if (UNLIKELY(page_version_set_size_ >= kMaxPointerSets)) {
    return kErrorCodeXctPageVersionSetOverflow;  // 0x0A06: too large page-version set
  }

  page_version_set_[page_version_set_size_].address_ = version_address;
  page_version_set_[page_version_set_size_].observed_ = observed;
  ++page_version_set_size_;
  return kErrorCodeOk;
}
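
// Similarly, the page-version set is re-checked at precommit: if the observed
// PageVersionStatus has changed (say, a page split under a range scan), the
// transaction fails verification. As the overflow error message suggests,
// workloads that trip these limits may prefer snapshot isolation.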

ErrorCode Xct::on_record_read(
  bool intended_for_write,
  RwLockableXctId* tid_address,
  XctId* observed_xid,
  ReadXctAccess** read_set_address,
  bool no_readset_if_moved,
  bool no_readset_if_next_layer) {
  ASSERT_ND(tid_address);
  ASSERT_ND(observed_xid);
  ASSERT_ND(read_set_address);
  *read_set_address = nullptr;

  const storage::Page* page = storage::to_page(reinterpret_cast<const void*>(tid_address));
  const auto& page_header = page->get_header();
  if (page_header.snapshot_) {
    // Snapshot page is immutable.
    // No read-set, lock, or check for being_written flag needed.
    *observed_xid = tid_address->xct_id_;
    ASSERT_ND(!observed_xid->is_being_written());
    return kErrorCodeOk;
  } else if (isolation_level_ != kSerializable) {
    // No read-set or read-locks needed in non-serializable transactions.
    // Also no point to conservatively take write-locks recommended by RLL
    // because we don't take any read locks in these modes, so the
    // original SILO's write-lock protocol is enough and abort-free.
    ASSERT_ND(isolation_level_ == kDirtyRead || isolation_level_ == kSnapshot);
    *observed_xid = tid_address->xct_id_.spin_while_being_written();
    ASSERT_ND(!observed_xid->is_being_written());
    return kErrorCodeOk;
  }

  const storage::VolatilePagePointer vpp
    = storage::construct_volatile_page_pointer(page_header.page_id_);
#ifndef NDEBUG
  const auto& resolver = context_->get_global_volatile_page_resolver();
  storage::assert_within_valid_volatile_page(resolver, tid_address);

  ASSERT_ND(vpp.get_numa_node() < resolver.numa_node_count_);
  ASSERT_ND(vpp.get_offset() >= resolver.begin_);
  ASSERT_ND(vpp.get_offset() < resolver.end_);
#endif  // NDEBUG

  // This is a serializable transaction, and we are reading a record from a volatile page.
  // We might take a pessimistic lock for the record, which is our MOCC protocol.
  // However, we need to do this _before_ observing XctId. Otherwise there is a
  // chance of aborts even with the lock.
  const UniversalLockId lock_id = to_universal_lock_id(
    vpp.get_numa_node(),
    vpp.get_offset(),
    reinterpret_cast<uintptr_t>(tid_address));
  on_record_read_take_locks_if_needed(intended_for_write, page, lock_id, tid_address);

  *observed_xid = tid_address->xct_id_.spin_while_being_written();
  ASSERT_ND(!observed_xid->is_being_written());

  // Now that we observe XID in its own (non-inlined) function, probably not needed...
  assorted::memory_fence_acquire();  // following reads must happen *after* observing xid

  // check non-reversible flags and skip read-set
  if (observed_xid->is_moved() && no_readset_if_moved) {
    return kErrorCodeOk;
  } else if (observed_xid->is_next_layer() && no_readset_if_next_layer) {
    return kErrorCodeOk;
  }

  const storage::StorageId storage_id = page->get_header().storage_id_;
  ASSERT_ND(storage_id != 0);
  CHECK_ERROR_CODE(add_to_read_set(
    storage_id,
    *observed_xid,
    lock_id,
    tid_address,
    read_set_address));

  return kErrorCodeOk;
}
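
// Illustrative call pattern (a hypothetical caller, not code in this file):
// a storage module reading one record would funnel through this method as
//
//   XctId observed;
//   ReadXctAccess* read_entry;
//   CHECK_ERROR_CODE(xct->on_record_read(
//     false, &record->owner_id_, &observed, &read_entry));
//   // copy the payload; precommit later re-verifies 'observed'.
//
// so every optimistic read shares the same lock/read-set logic above.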

void Xct::on_record_read_take_locks_if_needed(
  bool intended_for_write,
  const storage::Page* page_address,
  UniversalLockId lock_id,
  RwLockableXctId* tid_address) {
#ifndef NDEBUG
  const auto& resolver = context_->get_global_volatile_page_resolver();
  storage::assert_within_valid_volatile_page(resolver, tid_address);
  ASSERT_ND(lock_id == xct_id_to_universal_lock_id(resolver, tid_address));
#endif  // NDEBUG

  LockListPosition rll_pos = kLockListPositionInvalid;
  bool lets_take_lock = false;
  if (!retrospective_lock_list_.is_empty()) {
    // RLL is set, which means the previous run aborted for a race.
    // A binary-search for each read-set is not cheap, but in this case better than aborts.
    // So, let's see if we should take the lock.
    rll_pos = retrospective_lock_list_.binary_search(lock_id);
    if (rll_pos != kLockListPositionInvalid) {
      ASSERT_ND(retrospective_lock_list_.get_array()[rll_pos].universal_lock_id_ == lock_id);
      DVLOG(1) << "RLL recommends to take lock on this record!";
      lets_take_lock = true;
    }
  }

  if (!lets_take_lock && context_->is_hot_page(page_address)) {
    lets_take_lock = true;
  }

  if (lets_take_lock) {
    LockMode mode = intended_for_write ? kWriteLock : kReadLock;
    LockListPosition cll_pos = current_lock_list_.get_or_add_entry(lock_id, tid_address, mode);
    LockEntry* cll_entry = current_lock_list_.get_entry(cll_pos);
    if (cll_entry->is_enough()) {
      return;  // already had the lock
    }

    ErrorCode lock_ret;
    if (rll_pos == kLockListPositionInvalid) {
      // Then, this is a single read-lock to take.
      lock_ret = context_->cll_try_or_acquire_single_lock(cll_pos);
      // TODO(Hideaki) The above locks unconditionally in canonical mode. Even in non-canonical,
      // when it returns kErrorCodeXctLockAbort AND we haven't taken any write-lock yet,
      // we might still want a retry here.. but it has pros/cons. Revisit later.
    } else {
      // Then we should take all locks before this too.
      lock_ret = context_->cll_try_or_acquire_multiple_locks(cll_pos);
    }

    if (lock_ret != kErrorCodeOk) {
      ASSERT_ND(lock_ret == kErrorCodeXctLockAbort);
      DVLOG(0) << "Failed to take some of the locks that might be beneficial later"
        << ". We still go on because the locks here are not mandatory.";
      // At this point, no point to be advised by RLL any longer.
      // Let's clear it, and let's give-up all incomplete locks in CLL.
      context_->cll_giveup_all_locks_after(kNullUniversalLockId);
      retrospective_lock_list_.clear_entries();
    }
  }
}
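
// Design note: this subroutine is where the MOCC hybrid shows up. A
// pessimistic lock is taken only when the Retrospective Lock List from a
// previous abort recommends it, or when the page is empirically hot;
// otherwise the read stays purely optimistic. Lock-acquisition failures are
// swallowed on purpose: these locks merely lower the abort probability, and
// correctness still rests on precommit verification.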

ErrorCode Xct::add_to_read_set(
  storage::StorageId storage_id,
  XctId observed_owner_id,
  RwLockableXctId* owner_id_address,
  ReadXctAccess** read_set_address) {
  const auto& resolver = retrospective_lock_list_.get_volatile_page_resolver();
  UniversalLockId owner_lock_id = xct_id_to_universal_lock_id(resolver, owner_id_address);
  return add_to_read_set(
    storage_id,
    observed_owner_id,
    owner_lock_id,
    owner_id_address,
    read_set_address);
}

ErrorCode Xct::add_to_read_set(
  storage::StorageId storage_id,
  XctId observed_owner_id,
  UniversalLockId owner_lock_id,
  RwLockableXctId* owner_id_address,
  ReadXctAccess** read_set_address) {
#ifndef NDEBUG
  const auto& resolver = retrospective_lock_list_.get_volatile_page_resolver();
  storage::assert_within_valid_volatile_page(resolver, owner_id_address);
  ASSERT_ND(owner_lock_id == xct_id_to_universal_lock_id(resolver, owner_id_address));
#endif  // NDEBUG

  ASSERT_ND(storage_id != 0);
  ASSERT_ND(owner_id_address);
  ASSERT_ND(!observed_owner_id.is_being_written());
  ASSERT_ND(read_set_address);
  if (UNLIKELY(read_set_size_ >= max_read_set_size_)) {
    return kErrorCodeXctReadSetOverflow;  // 0x0A01: too large read-set
  }
  // If the next-layer bit is ON, the record is not logically a record, so why would we add
  // it to the read-set? We should have already either aborted or retried in this case.
  ASSERT_ND(!observed_owner_id.is_next_layer());
  ReadXctAccess* entry = read_set_ + read_set_size_;
  *read_set_address = entry;
  entry->ordinal_ = read_set_size_;
  entry->storage_id_ = storage_id;
  entry->set_owner_id_and_lock_id(owner_id_address, owner_lock_id);
  entry->observed_owner_id_ = observed_owner_id;
  entry->related_write_ = nullptr;
  ++read_set_size_;
  return kErrorCodeOk;
}


ErrorCode Xct::add_to_write_set(
  storage::StorageId storage_id,
  RwLockableXctId* owner_id_address,
  char* payload_address,
  log::RecordLogType* log_entry) {
  ASSERT_ND(storage_id != 0);
  ASSERT_ND(owner_id_address);
  ASSERT_ND(payload_address);
  ASSERT_ND(log_entry);
  const auto& resolver = retrospective_lock_list_.get_volatile_page_resolver();
#ifndef NDEBUG
  storage::assert_within_valid_volatile_page(resolver, owner_id_address);
  log::invoke_assert_valid(log_entry);
#endif  // NDEBUG

  if (UNLIKELY(write_set_size_ >= max_write_set_size_)) {
    return kErrorCodeXctWriteSetOverflow;  // 0x0A02: too large write-set
  }
  WriteXctAccess* write = write_set_ + write_set_size_;
  write->ordinal_ = write_set_size_;
  write->payload_address_ = payload_address;
  write->log_entry_ = log_entry;
  write->storage_id_ = storage_id;
  write->set_owner_id_resolve_lock_id(resolver, owner_id_address);
  write->related_read_ = CXX11_NULLPTR;
  ++write_set_size_;
  return kErrorCodeOk;
}


ErrorCode Xct::add_to_read_and_write_set(
  storage::StorageId storage_id,
  XctId observed_owner_id,
  RwLockableXctId* owner_id_address,
  char* payload_address,
  log::RecordLogType* log_entry) {
  ASSERT_ND(observed_owner_id.is_valid());
#ifndef NDEBUG
  log::invoke_assert_valid(log_entry);
#endif  // NDEBUG
  auto* write = write_set_ + write_set_size_;
  CHECK_ERROR_CODE(add_to_write_set(storage_id, owner_id_address, payload_address, log_entry));

  auto* read = read_set_ + read_set_size_;
  ReadXctAccess* dummy;
  CHECK_ERROR_CODE(add_to_read_set(
    storage_id,
    observed_owner_id,
    write->owner_lock_id_,
    owner_id_address,
    &dummy));
  ASSERT_ND(read->owner_id_address_ == owner_id_address);
  read->related_write_ = write;
  write->related_read_ = read;
  ASSERT_ND(read->related_write_->related_read_ == read);
  ASSERT_ND(write->related_read_->related_write_ == write);
  ASSERT_ND(write->log_entry_ == log_entry);
  ASSERT_ND(write->owner_id_address_ == owner_id_address);
  ASSERT_ND(write_set_size_ > 0);
  return kErrorCodeOk;
}
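
// Linking the two entries through related_read_/related_write_ lets precommit
// recognize "I read this record and then wrote it myself": verification of
// such a read can rely on the write-lock the commit protocol takes anyway,
// rather than treating the self-inflicted TID change as a conflict.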

ErrorCode Xct::add_related_write_set(
  ReadXctAccess* related_read_set,
  RwLockableXctId* tid_address,
  char* payload_address,
  log::RecordLogType* log_entry) {
  ASSERT_ND(related_read_set);
  ASSERT_ND(tid_address);
#ifndef NDEBUG
  log::invoke_assert_valid(log_entry);
#endif  // NDEBUG

  auto* write = write_set_ + write_set_size_;
  auto storage_id = related_read_set->storage_id_;
  auto* owner_id_address = related_read_set->owner_id_address_;
  CHECK_ERROR_CODE(add_to_write_set(storage_id, owner_id_address, payload_address, log_entry));

  related_read_set->related_write_ = write;
  write->related_read_ = related_read_set;
  ASSERT_ND(related_read_set->related_write_->related_read_ == related_read_set);
  ASSERT_ND(write->related_read_->related_write_ == write);
  ASSERT_ND(write->log_entry_ == log_entry);
  ASSERT_ND(write->owner_id_address_ == owner_id_address);
  ASSERT_ND(write_set_size_ > 0);
  return kErrorCodeOk;
}
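
// This is the upgrade path for read-modify-write: a record that already has a
// read-set entry (e.g., from on_record_read with intended_for_write) gains
// its write entry afterwards, without observing the TID a second time.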

ErrorCode Xct::add_to_lock_free_read_set(
  storage::StorageId storage_id,
  XctId observed_owner_id,
  RwLockableXctId* owner_id_address) {
  ASSERT_ND(storage_id != 0);
  if (isolation_level_ != kSerializable) {
    return kErrorCodeOk;
  }
  if (UNLIKELY(lock_free_read_set_size_ >= max_lock_free_read_set_size_)) {
    return kErrorCodeXctReadSetOverflow;  // 0x0A01: too large read-set
  }

  lock_free_read_set_[lock_free_read_set_size_].storage_id_ = storage_id;
  lock_free_read_set_[lock_free_read_set_size_].observed_owner_id_ = observed_owner_id;
  lock_free_read_set_[lock_free_read_set_size_].owner_id_address_ = owner_id_address;
  ++lock_free_read_set_size_;
  return kErrorCodeOk;
}

ErrorCode Xct::add_to_lock_free_write_set(
  storage::StorageId storage_id,
  log::RecordLogType* log_entry) {
  ASSERT_ND(storage_id != 0);
  ASSERT_ND(log_entry);
  if (UNLIKELY(lock_free_write_set_size_ >= max_lock_free_write_set_size_)) {
    return kErrorCodeXctWriteSetOverflow;  // 0x0A02: too large write-set
  }

#ifndef NDEBUG
  log::invoke_assert_valid(log_entry);
#endif  // NDEBUG

  lock_free_write_set_[lock_free_write_set_size_].storage_id_ = storage_id;
  lock_free_write_set_[lock_free_write_set_size_].log_entry_ = log_entry;
  ++lock_free_write_set_size_;
  return kErrorCodeOk;
}
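
// The lock-free variants serve accesses that need no record locking at all,
// e.g., append-style operations where precommit only has to verify the
// observed XID (read side) or publish the log entry (write side). That is
// why LockFreeWriteXctAccess carries just storage_id_ and log_entry_.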

}  // namespace xct
}  // namespace foedus