libfoedus-core
FOEDUS Core Library
foedus::xct::Xct Class Reference

Represents a user transaction. More...

Detailed Description

Represents a user transaction.

To obtain this object, call Thread::get_current_xct(). This object represents a user transaction as opposed to physical-only internal transactions (so called system transactions, SysxctScope).

Definition at line 58 of file xct.hpp.

#include <xct.hpp>

Public Types

enum  Constants { kMaxPointerSets = 1024, kMaxPageVersionSets = 1024 }
 

Public Member Functions

 Xct (Engine *engine, thread::Thread *context, thread::ThreadId thread_id)
 
 Xct (const Xct &other)=delete
 
Xct & operator= (const Xct &other)=delete
 
void initialize (memory::NumaCoreMemory *core_memory, uint32_t *mcs_block_current, uint32_t *mcs_rw_async_mapping_current)
 
void activate (IsolationLevel isolation_level)
 Begins the transaction. More...
 
void deactivate ()
 Closes the transaction. More...
 
uint32_t get_mcs_block_current () const
 
uint32_t increment_mcs_block_current ()
 
void decrement_mcs_block_current ()
 
bool is_active () const
 Returns whether the object is an active transaction. More...
 
bool is_enable_rll_for_this_xct () const
 
void set_enable_rll_for_this_xct (bool value)
 
bool is_default_rll_for_this_xct () const
 
void set_default_rll_for_this_xct (bool value)
 
uint16_t get_hot_threshold_for_this_xct () const
 
void set_hot_threshold_for_this_xct (uint16_t value)
 
uint16_t get_default_hot_threshold_for_this_xct () const
 
void set_default_hot_threshold_for_this_xct (uint16_t value)
 
uint16_t get_rll_threshold_for_this_xct () const
 
void set_rll_threshold_for_this_xct (uint16_t value)
 
uint16_t get_default_rll_threshold_for_this_xct () const
 
void set_default_rll_threshold_for_this_xct (uint16_t value)
 
SysxctWorkspace * get_sysxct_workspace () const
 
bool is_read_only () const
 Returns if this transaction makes no writes. More...
 
IsolationLevel get_isolation_level () const
 Returns the level of isolation for this transaction. More...
 
const XctId & get_id () const
 Returns the ID of this transaction, but note that it is not issued until commit time! More...
 
thread::Thread * get_thread_context ()
 
thread::ThreadId get_thread_id () const
 
uint32_t get_pointer_set_size () const
 
uint32_t get_page_version_set_size () const
 
uint32_t get_read_set_size () const
 
uint32_t get_write_set_size () const
 
uint32_t get_lock_free_read_set_size () const
 
uint32_t get_lock_free_write_set_size () const
 
const PointerAccess * get_pointer_set () const
 
const PageVersionAccess * get_page_version_set () const
 
ReadXctAccess * get_read_set ()
 
WriteXctAccess * get_write_set ()
 
LockFreeReadXctAccess * get_lock_free_read_set ()
 
LockFreeWriteXctAccess * get_lock_free_write_set ()
 
void issue_next_id (XctId max_xct_id, Epoch *epoch)
 Called while a successful commit of xct to issue a new xct id. More...
 
ErrorCode add_to_pointer_set (const storage::VolatilePagePointer *pointer_address, storage::VolatilePagePointer observed)
 Add the given page pointer to the pointer set of this transaction. More...
 
void overwrite_to_pointer_set (const storage::VolatilePagePointer *pointer_address, storage::VolatilePagePointer observed)
 The transaction that has updated the volatile pointer should not abort itself. More...
 
ErrorCode add_to_page_version_set (const storage::PageVersion *version_address, storage::PageVersionStatus observed)
 Add the given page version to the page version set of this transaction. More...
 
ErrorCode on_record_read (bool intended_for_write, RwLockableXctId *tid_address, XctId *observed_xid, ReadXctAccess **read_set_address, bool no_readset_if_moved=false, bool no_readset_if_next_layer=false)
 The general logic invoked for every record read. More...
 
ErrorCode on_record_read (bool intended_for_write, RwLockableXctId *tid_address, bool no_readset_if_moved=false, bool no_readset_if_next_layer=false)
 Shortcut for a case when you don't need observed_xid/read_set_address back. More...
 
void on_record_read_take_locks_if_needed (bool intended_for_write, const storage::Page *page_address, UniversalLockId lock_id, RwLockableXctId *tid_address)
 subroutine of on_record_read() to take lock(s). More...
 
ErrorCode add_related_write_set (ReadXctAccess *related_read_set, RwLockableXctId *tid_address, char *payload_address, log::RecordLogType *log_entry)
 Registers a write-set related to an existing read-set. More...
 
ErrorCode add_to_read_set (storage::StorageId storage_id, XctId observed_owner_id, RwLockableXctId *owner_id_address, ReadXctAccess **read_set_address)
 Add the given record to the read set of this transaction. More...
 
ErrorCode add_to_read_set (storage::StorageId storage_id, XctId observed_owner_id, UniversalLockId owner_lock_id, RwLockableXctId *owner_id_address, ReadXctAccess **read_set_address)
 Use this in case you already have owner_lock_id. More...
 
ErrorCode add_to_write_set (storage::StorageId storage_id, RwLockableXctId *owner_id_address, char *payload_address, log::RecordLogType *log_entry)
 Add the given record to the write set of this transaction. More...
 
ErrorCode add_to_write_set (storage::StorageId storage_id, storage::Record *record, log::RecordLogType *log_entry)
 Add the given record to the write set of this transaction. More...
 
ErrorCode add_to_read_and_write_set (storage::StorageId storage_id, XctId observed_owner_id, RwLockableXctId *owner_id_address, char *payload_address, log::RecordLogType *log_entry)
 Add a pair of read and write set of this transaction. More...
 
ErrorCode add_to_lock_free_read_set (storage::StorageId storage_id, XctId observed_owner_id, RwLockableXctId *owner_id_address)
 Add the given record to the special read-set that is not placed in usual data pages. More...
 
ErrorCode add_to_lock_free_write_set (storage::StorageId storage_id, log::RecordLogType *log_entry)
 Add the given log to the lock-free write set of this transaction. More...
 
void remember_previous_xct_id (XctId new_id)
 
ErrorCode acquire_local_work_memory (uint32_t size, void **out, uint32_t alignment=8)
 Get a tentative work memory of the specified size from pre-allocated thread-private memory. More...
 
xct::CurrentLockList * get_current_lock_list ()
 
const xct::CurrentLockList * get_current_lock_list () const
 
xct::RetrospectiveLockList * get_retrospective_lock_list ()
 
bool assert_related_read_write () const __attribute__((always_inline))
 This debug method checks whether the related_read_ and related_write_ fields in read/write sets are consistent. More...
 

Friends

std::ostream & operator<< (std::ostream &o, const Xct &v)
 

Member Enumeration Documentation

Enumerator
kMaxPointerSets 
kMaxPageVersionSets 

Definition at line 60 of file xct.hpp.

Constructor & Destructor Documentation

foedus::xct::Xct::Xct ( Engine *  engine,
thread::Thread *  context,
thread::ThreadId  thread_id 
)

Definition at line 41 of file xct.cpp.

References foedus::storage::StorageOptions::kDefaultHotThreshold, foedus::xct::XctOptions::kDefaultHotThreshold, and foedus::xct::kSerializable.

42  : engine_(engine), context_(context), thread_id_(thread_id) {
43  id_ = XctId();
44  active_ = false;
45 
46  default_rll_for_this_xct_ = false;
47  enable_rll_for_this_xct_ = default_rll_for_this_xct_;
48  default_hot_threshold_for_this_xct_ = storage::StorageOptions::kDefaultHotThreshold;
49  hot_threshold_for_this_xct_ = default_hot_threshold_for_this_xct_;
50  default_rll_threshold_for_this_xct_ = XctOptions::kDefaultHotThreshold;
51  rll_threshold_for_this_xct_ = default_rll_threshold_for_this_xct_;
52 
53  sysxct_workspace_ = nullptr;
54 
55  read_set_ = nullptr;
56  read_set_size_ = 0;
57  max_read_set_size_ = 0;
58  write_set_ = nullptr;
59  write_set_size_ = 0;
60  max_write_set_size_ = 0;
61  lock_free_read_set_ = nullptr;
62  lock_free_read_set_size_ = 0;
63  max_lock_free_read_set_size_ = 0;
64  lock_free_write_set_ = nullptr;
65  lock_free_write_set_size_ = 0;
66  max_lock_free_write_set_size_ = 0;
67  pointer_set_size_ = 0;
68  page_version_set_size_ = 0;
69  isolation_level_ = kSerializable;
70  mcs_block_current_ = nullptr;
71  mcs_rw_async_mapping_current_ = nullptr;
72  local_work_memory_ = nullptr;
73  local_work_memory_size_ = 0;
74  local_work_memory_cur_ = 0;
75 }
Protects against all anomalies in all situations.
Definition: xct_id.hpp:86
foedus::xct::Xct::Xct ( const Xct &  other)
delete

Member Function Documentation

ErrorCode foedus::xct::Xct::acquire_local_work_memory ( uint32_t  size,
void **  out,
uint32_t  alignment = 8 
)
inline

Get a tentative work memory of the specified size from pre-allocated thread-private memory.

The local work memory is recycled after the current transaction.

Definition at line 397 of file xct.hpp.

References foedus::kErrorCodeOk, foedus::kErrorCodeXctNoMoreLocalWorkMemory, and UNLIKELY.

Referenced by foedus::thread::ThreadPimpl::find_or_read_a_snapshot_page(), and foedus::thread::ThreadPimpl::find_or_read_snapshot_pages_batch().

397  {
398  if (size % alignment != 0) {
399  size = ((size / alignment) + 1U) * alignment;
400  }
401  uint64_t begin = local_work_memory_cur_;
402  if (begin % alignment != 0) {
403  begin = ((begin / alignment) + 1U) * alignment;
404  }
405  if (UNLIKELY(size + begin > local_work_memory_size_)) {
406  return kErrorCodeXctNoMoreLocalWorkMemory;
407  }
408  local_work_memory_cur_ = size + begin;
409  *out = reinterpret_cast<char*>(local_work_memory_) + begin;
410  return kErrorCodeOk;
411  }
0 means no-error.
Definition: error_code.hpp:87
#define UNLIKELY(x)
Hints that x is highly likely false.
Definition: compiler.hpp:104
0x0A09 : "XCTION : Out of local work memory for the current transaction. Adjust XctOptions::local_wor...
Definition: error_code.hpp:204

Here is the caller graph for this function:

void foedus::xct::Xct::activate ( IsolationLevel  isolation_level)
inline

Begins the transaction.

Definition at line 79 of file xct.hpp.

References ASSERT_ND, foedus::xct::CurrentLockList::clear_entries(), foedus::xct::RetrospectiveLockList::is_empty(), and foedus::xct::CurrentLockList::prepopulate_for_retrospective_lock_list().

Referenced by foedus::xct::XctManagerPimpl::begin_xct().

79  {
80  ASSERT_ND(!active_);
81  active_ = true;
82  enable_rll_for_this_xct_ = default_rll_for_this_xct_;
83  hot_threshold_for_this_xct_ = default_hot_threshold_for_this_xct_;
84  rll_threshold_for_this_xct_ = default_rll_threshold_for_this_xct_;
85  isolation_level_ = isolation_level;
86  pointer_set_size_ = 0;
87  page_version_set_size_ = 0;
88  read_set_size_ = 0;
89  write_set_size_ = 0;
90  lock_free_read_set_size_ = 0;
91  lock_free_write_set_size_ = 0;
92  *mcs_block_current_ = 0;
93  *mcs_rw_async_mapping_current_ = 0;
94  local_work_memory_cur_ = 0;
95  current_lock_list_.clear_entries();
96  if (!retrospective_lock_list_.is_empty()) {
97  // If we have RLL, we will highly likely lock all of them.
98  // So, let's make CLL entries for all of them at the beginning.
99  // This is both for simplicity and performance.
100  current_lock_list_.prepopulate_for_retrospective_lock_list(retrospective_lock_list_);
101  }
102  }
void prepopulate_for_retrospective_lock_list(const RetrospectiveLockList &rll)
Another batch-insert method used at the beginning of a transaction.
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72

Here is the call graph for this function:

Here is the caller graph for this function:

ErrorCode foedus::xct::Xct::add_related_write_set ( ReadXctAccess *  related_read_set,
RwLockableXctId *  tid_address,
char *  payload_address,
log::RecordLogType *  log_entry 
)

Registers a write-set related to an existing read-set.

This is typically invoked after on_record_read(), which returns the read-set address.

Note
so far you can't register more than one write-set to a read-set. but, registering related read/write sets are just for performance. correctness is guaranteed even if they are not registered as "related".

Definition at line 506 of file xct.cpp.

References add_to_write_set(), ASSERT_ND, CHECK_ERROR_CODE, foedus::log::invoke_assert_valid(), foedus::kErrorCodeOk, foedus::xct::RecordXctAccess::owner_id_address_, foedus::xct::WriteXctAccess::related_read_, foedus::xct::ReadXctAccess::related_write_, and foedus::xct::RecordXctAccess::storage_id_.

Referenced by foedus::storage::masstree::MasstreeStoragePimpl::register_record_write_log(), and foedus::storage::hash::HashStoragePimpl::register_record_write_log().

510  {
511  ASSERT_ND(related_read_set);
512  ASSERT_ND(tid_address);
513 #ifndef NDEBUG
514  log::invoke_assert_valid(log_entry);
515 #endif // NDEBUG
516 
517  auto* write = write_set_ + write_set_size_;
518  auto storage_id = related_read_set->storage_id_;
519  auto* owner_id_address = related_read_set->owner_id_address_;
520  CHECK_ERROR_CODE(add_to_write_set(storage_id, owner_id_address, payload_address, log_entry));
521 
522  related_read_set->related_write_ = write;
523  write->related_read_ = related_read_set;
524  ASSERT_ND(related_read_set->related_write_->related_read_ == related_read_set);
525  ASSERT_ND(write->related_read_->related_write_ == write);
526  ASSERT_ND(write->log_entry_ == log_entry);
527  ASSERT_ND(write->owner_id_address_ == owner_id_address);
528  ASSERT_ND(write_set_size_ > 0);
529  return kErrorCodeOk;
530 }
void invoke_assert_valid(void *log_buffer)
Invokes the assertion logic of each log type.
ErrorCode add_to_write_set(storage::StorageId storage_id, RwLockableXctId *owner_id_address, char *payload_address, log::RecordLogType *log_entry)
Add the given record to the write set of this transaction.
Definition: xct.cpp:444
0 means no-error.
Definition: error_code.hpp:87
#define CHECK_ERROR_CODE(x)
This macro calls x and checks its returned error code.
Definition: error_code.hpp:155
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72

Here is the call graph for this function:

Here is the caller graph for this function:

ErrorCode foedus::xct::Xct::add_to_lock_free_read_set ( storage::StorageId  storage_id,
XctId  observed_owner_id,
RwLockableXctId *  owner_id_address 
)

Add the given record to the special read-set that is not placed in usual data pages.

Definition at line 532 of file xct.cpp.

References ASSERT_ND, foedus::kErrorCodeOk, foedus::kErrorCodeXctReadSetOverflow, foedus::xct::kSerializable, foedus::xct::LockFreeReadXctAccess::observed_owner_id_, foedus::xct::LockFreeReadXctAccess::owner_id_address_, foedus::xct::LockFreeReadXctAccess::storage_id_, and UNLIKELY.

Referenced by foedus::storage::sequential::SequentialStorageControlBlock::optimistic_read_truncate_epoch().

535  {
536  ASSERT_ND(storage_id != 0);
537  if (isolation_level_ != kSerializable) {
538  return kErrorCodeOk;
539  }
540  if (UNLIKELY(lock_free_read_set_size_ >= max_lock_free_read_set_size_)) {
541  return kErrorCodeXctReadSetOverflow;
542  }
543 
544  lock_free_read_set_[lock_free_read_set_size_].storage_id_ = storage_id;
545  lock_free_read_set_[lock_free_read_set_size_].observed_owner_id_ = observed_owner_id;
546  lock_free_read_set_[lock_free_read_set_size_].owner_id_address_ = owner_id_address;
547  ++lock_free_read_set_size_;
548  return kErrorCodeOk;
549 }
RwLockableXctId * owner_id_address_
Pointer to the TID we protect against.
Definition: xct_access.hpp:210
0x0A01 : "XCTION : Too large read-set. Check the config of XctOptions" .
Definition: error_code.hpp:196
XctId observed_owner_id_
XID value we observed.
Definition: xct_access.hpp:204
0 means no-error.
Definition: error_code.hpp:87
storage::StorageId storage_id_
The storage we accessed.
Definition: xct_access.hpp:207
#define UNLIKELY(x)
Hints that x is highly likely false.
Definition: compiler.hpp:104
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72
Protects against all anomalies in all situations.
Definition: xct_id.hpp:86

Here is the caller graph for this function:

ErrorCode foedus::xct::Xct::add_to_lock_free_write_set ( storage::StorageId  storage_id,
log::RecordLogType *  log_entry 
)

Add the given log to the lock-free write set of this transaction.

Definition at line 551 of file xct.cpp.

References ASSERT_ND, foedus::log::invoke_assert_valid(), foedus::kErrorCodeOk, foedus::kErrorCodeXctWriteSetOverflow, foedus::xct::LockFreeWriteXctAccess::log_entry_, foedus::xct::LockFreeWriteXctAccess::storage_id_, and UNLIKELY.

Referenced by foedus::storage::sequential::SequentialStorage::append_record().

553  {
554  ASSERT_ND(storage_id != 0);
555  ASSERT_ND(log_entry);
556  if (UNLIKELY(lock_free_write_set_size_ >= max_lock_free_write_set_size_)) {
557  return kErrorCodeXctWriteSetOverflow;
558  }
559 
560 #ifndef NDEBUG
561  log::invoke_assert_valid(log_entry);
562 #endif // NDEBUG
563 
564  lock_free_write_set_[lock_free_write_set_size_].storage_id_ = storage_id;
565  lock_free_write_set_[lock_free_write_set_size_].log_entry_ = log_entry;
566  ++lock_free_write_set_size_;
567  return kErrorCodeOk;
568 }
void invoke_assert_valid(void *log_buffer)
Invokes the assertion logic of each log type.
0 means no-error.
Definition: error_code.hpp:87
0x0A02 : "XCTION : Too large write-set. Check the config of XctOptions" .
Definition: error_code.hpp:197
log::RecordLogType * log_entry_
Pointer to the log entry in private log buffer for this write operation.
Definition: xct_access.hpp:235
#define UNLIKELY(x)
Hints that x is highly likely false.
Definition: compiler.hpp:104
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72
storage::StorageId storage_id_
The storage we accessed.
Definition: xct_access.hpp:232

Here is the call graph for this function:

Here is the caller graph for this function:

ErrorCode foedus::xct::Xct::add_to_page_version_set ( const storage::PageVersion *  version_address,
storage::PageVersionStatus  observed 
)

Add the given page version to the page version set of this transaction.

This is similar to pointer set. The difference is that this remembers the PageVersion value we observed when we accessed the page. This can capture many more concurrency issues in the page because PageVersion contains many flags and counters. However, PageVersionAccess can't be used if the page itself might be swapped.

Both PointerAccess and PageVersionAccess can be considered as "node set" in [TU2013], but for a little bit different purpose.

Definition at line 242 of file xct.cpp.

References foedus::xct::PageVersionAccess::address_, ASSERT_ND, foedus::kErrorCodeOk, foedus::kErrorCodeXctPageVersionSetOverflow, kMaxPointerSets, foedus::xct::kSerializable, foedus::xct::PageVersionAccess::observed_, and UNLIKELY.

Referenced by foedus::storage::masstree::MasstreeStoragePimpl::locate_record(), foedus::storage::hash::HashStoragePimpl::locate_record(), and foedus::storage::masstree::MasstreeStoragePimpl::locate_record_normalized().

244  {
245  ASSERT_ND(version_address);
246  if (isolation_level_ != kSerializable) {
247  return kErrorCodeOk;
248  } else if (UNLIKELY(page_version_set_size_ >= kMaxPointerSets)) {
249  return kErrorCodeXctPageVersionSetOverflow;
250  }
251 
252  page_version_set_[page_version_set_size_].address_ = version_address;
253  page_version_set_[page_version_set_size_].observed_ = observed;
254  ++page_version_set_size_;
255  return kErrorCodeOk;
256 }
const storage::PageVersion * address_
Address to the page version.
Definition: xct_access.hpp:76
storage::PageVersionStatus observed_
Value of the page version as of the access.
Definition: xct_access.hpp:79
0x0A06 : "XCTION : Too large page-version set. Consider using snapshot isolation." ...
Definition: error_code.hpp:201
0 means no-error.
Definition: error_code.hpp:87
#define UNLIKELY(x)
Hints that x is highly likely false.
Definition: compiler.hpp:104
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72
Protects against all anomalies in all situations.
Definition: xct_id.hpp:86

Here is the caller graph for this function:

ErrorCode foedus::xct::Xct::add_to_pointer_set ( const storage::VolatilePagePointer *  pointer_address,
storage::VolatilePagePointer  observed 
)

Add the given page pointer to the pointer set of this transaction.

You must call this method in the following cases;

  • When following a volatile pointer that might be later swapped with the RCU protocol.
  • When following a snapshot pointer except it is under a snapshot page.

To clarify, the first case does not apply to storage types that don't swap volatile pointers. So far, only Masstree Storage has such a swapping for root pages. All other storage types thus don't have to take pointer sets for this.

The second case doesn't apply to snapshot pointers once we follow a snapshot pointer in the tree because everything is assured to be stable once we follow a snapshot pointer.

Definition at line 198 of file xct.cpp.

References foedus::xct::PointerAccess::address_, ASSERT_ND, foedus::kErrorCodeOk, foedus::kErrorCodeXctPointerSetOverflow, kMaxPointerSets, foedus::xct::kSerializable, foedus::xct::PointerAccess::observed_, and UNLIKELY.

Referenced by foedus::thread::ThreadPimpl::follow_page_pointer(), foedus::thread::ThreadPimpl::follow_page_pointers_for_read_batch(), and foedus::storage::hash::HashStoragePimpl::locate_bin().

200  {
201  ASSERT_ND(pointer_address);
202  if (isolation_level_ != kSerializable) {
203  return kErrorCodeOk;
204  }
205 
206  // TASK(Hideaki) even though pointer set should be small, we don't want sequential search
207  // everytime. but insertion sort requires shifting. mmm.
208  for (uint32_t i = 0; i < pointer_set_size_; ++i) {
209  if (pointer_set_[i].address_ == pointer_address) {
210  pointer_set_[i].observed_ = observed;
211  return kErrorCodeOk;
212  }
213  }
214 
215  if (UNLIKELY(pointer_set_size_ >= kMaxPointerSets)) {
216  return kErrorCodeXctPointerSetOverflow;
217  }
218 
219  // no need for fence. the observed pointer itself is the only data to verify
220  pointer_set_[pointer_set_size_].address_ = pointer_address;
221  pointer_set_[pointer_set_size_].observed_ = observed;
222  ++pointer_set_size_;
223  return kErrorCodeOk;
224 }
const storage::VolatilePagePointer * address_
Address of the volatile pointer.
Definition: xct_access.hpp:52
storage::VolatilePagePointer observed_
Value of the volatile pointer as of the access.
Definition: xct_access.hpp:55
0 means no-error.
Definition: error_code.hpp:87
#define UNLIKELY(x)
Hints that x is highly likely false.
Definition: compiler.hpp:104
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72
0x0A07 : "XCTION : Too large pointer-set. Consider using snapshot isolation." .
Definition: error_code.hpp:202
Protects against all anomalies in all situations.
Definition: xct_id.hpp:86

Here is the caller graph for this function:

ErrorCode foedus::xct::Xct::add_to_read_and_write_set ( storage::StorageId  storage_id,
XctId  observed_owner_id,
RwLockableXctId *  owner_id_address,
char *  payload_address,
log::RecordLogType *  log_entry 
)

Add a pair of read and write set of this transaction.

Definition at line 474 of file xct.cpp.

References add_to_read_set(), add_to_write_set(), ASSERT_ND, CHECK_ERROR_CODE, foedus::log::invoke_assert_valid(), foedus::xct::XctId::is_valid(), and foedus::kErrorCodeOk.

479  {
480  ASSERT_ND(observed_owner_id.is_valid());
481 #ifndef NDEBUG
482  log::invoke_assert_valid(log_entry);
483 #endif // NDEBUG
484  auto* write = write_set_ + write_set_size_;
485  CHECK_ERROR_CODE(add_to_write_set(storage_id, owner_id_address, payload_address, log_entry));
486 
487  auto* read = read_set_ + read_set_size_;
488  ReadXctAccess* dummy;
489  CHECK_ERROR_CODE(add_to_read_set(
490  storage_id,
491  observed_owner_id,
492  write->owner_lock_id_,
493  owner_id_address,
494  &dummy));
495  ASSERT_ND(read->owner_id_address_ == owner_id_address);
496  read->related_write_ = write;
497  write->related_read_ = read;
498  ASSERT_ND(read->related_write_->related_read_ == read);
499  ASSERT_ND(write->related_read_->related_write_ == write);
500  ASSERT_ND(write->log_entry_ == log_entry);
501  ASSERT_ND(write->owner_id_address_ == owner_id_address);
502  ASSERT_ND(write_set_size_ > 0);
503  return kErrorCodeOk;
504 }
ErrorCode add_to_read_set(storage::StorageId storage_id, XctId observed_owner_id, RwLockableXctId *owner_id_address, ReadXctAccess **read_set_address)
Add the given record to the read set of this transaction.
Definition: xct.cpp:395
void invoke_assert_valid(void *log_buffer)
Invokes the assertion logic of each log type.
ErrorCode add_to_write_set(storage::StorageId storage_id, RwLockableXctId *owner_id_address, char *payload_address, log::RecordLogType *log_entry)
Add the given record to the write set of this transaction.
Definition: xct.cpp:444
0 means no-error.
Definition: error_code.hpp:87
#define CHECK_ERROR_CODE(x)
This macro calls x and checks its returned error code.
Definition: error_code.hpp:155
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72

Here is the call graph for this function:

ErrorCode foedus::xct::Xct::add_to_read_set ( storage::StorageId  storage_id,
XctId  observed_owner_id,
RwLockableXctId *  owner_id_address,
ReadXctAccess **  read_set_address 
)

Add the given record to the read set of this transaction.

You must call this method BEFORE reading the data, otherwise it violates the commit protocol.

Definition at line 395 of file xct.cpp.

References foedus::xct::RetrospectiveLockList::get_volatile_page_resolver(), and foedus::xct::xct_id_to_universal_lock_id().

Referenced by add_to_read_and_write_set(), and on_record_read().

399  {
400  const auto& resolver = retrospective_lock_list_.get_volatile_page_resolver();
401  UniversalLockId owner_lock_id = xct_id_to_universal_lock_id(resolver, owner_id_address);
402  return add_to_read_set(
403  storage_id,
404  observed_owner_id,
405  owner_lock_id,
406  owner_id_address,
407  read_set_address);
408 }
ErrorCode add_to_read_set(storage::StorageId storage_id, XctId observed_owner_id, RwLockableXctId *owner_id_address, ReadXctAccess **read_set_address)
Add the given record to the read set of this transaction.
Definition: xct.cpp:395
uintptr_t UniversalLockId
Universally ordered identifier of each lock.
Definition: xct_id.hpp:134
UniversalLockId xct_id_to_universal_lock_id(const memory::GlobalVolatilePageResolver &resolver, RwLockableXctId *lock)
Definition: xct_id.hpp:1226
const memory::GlobalVolatilePageResolver & get_volatile_page_resolver() const

Here is the call graph for this function:

Here is the caller graph for this function:

ErrorCode foedus::xct::Xct::add_to_read_set ( storage::StorageId  storage_id,
XctId  observed_owner_id,
UniversalLockId  owner_lock_id,
RwLockableXctId *  owner_id_address,
ReadXctAccess **  read_set_address 
)

Use this in case you already have owner_lock_id.

Slightly faster.

Definition at line 410 of file xct.cpp.

References ASSERT_ND, foedus::storage::assert_within_valid_volatile_page(), foedus::xct::RetrospectiveLockList::get_volatile_page_resolver(), foedus::xct::XctId::is_being_written(), foedus::xct::XctId::is_next_layer(), foedus::kErrorCodeOk, foedus::kErrorCodeXctReadSetOverflow, foedus::xct::RecordXctAccess::ordinal_, UNLIKELY, and foedus::xct::xct_id_to_universal_lock_id().

415  {
416 #ifndef NDEBUG
417  const auto& resolver = retrospective_lock_list_.get_volatile_page_resolver();
418  storage::assert_within_valid_volatile_page(resolver, owner_id_address);
419  ASSERT_ND(owner_lock_id == xct_id_to_universal_lock_id(resolver, owner_id_address));
420 #endif // NDEBUG
421 
422  ASSERT_ND(storage_id != 0);
423  ASSERT_ND(owner_id_address);
424  ASSERT_ND(!observed_owner_id.is_being_written());
425  ASSERT_ND(read_set_address);
426  if (UNLIKELY(read_set_size_ >= max_read_set_size_)) {
427  return kErrorCodeXctReadSetOverflow;
428  }
429  // if the next-layer bit is ON, the record is not logically a record, so why we are adding
430  // it to read-set? we should have already either aborted or retried in this case.
431  ASSERT_ND(!observed_owner_id.is_next_layer());
432  ReadXctAccess* entry = read_set_ + read_set_size_;
433  *read_set_address = entry;
434  entry->ordinal_ = read_set_size_;
435  entry->storage_id_ = storage_id;
436  entry->set_owner_id_and_lock_id(owner_id_address, owner_lock_id);
437  entry->observed_owner_id_ = observed_owner_id;
438  entry->related_write_ = nullptr;
439  ++read_set_size_;
440  return kErrorCodeOk;
441 }
0x0A01 : "XCTION : Too large read-set. Check the config of XctOptions" .
Definition: error_code.hpp:196
0 means no-error.
Definition: error_code.hpp:87
UniversalLockId xct_id_to_universal_lock_id(const memory::GlobalVolatilePageResolver &resolver, RwLockableXctId *lock)
Definition: xct_id.hpp:1226
void assert_within_valid_volatile_page(const memory::GlobalVolatilePageResolver &resolver, const void *address)
Definition: page.hpp:428
#define UNLIKELY(x)
Hints that x is highly likely false.
Definition: compiler.hpp:104
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72
const memory::GlobalVolatilePageResolver & get_volatile_page_resolver() const

Here is the call graph for this function:

ErrorCode foedus::xct::Xct::add_to_write_set ( storage::StorageId  storage_id,
RwLockableXctId *  owner_id_address,
char *  payload_address,
log::RecordLogType *  log_entry 
)

Add the given record to the write set of this transaction.

Definition at line 444 of file xct.cpp.

References ASSERT_ND, foedus::storage::assert_within_valid_volatile_page(), CXX11_NULLPTR, foedus::xct::RetrospectiveLockList::get_volatile_page_resolver(), foedus::log::invoke_assert_valid(), foedus::kErrorCodeOk, foedus::kErrorCodeXctWriteSetOverflow, foedus::xct::WriteXctAccess::log_entry_, foedus::xct::RecordXctAccess::ordinal_, foedus::xct::WriteXctAccess::payload_address_, foedus::xct::WriteXctAccess::related_read_, foedus::xct::RecordXctAccess::set_owner_id_resolve_lock_id(), foedus::xct::RecordXctAccess::storage_id_, and UNLIKELY.

Referenced by add_related_write_set(), add_to_read_and_write_set(), add_to_write_set(), foedus::storage::array::ArrayStoragePimpl::increment_record(), foedus::storage::array::ArrayStoragePimpl::increment_record_oneshot(), foedus::storage::array::ArrayStoragePimpl::overwrite_record(), foedus::storage::array::ArrayStoragePimpl::overwrite_record_primitive(), foedus::storage::masstree::MasstreeStoragePimpl::register_record_write_log(), and foedus::storage::hash::HashStoragePimpl::register_record_write_log().

448  {
449  ASSERT_ND(storage_id != 0);
450  ASSERT_ND(owner_id_address);
451  ASSERT_ND(payload_address);
452  ASSERT_ND(log_entry);
453  const auto& resolver = retrospective_lock_list_.get_volatile_page_resolver();
454 #ifndef NDEBUG
455  storage::assert_within_valid_volatile_page(resolver, owner_id_address);
456  log::invoke_assert_valid(log_entry);
457 #endif // NDEBUG
458 
459  if (UNLIKELY(write_set_size_ >= max_write_set_size_)) {
460  return kErrorCodeXctWriteSetOverflow;
461  }
462  WriteXctAccess* write = write_set_ + write_set_size_;
463  write->ordinal_ = write_set_size_;
464  write->payload_address_ = payload_address;
465  write->log_entry_ = log_entry;
466  write->storage_id_ = storage_id;
467  write->set_owner_id_resolve_lock_id(resolver, owner_id_address);
468  write->related_read_ = CXX11_NULLPTR;
469  ++write_set_size_;
470  return kErrorCodeOk;
471 }
#define CXX11_NULLPTR
Used in public headers in place of "nullptr" of C++11.
Definition: cxx11.hpp:132
void invoke_assert_valid(void *log_buffer)
Invokes the assertion logic of each log type.
0 means no-error.
Definition: error_code.hpp:87
void assert_within_valid_volatile_page(const memory::GlobalVolatilePageResolver &resolver, const void *address)
Definition: page.hpp:428
0x0A02 : "XCTION : Too large write-set. Check the config of XctOptions" .
Definition: error_code.hpp:197
#define UNLIKELY(x)
Hints that x is highly likely false.
Definition: compiler.hpp:104
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72
const memory::GlobalVolatilePageResolver & get_volatile_page_resolver() const

Here is the call graph for this function:

Here is the caller graph for this function:

ErrorCode foedus::xct::Xct::add_to_write_set ( storage::StorageId  storage_id,
storage::Record record,
log::RecordLogType log_entry 
)
inline

Add the given record to the write set of this transaction.

Definition at line 354 of file xct.hpp.

References add_to_write_set(), foedus::storage::Record::owner_id_, and foedus::storage::Record::payload_.

357  {
358  return add_to_write_set(storage_id, &record->owner_id_, record->payload_, log_entry);
359  }
ErrorCode add_to_write_set(storage::StorageId storage_id, RwLockableXctId *owner_id_address, char *payload_address, log::RecordLogType *log_entry)
Add the given record to the write set of this transaction.
Definition: xct.cpp:444

Here is the call graph for this function:

bool foedus::xct::Xct::assert_related_read_write ( ) const
inline

This debug method checks whether the related_read_ and related_write_ fields in read/write sets are consistent.

This method is completely wiped out in release build.

Returns
whether it is consistent. but this method anyway asserts as of finding inconsistency.

Definition at line 538 of file xct.hpp.

References ASSERT_ND, foedus::xct::RecordXctAccess::owner_id_address_, foedus::xct::WriteXctAccess::related_read_, and foedus::xct::ReadXctAccess::related_write_.

Referenced by foedus::xct::XctManagerPimpl::precommit_xct(), foedus::xct::XctManagerPimpl::precommit_xct_lock(), and foedus::xct::XctManagerPimpl::precommit_xct_sort_access().

538  {
539 #ifndef NDEBUG
540  for (uint32_t i = 0; i < write_set_size_; ++i) {
541  WriteXctAccess* write = write_set_ + i;
542  if (write->related_read_) {
543  ASSERT_ND(write->related_read_ >= read_set_);
544  uint32_t index = write->related_read_ - read_set_;
545  ASSERT_ND(index < read_set_size_);
546  ASSERT_ND(write->owner_id_address_ == write->related_read_->owner_id_address_);
547  ASSERT_ND(write == write->related_read_->related_write_);
548  }
549  }
550 
551  for (uint32_t i = 0; i < read_set_size_; ++i) {
552  ReadXctAccess* read = read_set_ + i;
553  if (read->related_write_) {
554  ASSERT_ND(read->related_write_ >= write_set_);
555  uint32_t index = read->related_write_ - write_set_;
556  ASSERT_ND(index < write_set_size_);
557  ASSERT_ND(read->owner_id_address_ == read->related_write_->owner_id_address_);
558  ASSERT_ND(read == read->related_write_->related_read_);
559  }
560  }
561 #endif // NDEBUG
562  return true;
563 }
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72

Here is the caller graph for this function:

void foedus::xct::Xct::deactivate ( )
inline

Closes the transaction.

Precondition
Before calling this method, all locks must be already released.

Definition at line 108 of file xct.hpp.

References ASSERT_ND, and foedus::xct::CurrentLockList::is_empty().

Referenced by foedus::xct::XctManagerPimpl::abort_xct(), and foedus::xct::XctManagerPimpl::precommit_xct().

108  {
109  ASSERT_ND(active_);
110  ASSERT_ND(current_lock_list_.is_empty());
111  active_ = false;
112  *mcs_block_current_ = 0;
113  *mcs_rw_async_mapping_current_ = 0;
114  }
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72

Here is the call graph for this function:

Here is the caller graph for this function:

void foedus::xct::Xct::decrement_mcs_block_current ( )
inline

Definition at line 118 of file xct.hpp.

118 { --(*mcs_block_current_); }
const xct::CurrentLockList* foedus::xct::Xct::get_current_lock_list ( ) const
inline

Definition at line 414 of file xct.hpp.

414 { return &current_lock_list_; }
uint16_t foedus::xct::Xct::get_default_hot_threshold_for_this_xct ( ) const
inline

Definition at line 130 of file xct.hpp.

Referenced by foedus::xct::operator<<().

130  {
131  return default_hot_threshold_for_this_xct_ ; }

Here is the caller graph for this function:

uint16_t foedus::xct::Xct::get_default_rll_threshold_for_this_xct ( ) const
inline

Definition at line 137 of file xct.hpp.

Referenced by foedus::xct::operator<<().

137  {
138  return default_rll_threshold_for_this_xct_ ; }

Here is the caller graph for this function:

uint16_t foedus::xct::Xct::get_hot_threshold_for_this_xct ( ) const
inline

Definition at line 128 of file xct.hpp.

Referenced by foedus::storage::PageHeader::contains_hot_records(), foedus::thread::Thread::is_hot_page(), and foedus::xct::operator<<().

128 { return hot_threshold_for_this_xct_; }

Here is the caller graph for this function:

const XctId& foedus::xct::Xct::get_id ( ) const
inline

Returns the ID of this transaction, but note that it is not issued until commit time!

Definition at line 151 of file xct.hpp.

Referenced by foedus::xct::operator<<(), and foedus::xct::XctManagerPimpl::precommit_xct_apply().

151 { return id_; }

Here is the caller graph for this function:

LockFreeReadXctAccess* foedus::xct::Xct::get_lock_free_read_set ( )
inline

Definition at line 164 of file xct.hpp.

Referenced by foedus::xct::XctManagerPimpl::precommit_xct_verify_readonly(), and foedus::xct::XctManagerPimpl::precommit_xct_verify_readwrite().

164 { return lock_free_read_set_; }

Here is the caller graph for this function:

uint32_t foedus::xct::Xct::get_lock_free_read_set_size ( ) const
inline

Definition at line 158 of file xct.hpp.

Referenced by foedus::xct::operator<<(), foedus::xct::XctManagerPimpl::precommit_xct_verify_readonly(), and foedus::xct::XctManagerPimpl::precommit_xct_verify_readwrite().

158 { return lock_free_read_set_size_; }

Here is the caller graph for this function:

LockFreeWriteXctAccess* foedus::xct::Xct::get_lock_free_write_set ( )
inline

Definition at line 165 of file xct.hpp.

Referenced by foedus::xct::XctManagerPimpl::precommit_xct_apply().

165 { return lock_free_write_set_; }

Here is the caller graph for this function:

uint32_t foedus::xct::Xct::get_lock_free_write_set_size ( ) const
inline

Definition at line 159 of file xct.hpp.

Referenced by foedus::xct::XctManagerPimpl::begin_xct(), foedus::xct::operator<<(), and foedus::xct::XctManagerPimpl::precommit_xct_apply().

159 { return lock_free_write_set_size_; }

Here is the caller graph for this function:

uint32_t foedus::xct::Xct::get_mcs_block_current ( ) const
inline

Definition at line 116 of file xct.hpp.

Referenced by foedus::xct::XctManagerPimpl::begin_xct().

116 { return *mcs_block_current_; }

Here is the caller graph for this function:

const PageVersionAccess* foedus::xct::Xct::get_page_version_set ( ) const
inline

Definition at line 161 of file xct.hpp.

Referenced by foedus::xct::XctManagerPimpl::precommit_xct_verify_page_version_set().

161 { return page_version_set_; }

Here is the caller graph for this function:

uint32_t foedus::xct::Xct::get_page_version_set_size ( ) const
inline

Definition at line 155 of file xct.hpp.

Referenced by foedus::xct::operator<<(), and foedus::xct::XctManagerPimpl::precommit_xct_verify_page_version_set().

155 { return page_version_set_size_; }

Here is the caller graph for this function:

const PointerAccess* foedus::xct::Xct::get_pointer_set ( ) const
inline

Definition at line 160 of file xct.hpp.

Referenced by foedus::xct::XctManagerPimpl::precommit_xct_verify_pointer_set().

160 { return pointer_set_; }

Here is the caller graph for this function:

uint32_t foedus::xct::Xct::get_pointer_set_size ( ) const
inline

Definition at line 154 of file xct.hpp.

Referenced by foedus::xct::operator<<(), and foedus::xct::XctManagerPimpl::precommit_xct_verify_pointer_set().

154 { return pointer_set_size_; }

Here is the caller graph for this function:

ReadXctAccess* foedus::xct::Xct::get_read_set ( )
inline
uint32_t foedus::xct::Xct::get_read_set_size ( ) const
inline
xct::RetrospectiveLockList* foedus::xct::Xct::get_retrospective_lock_list ( )
inline

Definition at line 415 of file xct.hpp.

Referenced by foedus::xct::XctManagerPimpl::abort_xct(), foedus::xct::XctManagerPimpl::begin_xct(), and foedus::xct::XctManagerPimpl::precommit_xct().

415  {
416  return &retrospective_lock_list_;
417  }

Here is the caller graph for this function:

uint16_t foedus::xct::Xct::get_rll_threshold_for_this_xct ( ) const
inline

Definition at line 135 of file xct.hpp.

Referenced by foedus::xct::XctManagerPimpl::abort_xct(), and foedus::xct::operator<<().

135 { return rll_threshold_for_this_xct_; }

Here is the caller graph for this function:

SysxctWorkspace* foedus::xct::Xct::get_sysxct_workspace ( ) const
inline

Definition at line 142 of file xct.hpp.

Referenced by foedus::xct::operator<<(), and foedus::thread::ThreadPimpl::run_nested_sysxct().

142 { return sysxct_workspace_; }

Here is the caller graph for this function:

thread::Thread* foedus::xct::Xct::get_thread_context ( )
inline

Definition at line 152 of file xct.hpp.

152 { return context_; }
thread::ThreadId foedus::xct::Xct::get_thread_id ( ) const
inline

Definition at line 153 of file xct.hpp.

153 { return thread_id_; }
uint32_t foedus::xct::Xct::increment_mcs_block_current ( )
inline

Definition at line 117 of file xct.hpp.

117 { return ++(*mcs_block_current_); }
void foedus::xct::Xct::initialize ( memory::NumaCoreMemory core_memory,
uint32_t *  mcs_block_current,
uint32_t *  mcs_rw_async_mapping_current 
)

Definition at line 77 of file xct.cpp.

References ASSERT_ND, foedus::xct::XctOptions::enable_retrospective_lock_list_, foedus::memory::NumaCoreMemory::get_current_lock_list_capacity(), foedus::memory::NumaCoreMemory::get_current_lock_list_memory(), foedus::memory::EngineMemory::get_global_volatile_page_resolver(), foedus::savepoint::SavepointManager::get_initial_current_epoch(), foedus::memory::NumaCoreMemory::get_local_work_memory(), foedus::memory::NumaCoreMemory::get_local_work_memory_size(), foedus::Engine::get_memory_manager(), foedus::Engine::get_options(), foedus::memory::NumaCoreMemory::get_retrospective_lock_list_capacity(), foedus::memory::NumaCoreMemory::get_retrospective_lock_list_memory(), foedus::Engine::get_savepoint_manager(), foedus::memory::NumaCoreMemory::get_small_thread_local_memory_pieces(), foedus::storage::StorageOptions::hot_threshold_, foedus::xct::XctOptions::hot_threshold_for_retrospective_lock_list_, foedus::xct::RetrospectiveLockList::init(), foedus::xct::CurrentLockList::init(), foedus::xct::SysxctWorkspace::init(), foedus::xct::XctId::is_valid(), foedus::xct::XctOptions::max_lock_free_read_set_size_, foedus::xct::XctOptions::max_lock_free_write_set_size_, foedus::xct::XctOptions::max_read_set_size_, foedus::xct::XctOptions::max_write_set_size_, foedus::xct::XctId::set_epoch(), foedus::xct::XctId::set_ordinal(), foedus::EngineOptions::storage_, and foedus::EngineOptions::xct_.

Referenced by foedus::thread::ThreadPimpl::initialize_once().

80  {
82  id_.set_ordinal(0); // ordinal 0 is possible only as a dummy "latest" XctId
83  ASSERT_ND(id_.is_valid());
84  memory::NumaCoreMemory:: SmallThreadLocalMemoryPieces pieces
85  = core_memory->get_small_thread_local_memory_pieces();
86  const XctOptions& xct_opt = engine_->get_options().xct_;
87 
88  default_rll_for_this_xct_ = xct_opt.enable_retrospective_lock_list_;
89  enable_rll_for_this_xct_ = default_rll_for_this_xct_;
90  default_hot_threshold_for_this_xct_ = engine_->get_options().storage_.hot_threshold_;
91  hot_threshold_for_this_xct_ = default_hot_threshold_for_this_xct_;
92  default_rll_threshold_for_this_xct_ = xct_opt.hot_threshold_for_retrospective_lock_list_;
93  rll_threshold_for_this_xct_ = default_rll_threshold_for_this_xct_;
94 
95  sysxct_workspace_ = reinterpret_cast<SysxctWorkspace*>(pieces.sysxct_workspace_memory_);
96 
97  read_set_ = reinterpret_cast<ReadXctAccess*>(pieces.xct_read_access_memory_);
98  read_set_size_ = 0;
99  max_read_set_size_ = xct_opt.max_read_set_size_;
100  write_set_ = reinterpret_cast<WriteXctAccess*>(pieces.xct_write_access_memory_);
101  write_set_size_ = 0;
102  max_write_set_size_ = xct_opt.max_write_set_size_;
103  lock_free_read_set_ = reinterpret_cast<LockFreeReadXctAccess*>(
104  pieces.xct_lock_free_read_access_memory_);
105  lock_free_read_set_size_ = 0;
106  max_lock_free_read_set_size_ = xct_opt.max_lock_free_read_set_size_;
107  lock_free_write_set_ = reinterpret_cast<LockFreeWriteXctAccess*>(
108  pieces.xct_lock_free_write_access_memory_);
109  lock_free_write_set_size_ = 0;
110  max_lock_free_write_set_size_ = xct_opt.max_lock_free_write_set_size_;
111  pointer_set_ = reinterpret_cast<PointerAccess*>(pieces.xct_pointer_access_memory_);
112  pointer_set_size_ = 0;
113  page_version_set_ = reinterpret_cast<PageVersionAccess*>(pieces.xct_page_version_memory_);
114  page_version_set_size_ = 0;
115  mcs_block_current_ = mcs_block_current;
116  *mcs_block_current_ = 0;
117  mcs_rw_async_mapping_current_ = mcs_rw_async_mapping_current;
118  *mcs_rw_async_mapping_current_ = 0;
119  local_work_memory_ = core_memory->get_local_work_memory();
120  local_work_memory_size_ = core_memory->get_local_work_memory_size();
121  local_work_memory_cur_ = 0;
122 
123  sysxct_workspace_->init(context_);
124  current_lock_list_.init(
125  core_memory->get_current_lock_list_memory(),
126  core_memory->get_current_lock_list_capacity(),
128  retrospective_lock_list_.init(
129  core_memory->get_retrospective_lock_list_memory(),
130  core_memory->get_retrospective_lock_list_capacity(),
132 }
void set_epoch(Epoch epoch) __attribute__((always_inline))
Definition: xct_id.hpp:965
const GlobalVolatilePageResolver & get_global_volatile_page_resolver() const
Returns the page resolver to convert volatile page ID to page pointer.
bool is_valid() const __attribute__((always_inline))
Definition: xct_id.hpp:973
const EngineOptions & get_options() const
Definition: engine.cpp:39
savepoint::SavepointManager * get_savepoint_manager() const
See Savepoint Manager.
Definition: engine.cpp:53
uint64_t hot_threshold_
Page hotness >= this value will be considered hot (hybrid CC only).
storage::StorageOptions storage_
void init(LockEntry *array, uint32_t capacity, const memory::GlobalVolatilePageResolver &resolver)
void set_ordinal(uint32_t ordinal) __attribute__((always_inline))
Definition: xct_id.hpp:980
void init(thread::Thread *enclosing_thread)
void init(LockEntry *array, uint32_t capacity, const memory::GlobalVolatilePageResolver &resolver)
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72
memory::EngineMemory * get_memory_manager() const
See Memory Manager.
Definition: engine.cpp:50

Here is the call graph for this function:

Here is the caller graph for this function:

bool foedus::xct::Xct::is_default_rll_for_this_xct ( ) const
inline

Definition at line 125 of file xct.hpp.

Referenced by foedus::xct::operator<<().

125 { return default_rll_for_this_xct_ ; }

Here is the caller graph for this function:

bool foedus::xct::Xct::is_enable_rll_for_this_xct ( ) const
inline

Definition at line 123 of file xct.hpp.

Referenced by foedus::xct::XctManagerPimpl::abort_xct(), and foedus::xct::operator<<().

123 { return enable_rll_for_this_xct_; }

Here is the caller graph for this function:

bool foedus::xct::Xct::is_read_only ( ) const
inline

Returns if this transaction makes no writes.

Definition at line 145 of file xct.hpp.

Referenced by foedus::xct::XctManagerPimpl::precommit_xct().

145  {
146  return write_set_size_ == 0 && lock_free_write_set_size_ == 0;
147  }

Here is the caller graph for this function:

void foedus::xct::Xct::issue_next_id ( XctId  max_xct_id,
Epoch epoch 
)

Called while a successful commit of xct to issue a new xct id.

Parameters
[in] max_xct_id Largest xct_id this transaction depends on.
[in,out] epoch (in) The minimal epoch this transaction has to be in. (out) The epoch this transaction ended up with, which is epoch+1 only when it found the ordinal is full for the current epoch.

This method issues a XctId that satisfies the following properties (see [TU13]). Clarification: "larger" here means either a) the epoch is larger or b) the epoch is same and ordinal is larger.

  • Larger than the most recent XctId issued for read-write transaction on this thread.
  • Larger than every XctId of any record read or written by this transaction.
  • In the returned(out) epoch (which is same or larger than the given(in) epoch).

This method also advances the epoch when the ordinal is full for the current epoch. This method never fails.

Definition at line 134 of file xct.cpp.

References foedus::xct::XctManager::advance_current_global_epoch(), ASSERT_ND, foedus::Epoch::before(), foedus::xct::XctManager::get_current_global_epoch(), foedus::xct::XctManager::get_current_global_epoch_weak(), foedus::xct::XctId::get_epoch(), foedus::xct::XctId::get_ordinal(), foedus::Engine::get_xct_manager(), foedus::xct::XctId::is_valid(), foedus::xct::kMaxXctOrdinal, remember_previous_xct_id(), foedus::xct::XctId::set_epoch(), foedus::xct::XctId::set_ordinal(), foedus::xct::XctId::store_max(), and UNLIKELY.

Referenced by foedus::xct::XctManagerPimpl::precommit_xct_apply().

134  {
135  ASSERT_ND(id_.is_valid());
136 
137  while (true) {
138  // invariant 1: Larger than latest XctId of this thread.
139  XctId new_id = id_;
140  // invariant 2: Larger than every XctId of any record read or written by this transaction.
141  new_id.store_max(max_xct_id);
142  // invariant 3: in the epoch
143  if (new_id.get_epoch().before(*epoch)) {
144  new_id.set_epoch(*epoch);
145  new_id.set_ordinal(0);
146  }
147  ASSERT_ND(new_id.get_epoch() == *epoch);
148 
149  // Now, is it possible to get an ordinal one larger than this one?
150  if (UNLIKELY(new_id.get_ordinal() >= kMaxXctOrdinal)) {
151  // oh, that's rare.
152  LOG(WARNING) << "Reached the maximum ordinal in this epoch. Advancing current epoch"
153  << " just for this reason. It's rare, but not an error.";
155  ASSERT_ND(epoch->before(engine_->get_xct_manager()->get_current_global_epoch()));
156  // we have already issued fence by now, so we can use nonatomic version.
157  *epoch = engine_->get_xct_manager()->get_current_global_epoch_weak();
158  continue; // try again with this epoch.
159  }
160 
161  ASSERT_ND(new_id.get_ordinal() < kMaxXctOrdinal);
162  new_id.set_ordinal(new_id.get_ordinal() + 1U);
163  remember_previous_xct_id(new_id);
164  break;
165  }
166 }
Epoch get_current_global_epoch_weak() const
Epoch get_current_global_epoch() const
Returns the current global epoch, the epoch a newly started transaction will be in.
bool is_valid() const __attribute__((always_inline))
Definition: xct_id.hpp:973
void remember_previous_xct_id(XctId new_id)
Definition: xct.hpp:386
void store_max(const XctId &other) __attribute__((always_inline))
Kind of std::max(this, other).
Definition: xct_id.hpp:1059
const uint64_t kMaxXctOrdinal
Maximum value of in-epoch ordinal.
Definition: xct_id.hpp:898
xct::XctManager * get_xct_manager() const
See Transaction Manager.
Definition: engine.cpp:61
void advance_current_global_epoch()
Requests to advance the current global epoch as soon as possible and blocks until it actually does...
#define UNLIKELY(x)
Hints that x is highly likely false.
Definition: compiler.hpp:104
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72

Here is the call graph for this function:

Here is the caller graph for this function:

ErrorCode foedus::xct::Xct::on_record_read ( bool  intended_for_write,
RwLockableXctId tid_address,
XctId observed_xid,
ReadXctAccess **  read_set_address,
bool  no_readset_if_moved = false,
bool  no_readset_if_next_layer = false 
)

The general logic invoked for every record read.

Parameters
[in] intended_for_write Hints whether the record will be written after this read
[in,out] tid_address The record's TID address
[out] observed_xid Returns the observed XID. See below for more details.
[out] read_set_address If this method took a read-set, points to the read-set record. nullptr if it didn't.
[in] no_readset_if_moved When this is true, and if we observe an XID whose is_moved() is on, we do not add it to readset. See the comment below for more details.
[in] no_readset_if_next_layer When this is true, and if we observe an XID whose is_next_layer() is on, we do not add it to readset. See the comment below for more details.
Returns
The only possible error is read-set full.
Precondition
tid_address != nullptr && observed_xid != nullptr
tid_address must be pointing to somewhere in an aligned data page. We reinterpret_cast the address to acquire the enclosing page and its header.
Postcondition
returns error code, or !observed_xid->is_being_written().

You must call this method BEFORE reading the data, otherwise it violates the commit protocol. This method does a few things listed below:

Observes XID
Observes XID in the TID and spins until we at least observe an XID that is !observed_xid->is_being_written(). However, remember that concurrent threads might write the data and XID after this method leaves. If you want to make sure your "read" is strictly as of one time point, you must call this method in a loop. In terms of serializability, you don't need it because commit protocol will catch it.
Takes lock(s) recommended by RLL or temperature stat.
This happens only when MOCC is on, and the page is a volatile page. This method might take a PCC-like lock on this record, and also other locks to keep the transaction in canonical mode.
Add to read set
This happens only when the transaction has higher isolation level (serializable), and the page is a volatile page. To protect the read, we add the observed XID and the address to read set of this transaction.
no_readset_if_moved/next_layer
After invoking on_record_read(), we might find the observed TID tells that the record is now permanently out of our interest (e.g., moved/next-layer). In that case, the caller doesn't want to have the entry in read-set. These flags tell this method to not add such entries to read-set. Note that such a protocol is safe because moved/next-layer flags in the storage type is immutable once set, e.g., masstree storage only changes moved-flag off->on, not the other way around. deleted flag is mutable (can off->on->off), so we can't skip such readset. Use it appropriately according to the protocol in the storage type. If you are unsure, don't give "true" to these parameters. Having unnecessary read-sets is just a performance issue, not correctness.

Definition at line 258 of file xct.cpp.

References add_to_read_set(), ASSERT_ND, foedus::storage::assert_within_valid_volatile_page(), CHECK_ERROR_CODE, foedus::storage::construct_volatile_page_pointer(), foedus::thread::Thread::get_global_volatile_page_resolver(), foedus::storage::Page::get_header(), foedus::storage::VolatilePagePointer::get_numa_node(), foedus::storage::VolatilePagePointer::get_offset(), foedus::xct::XctId::is_being_written(), foedus::xct::XctId::is_moved(), foedus::xct::XctId::is_next_layer(), foedus::xct::kDirtyRead, foedus::kErrorCodeOk, foedus::xct::kSerializable, foedus::xct::kSnapshot, foedus::assorted::memory_fence_acquire(), on_record_read_take_locks_if_needed(), foedus::xct::XctId::spin_while_being_written(), foedus::storage::PageHeader::storage_id_, foedus::storage::to_page(), foedus::xct::to_universal_lock_id(), and foedus::xct::RwLockableXctId::xct_id_.

Referenced by foedus::storage::array::ArrayStoragePimpl::get_record(), foedus::storage::array::ArrayStoragePimpl::get_record_for_write(), foedus::storage::array::ArrayStoragePimpl::get_record_for_write_batch(), foedus::storage::array::ArrayStoragePimpl::get_record_payload(), foedus::storage::array::ArrayStoragePimpl::get_record_payload_batch(), foedus::storage::array::ArrayStoragePimpl::get_record_primitive(), foedus::storage::array::ArrayStoragePimpl::get_record_primitive_batch(), foedus::storage::array::ArrayStoragePimpl::increment_record(), on_record_read(), foedus::storage::masstree::RecordLocation::populate_logical(), and foedus::storage::hash::RecordLocation::populate_logical().

264  {
265  ASSERT_ND(tid_address);
266  ASSERT_ND(observed_xid);
267  ASSERT_ND(read_set_address);
268  *read_set_address = nullptr;
269 
270  const storage::Page* page = storage::to_page(reinterpret_cast<const void*>(tid_address));
271  const auto& page_header = page->get_header();
272  if (page_header.snapshot_) {
273  // Snapshot page is immutable.
274  // No read-set, lock, or check for being_written flag needed.
275  *observed_xid = tid_address->xct_id_;
276  ASSERT_ND(!observed_xid->is_being_written());
277  return kErrorCodeOk;
278  } else if (isolation_level_ != kSerializable) {
279  // No read-set or read-locks needed in non-serializable transactions.
280  // Also no point to conservatively take write-locks recommended by RLL
281  // because we don't take any read locks in these modes, so the
282  // original SILO's write-lock protocol is enough and abort-free.
283  ASSERT_ND(isolation_level_ == kDirtyRead || isolation_level_ == kSnapshot);
284  *observed_xid = tid_address->xct_id_.spin_while_being_written();
285  ASSERT_ND(!observed_xid->is_being_written());
286  return kErrorCodeOk;
287  }
288 
289  storage::VolatilePagePointer vpp(storage::construct_volatile_page_pointer(page_header.page_id_));
290 #ifndef NDEBUG
291  const auto& resolver = context_->get_global_volatile_page_resolver();
292  storage::assert_within_valid_volatile_page(resolver, tid_address);
293 
294  ASSERT_ND(vpp.get_numa_node() < resolver.numa_node_count_);
295  ASSERT_ND(vpp.get_offset() >= resolver.begin_);
296  ASSERT_ND(vpp.get_offset() < resolver.end_);
297 #endif // NDEBUG
298 
299  // This is a serializable transaction, and we are reading a record from a volatile page.
 300  // We might take a pessimistic lock for the record, which is our MOCC protocol.
301  // However, we need to do this _before_ observing XctId. Otherwise there is a
302  // chance of aborts even with the lock.
303  const UniversalLockId lock_id = to_universal_lock_id(
304  vpp.get_numa_node(),
305  vpp.get_offset(),
306  reinterpret_cast<uintptr_t>(tid_address));
307  on_record_read_take_locks_if_needed(intended_for_write, page, lock_id, tid_address);
308 
309  *observed_xid = tid_address->xct_id_.spin_while_being_written();
310  ASSERT_ND(!observed_xid->is_being_written());
311 
312  // Now that we observe XID in its own (non-inlined) function, probably not needed...
313  assorted::memory_fence_acquire(); // following reads must happen *after* observing xid
314 
315  // check non-reversible flags and skip read-set
316  if (observed_xid->is_moved() && no_readset_if_moved) {
317  return kErrorCodeOk;
318  } else if (observed_xid->is_next_layer() && no_readset_if_next_layer) {
319  return kErrorCodeOk;
320  }
321 
322  const storage::StorageId storage_id = page->get_header().storage_id_;
323  ASSERT_ND(storage_id != 0);
325  storage_id,
326  *observed_xid,
327  lock_id,
328  tid_address,
329  read_set_address));
330 
331  return kErrorCodeOk;
332 }
const memory::GlobalVolatilePageResolver & get_global_volatile_page_resolver() const
Returns the page resolver to convert page ID to page pointer.
Definition: thread.cpp:125
ErrorCode add_to_read_set(storage::StorageId storage_id, XctId observed_owner_id, RwLockableXctId *owner_id_address, ReadXctAccess **read_set_address)
Add the given record to the read set of this transaction.
Definition: xct.cpp:395
Page * to_page(const void *address)
super-dirty way to obtain Page the address belongs to.
Definition: page.hpp:395
uint32_t StorageId
Unique ID for storage.
Definition: storage_id.hpp:55
Snapshot isolation (SI), meaning the transaction reads a consistent and complete image of the databas...
Definition: xct_id.hpp:78
uintptr_t UniversalLockId
Universally ordered identifier of each lock.
Definition: xct_id.hpp:134
UniversalLockId to_universal_lock_id(storage::VolatilePagePointer page_id, uintptr_t addr)
Definition: sysxct_impl.hpp:63
0 means no-error.
Definition: error_code.hpp:87
void assert_within_valid_volatile_page(const memory::GlobalVolatilePageResolver &resolver, const void *address)
Definition: page.hpp:428
void on_record_read_take_locks_if_needed(bool intended_for_write, const storage::Page *page_address, UniversalLockId lock_id, RwLockableXctId *tid_address)
subroutine of on_record_read() to take lock(s).
Definition: xct.cpp:334
#define CHECK_ERROR_CODE(x)
This macro calls x and checks its returned error code.
Definition: error_code.hpp:155
VolatilePagePointer construct_volatile_page_pointer(uint64_t word)
Definition: storage_id.hpp:230
void memory_fence_acquire()
Equivalent to std::atomic_thread_fence(std::memory_order_acquire).
No guarantee at all for reads, for the sake of best performance and scalability.
Definition: xct_id.hpp:65
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72
Protects against all anomalies in all situations.
Definition: xct_id.hpp:86

Here is the call graph for this function:

Here is the caller graph for this function:

ErrorCode foedus::xct::Xct::on_record_read ( bool  intended_for_write,
RwLockableXctId tid_address,
bool  no_readset_if_moved = false,
bool  no_readset_if_next_layer = false 
)
inline

Shortcut for a case when you don't need observed_xid/read_set_address back.

Definition at line 286 of file xct.hpp.

References on_record_read().

290  {
291  XctId dummy_xctid;
292  ReadXctAccess* dummy_read_set;
293  return on_record_read(
294  intended_for_write,
295  tid_address,
296  &dummy_xctid,
297  &dummy_read_set,
298  no_readset_if_moved ,
299  no_readset_if_next_layer);
300  }
ErrorCode on_record_read(bool intended_for_write, RwLockableXctId *tid_address, XctId *observed_xid, ReadXctAccess **read_set_address, bool no_readset_if_moved=false, bool no_readset_if_next_layer=false)
The general logic invoked for every record read.
Definition: xct.cpp:258

Here is the call graph for this function:

void foedus::xct::Xct::on_record_read_take_locks_if_needed ( bool  intended_for_write,
const storage::Page page_address,
UniversalLockId  lock_id,
RwLockableXctId tid_address 
)

subroutine of on_record_read() to take lock(s).

Definition at line 334 of file xct.cpp.

References ASSERT_ND, foedus::storage::assert_within_valid_volatile_page(), foedus::xct::RetrospectiveLockList::binary_search(), foedus::xct::RetrospectiveLockList::clear_entries(), foedus::thread::Thread::cll_giveup_all_locks_after(), foedus::thread::Thread::cll_try_or_acquire_multiple_locks(), foedus::thread::Thread::cll_try_or_acquire_single_lock(), foedus::xct::RetrospectiveLockList::get_array(), foedus::xct::CurrentLockList::get_entry(), foedus::thread::Thread::get_global_volatile_page_resolver(), foedus::xct::CurrentLockList::get_or_add_entry(), foedus::xct::RetrospectiveLockList::is_empty(), foedus::xct::LockEntry::is_enough(), foedus::thread::Thread::is_hot_page(), foedus::kErrorCodeOk, foedus::kErrorCodeXctLockAbort, foedus::xct::kLockListPositionInvalid, foedus::xct::kNullUniversalLockId, foedus::xct::kReadLock, foedus::xct::kWriteLock, foedus::xct::LockEntry::universal_lock_id_, and foedus::xct::xct_id_to_universal_lock_id().

Referenced by on_record_read().

338  {
339 #ifndef NDEBUG
340  const auto& resolver = context_->get_global_volatile_page_resolver();
341  storage::assert_within_valid_volatile_page(resolver, tid_address);
342  ASSERT_ND(lock_id == xct_id_to_universal_lock_id(resolver, tid_address));
343 #endif // NDEBUG
344 
346  bool lets_take_lock = false;
347  if (!retrospective_lock_list_.is_empty()) {
348  // RLL is set, which means the previous run aborted for race.
349  // binary-search for each read-set is not cheap, but in this case better than aborts.
350  // So, let's see if we should take the lock.
351  rll_pos = retrospective_lock_list_.binary_search(lock_id);
352  if (rll_pos != kLockListPositionInvalid) {
353  ASSERT_ND(retrospective_lock_list_.get_array()[rll_pos].universal_lock_id_ == lock_id);
354  DVLOG(1) << "RLL recommends to take lock on this record!";
355  lets_take_lock = true;
356  }
357  }
358 
359  if (!lets_take_lock && context_->is_hot_page(page_address)) {
360  lets_take_lock = true;
361  }
362 
363  if (lets_take_lock) {
364  LockMode mode = intended_for_write ? kWriteLock : kReadLock;
365  LockListPosition cll_pos = current_lock_list_.get_or_add_entry(lock_id, tid_address, mode);
366  LockEntry* cll_entry = current_lock_list_.get_entry(cll_pos);
367  if (cll_entry->is_enough()) {
368  return; // already had the lock
369  }
370 
371  ErrorCode lock_ret;
372  if (rll_pos == kLockListPositionInvalid) {
373  // Then, this is a single read-lock to take.
374  lock_ret = context_->cll_try_or_acquire_single_lock(cll_pos);
375  // TODO(Hideaki) The above locks unconditionally in canonnical mode. Even in non-canonical,
376  // when it returns kErrorCodeXctLockAbort AND we haven't taken any write-lock yet,
377  // we might still want a retry here.. but it has pros/cons. Revisit later.
378  } else {
379  // Then we should take all locks before this too.
380  lock_ret = context_->cll_try_or_acquire_multiple_locks(cll_pos);
381  }
382 
383  if (lock_ret != kErrorCodeOk) {
384  ASSERT_ND(lock_ret == kErrorCodeXctLockAbort);
385  DVLOG(0) << "Failed to take some of the lock that might be beneficial later"
386  << ". We still go on because the locks here are not mandatory.";
387  // At this point, no point to be advised by RLL any longer.
388  // Let's clear it, and let's give-up all incomplete locks in CLL.
390  retrospective_lock_list_.clear_entries();
391  }
392  }
393 }
const memory::GlobalVolatilePageResolver & get_global_volatile_page_resolver() const
Returns the page resolver to convert page ID to page pointer.
Definition: thread.cpp:125
taken_mode_: we took a read-lock, not write-lock yet.
Definition: xct_id.hpp:105
UniversalLockId universal_lock_id_
Used to order locks in canonical order.
LockListPosition binary_search(UniversalLockId lock) const
Analogous to std::binary_search() for the given lock.
ErrorCode cll_try_or_acquire_single_lock(xct::LockListPosition pos)
Methods related to Current Lock List (CLL). These are the only interface in Thread to lock records...
const LockListPosition kLockListPositionInvalid
Definition: xct_id.hpp:149
0x0AA1 : "XCTION : Lock acquire failed." .
Definition: error_code.hpp:206
0 means no-error.
Definition: error_code.hpp:87
UniversalLockId xct_id_to_universal_lock_id(const memory::GlobalVolatilePageResolver &resolver, RwLockableXctId *lock)
Definition: xct_id.hpp:1226
uint32_t LockListPosition
Index in a lock-list, either RLL or CLL.
Definition: xct_id.hpp:148
void assert_within_valid_volatile_page(const memory::GlobalVolatilePageResolver &resolver, const void *address)
Definition: page.hpp:428
taken_mode_: we took a write-lock.
Definition: xct_id.hpp:110
LockEntry * get_entry(LockListPosition pos)
void cll_giveup_all_locks_after(xct::UniversalLockId address)
This gives up locks in CLL that are not yet taken.
bool is_hot_page(const storage::Page *page) const
Definition: thread.cpp:142
LockMode
Represents a mode of lock.
Definition: xct_id.hpp:95
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72
ErrorCode
Enum of error codes defined in error_code.xmacro.
Definition: error_code.hpp:85
const UniversalLockId kNullUniversalLockId
This never points to a valid lock, and also evaluates less than any valid locks. ...
Definition: xct_id.hpp:137
ErrorCode cll_try_or_acquire_multiple_locks(xct::LockListPosition upto_pos)
Acquire multiple locks up to the given position in canonical order.
LockListPosition get_or_add_entry(UniversalLockId lock_id, RwLockableXctId *lock, LockMode preferred_mode)
Adds an entry to this list, re-sorting part of the list if necessary to keep the sortedness.

Here is the call graph for this function:

Here is the caller graph for this function:

Xct& foedus::xct::Xct::operator= ( const Xct other)
delete
void foedus::xct::Xct::overwrite_to_pointer_set ( const storage::VolatilePagePointer pointer_address,
storage::VolatilePagePointer  observed 
)

The transaction that has updated the volatile pointer should not abort itself.

So, it calls this method to apply the version it installed.

Definition at line 226 of file xct.cpp.

References ASSERT_ND, foedus::xct::kSerializable, and foedus::xct::PointerAccess::observed_.

228  {
229  ASSERT_ND(pointer_address);
230  if (isolation_level_ != kSerializable) {
231  return;
232  }
233 
234  for (uint32_t i = 0; i < pointer_set_size_; ++i) {
235  if (pointer_set_[i].address_ == pointer_address) {
236  pointer_set_[i].observed_ = observed;
237  return;
238  }
239  }
240 }
storage::VolatilePagePointer observed_
Value of the volatile pointer as of the access.
Definition: xct_access.hpp:55
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72
Protects against all anomalies in all situations.
Definition: xct_id.hpp:86
void foedus::xct::Xct::remember_previous_xct_id ( XctId  new_id)
inline

Definition at line 386 of file xct.hpp.

References ASSERT_ND, and foedus::xct::XctId::before().

Referenced by issue_next_id().

386  {
387  ASSERT_ND(id_.before(new_id));
388  id_ = new_id;
389  ASSERT_ND(id_.get_ordinal() > 0);
390  ASSERT_ND(id_.is_valid());
391  }
bool before(const XctId &other) const __attribute__((always_inline))
Returns if this XctId is before other in serialization order, meaning this is either an invalid (unus...
Definition: xct_id.hpp:1074
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72

Here is the call graph for this function:

Here is the caller graph for this function:

void foedus::xct::Xct::set_default_hot_threshold_for_this_xct ( uint16_t  value)
inline

Definition at line 132 of file xct.hpp.

Referenced by foedus::thread::ThreadPimpl::handle_tasks().

132  {
133  default_hot_threshold_for_this_xct_ = value; }

Here is the caller graph for this function:

void foedus::xct::Xct::set_default_rll_for_this_xct ( bool  value)
inline

Definition at line 126 of file xct.hpp.

Referenced by foedus::thread::ThreadPimpl::handle_tasks().

126 { default_rll_for_this_xct_ = value; }

Here is the caller graph for this function:

void foedus::xct::Xct::set_default_rll_threshold_for_this_xct ( uint16_t  value)
inline

Definition at line 139 of file xct.hpp.

Referenced by foedus::thread::ThreadPimpl::handle_tasks().

139  {
140  default_rll_threshold_for_this_xct_ = value; }

Here is the caller graph for this function:

void foedus::xct::Xct::set_enable_rll_for_this_xct ( bool  value)
inline

Definition at line 124 of file xct.hpp.

124 { enable_rll_for_this_xct_ = value; }
void foedus::xct::Xct::set_hot_threshold_for_this_xct ( uint16_t  value)
inline

Definition at line 129 of file xct.hpp.

129 { hot_threshold_for_this_xct_ = value; }
void foedus::xct::Xct::set_rll_threshold_for_this_xct ( uint16_t  value)
inline

Definition at line 136 of file xct.hpp.

136 { rll_threshold_for_this_xct_ = value; }

Friends And Related Function Documentation

std::ostream& operator<< ( std::ostream &  o,
const Xct v 
)
friend

Definition at line 168 of file xct.cpp.

168  {
169  o << "<Xct>"
170  << "<active_>" << v.is_active() << "</active_>";
171  o << "<enable_rll_for_this_xct_>" << v.is_enable_rll_for_this_xct()
172  << "</enable_rll_for_this_xct_>";
173  o << "<default_rll_for_this_xct_>" << v.is_default_rll_for_this_xct()
174  << "</default_rll_for_this_xct_>";
175  o << "<hot_threshold>" << v.get_hot_threshold_for_this_xct() << "</hot_threshold>";
176  o << "<default_hot_threshold>" << v.get_default_hot_threshold_for_this_xct()
177  << "</default_hot_threshold>";
178  o << "<rll_threshold>" << v.get_rll_threshold_for_this_xct() << "</rll_threshold>";
179  o << "<default_rll_threshold>" << v.get_default_rll_threshold_for_this_xct()
180  << "</default_rll_threshold>";
181  if (v.is_active()) {
182  o << "<id_>" << v.get_id() << "</id_>"
183  << "<read_set_size>" << v.get_read_set_size() << "</read_set_size>"
184  << "<write_set_size>" << v.get_write_set_size() << "</write_set_size>"
185  << "<pointer_set_size>" << v.get_pointer_set_size() << "</pointer_set_size>"
186  << "<page_version_set_size>" << v.get_page_version_set_size() << "</page_version_set_size>"
187  << "<lock_free_read_set_size>" << v.get_lock_free_read_set_size()
188  << "</lock_free_read_set_size>"
189  << "<lock_free_write_set_size>" << v.get_lock_free_write_set_size()
190  << "</lock_free_write_set_size>";
191  const SysxctWorkspace* sysxct_workspace = v.get_sysxct_workspace();
192  o << *sysxct_workspace;
193  }
194  o << "</Xct>";
195  return o;
196 }

The documentation for this class was generated from the following files: