#include <glog/logging.h>
// ...
    resolver.resolve_offset_newpage(new_pointer));
// ...
  mini_page.pointers_[0].snapshot_pointer_ = 0;
// ...
  mini_page.pointers_[1].snapshot_pointer_ = 0;
// ...
  if (cb->first_root_locked_) {
    DVLOG(0) << "Interesting. other thread seems growing the first-layer. let him do that";
// ...
  DVLOG(0) << "Interesting. other thread is growing the first-layer. let him do that";
// ...
  DVLOG(0) << "Interesting. concurrent thread has already grown it.";
// ...
  DVLOG(0) << "Adopting Empty-range child for first layer root. storage_id=" << storage_id_;
// ...
  LOG(INFO) << "Growing first layer root. storage_id=" << storage_id_;
// ...
  if (record->is_moved()) {
    DVLOG(0) << "Interesting. concurrent thread has split or is splitting the parent page.";
// ...
  DVLOG(0) << "Interesting. concurrent thread has already grown it.";
// ...
  DVLOG(1) << "Easier. Empty-range foster child for non-first layer root.";
const memory::GlobalVolatilePageResolver & get_global_volatile_page_resolver() const
Returns the page resolver to convert page ID to page pointer.
const KeySlice kInfimumSlice
Represents a pointer to another page (usually a child page).
memory::NumaCoreMemory * get_thread_memory() const
Returns the private memory repository of this thread.
bool is_empty_range() const __attribute__((always_inline))
An empty-range page, either intermediate or border, never has any entries.
virtual ErrorCode run(xct::SysxctWorkspace *sysxct_workspace) override
Execute the system transaction.
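The excerpts above are from the root-growing system transactions that these run() methods implement. As a rough sketch only (the class name GrowRootSketch and the members cur_root_ and context_ are hypothetical, and kErrorCodeMemoryNoFreePages and is_null() are assumed names), such a run() might lock the current root, give up if a concurrent thread already grew it, grab a free volatile page for the new root, and retire the old one:

ErrorCode GrowRootSketch::run(xct::SysxctWorkspace* sysxct_workspace) {
  // Serialize against other growers by page-locking the current root.
  CHECK_ERROR_CODE(context_->sysxct_page_lock(
    sysxct_workspace, reinterpret_cast<storage::Page*>(cur_root_)));
  if (cur_root_->is_retired()) {
    DVLOG(0) << "Interesting. concurrent thread has already grown it.";
    return kErrorCodeOk;  // someone else finished the grow; nothing to do
  }
  // Grab a free volatile page that will become the new, taller root.
  memory::NumaCoreMemory* memory = context_->get_thread_memory();
  storage::VolatilePagePointer new_pointer = memory->grab_free_volatile_page_pointer();
  if (new_pointer.is_null()) {
    return kErrorCodeMemoryNoFreePages;  // the 0x0301 "not enough free volatile pages" case
  }
  // ... initialize the new root so it points at the old root's foster twins,
  // install new_pointer into the parent's DualPagePointer, and only then
  // retire the old root as of the current epoch ...
  context_->collect_retired_volatile_page(cur_root_->get_volatile_page_id());
  return kErrorCodeOk;
}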
bool is_locked() const __attribute__((always_inline))
thread::Thread *const context_
Thread context.
Root package of FOEDUS (Fast Optimistic Engine for Data Unification Services).
bool is_moved() const __attribute__((always_inline))
ErrorCode sysxct_record_lock(xct::SysxctWorkspace *sysxct_workspace, storage::VolatilePagePointer page_id, xct::RwLockableXctId *lock)
Takes a lock for a sysxct running under this thread.
bool is_border() const __attribute__((always_inline))
Represents one thread running on one NUMA core.
void collect_retired_volatile_page(storage::VolatilePagePointer ptr)
Keeps the specified volatile page as retired as of the current epoch.
Represents a pointer to a volatile page with modification count for preventing ABA.
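To see why the modification count is there, here is a minimal, self-contained illustration (not the actual FOEDUS pointer layout) of how packing a counter into the same 64-bit word that gets compare-and-swapped defeats the ABA problem:

#include <atomic>
#include <cstdint>

// Illustrative layout only: high bits hold a modification count, low bits hold
// the NUMA node and page offset, so the whole pointer fits one atomic word.
struct PackedVolatilePointerSketch {
  uint64_t word;
};

bool install_child_sketch(std::atomic<uint64_t>* slot, uint64_t observed, uint64_t desired) {
  // If another thread freed and reused the page in between, the mod-count part
  // of the word differs, so this CAS fails even when the page offset happens to
  // be identical again; that is exactly the ABA case the counter prevents.
  return slot->compare_exchange_strong(observed, desired);
}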
MiniPage & get_minipage(uint8_t index) __attribute__((always_inline))
DualPagePointer * get_next_layer(SlotIndex index) __attribute__((always_inline))
A simple spinlock using a boolean field.
bool is_layer_root() const __attribute__((always_inline))
Common base of MasstreeIntermediatePage and MasstreeBorderPage.
storage::VolatilePagePointer grab_free_volatile_page_pointer()
Wrapper for grab_free_volatile_page().
void grow_case_a_common(thread::Thread *context, DualPagePointer *pointer, MasstreePage *cur_root)
Engine * get_engine() const
VolatilePagePointer get_foster_minor() const __attribute__((always_inline))
bool is_retired() const __attribute__((always_inline))
VolatilePagePointer get_foster_major() const __attribute__((always_inline))
Repository of memories dynamically acquired within one CPU core (thread).
VolatilePagePointer volatile_pointer_
bool try_lock()
try-version of the lock.
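For illustration, the usual boolean-spinlock pattern behind such a try_lock()/is_locked() pair looks roughly like this (a sketch, not FOEDUS's actual implementation):

#include <atomic>

class BooleanSpinlockSketch {
 public:
  // Succeeds only if this call flips the flag from false to true.
  bool try_lock() { return !locked_.exchange(true, std::memory_order_acquire); }
  void lock() { while (!try_lock()) { /* spin; a pause/yield could go here */ } }
  void unlock() { locked_.store(false, std::memory_order_release); }
  bool is_locked() const { return locked_.load(std::memory_order_relaxed); }
 private:
  std::atomic<bool> locked_{false};
};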
MasstreeBorderPage *const parent_
The border page of the parent layer.
ErrorCode sysxct_page_lock(xct::SysxctWorkspace *sysxct_workspace, storage::Page *page)
Takes a page lock in the same page for a sysxct running under this thread.
ErrorCode grow_case_b_common(thread::Thread *context, DualPagePointer *pointer, MasstreePage *cur_root)
uint8_t get_btree_level() const __attribute__((always_inline))
Used only in Masstree.
Just a marker to denote that the memory region represents a data page.
uint8_t get_layer() const __attribute__((always_inline))
Layer-0 stores the first 8-byte slice, Layer-1 the next 8-byte slice, and so on.
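To make the slicing concrete, here is a hypothetical helper (not the FOEDUS API) that extracts the 8-byte slice a given layer indexes, zero-padding short keys and using big-endian order so that integer comparison matches memcmp order:

#include <cstdint>
#include <cstring>

uint64_t slice_for_layer_sketch(const char* key, uint32_t key_length, uint8_t layer) {
  const uint32_t offset = layer * 8U;  // layer 0 covers bytes [0,8), layer 1 covers [8,16), ...
  uint64_t slice = 0;
  if (offset < key_length) {
    const uint32_t len = (key_length - offset) < 8U ? (key_length - offset) : 8U;
    std::memcpy(&slice, key + offset, len);  // shorter remainders stay zero-padded
  }
  return __builtin_bswap64(slice);  // big-endian view (GCC/Clang builtin), assuming a little-endian host
}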
void set_retired() __attribute__((always_inline))
KeySlice get_high_fence() const __attribute__((always_inline))
0x0301 : "MEMORY : Not enough free volatile pages. Check the config of MemoryOptions" ...
Represents a Masstree storage.
KeySlice get_low_fence() const __attribute__((always_inline))
const uint16_t pointer_index_
Index of the pointer in parent.
KeySlice get_foster_fence() const __attribute__((always_inline))
#define CHECK_ERROR_CODE(x)
This macro calls x and checks its returned error code.
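For reference, an error-propagating macro of this kind typically amounts to the following (a sketch assuming kErrorCodeOk denotes success; the real definition may differ in details such as branch hints):

#define CHECK_ERROR_CODE_SKETCH(x)       \
  do {                                   \
    ErrorCode __e = (x);                 \
    if (__e != kErrorCodeOk) return __e; \
  } while (0)

// Typical use inside a sysxct body: abort run() as soon as any step fails.
// CHECK_ERROR_CODE_SKETCH(context_->sysxct_page_lock(sysxct_workspace, page));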
const PageVersion * get_version_address() const __attribute__((always_inline))
void initialize_volatile_page(StorageId storage_id, VolatilePagePointer page_id, uint8_t layer, uint8_t level, KeySlice low_fence, KeySlice high_fence)
VolatilePagePointer get_volatile_page_id() const
PageVersionStatus status_
virtual ErrorCode run(xct::SysxctWorkspace *sysxct_workspace) override
Execute the system transaction.
const KeySlice kSupremumSlice
xct::RwLockableXctId * get_owner_id(SlotIndex index) __attribute__((always_inline))
Represents one intermediate page in Masstree Storage.
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
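One common way to get that behavior, shown purely as a sketch (the actual ASSERT_ND definition may differ), is to reference the expression without evaluating it in release builds:

#include <cassert>

#ifdef NDEBUG
// sizeof(x) never evaluates x, so the check costs nothing in release mode, yet
// variables used only in assertions still count as used (no -Wunused warnings).
#define ASSERT_ND_SKETCH(x) do { (void)sizeof(x); } while (0)
#else
#define ASSERT_ND_SKETCH(x) assert(x)
#endif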
bool is_locked_by_me() const
void memory_fence_release()
Equivalent to std::atomic_thread_fence(std::memory_order_release).
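A typical use of a release fence, sketched with hypothetical types: fully initialize a node, fence, then publish its pointer, so that a reader pairing this with an acquire fence or acquire load sees a fully built node:

#include <atomic>

struct NodeSketch { int built_field; };  // stand-in for a freshly initialized page

void publish_sketch(std::atomic<NodeSketch*>* slot, NodeSketch* fresh) {
  fresh->built_field = 42;                              // build everything first
  std::atomic_thread_fence(std::memory_order_release);  // what memory_fence_release() corresponds to
  slot->store(fresh, std::memory_order_relaxed);        // now expose the pointer
}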
thread::Thread *const context_
Thread context.
CONTROL_BLOCK * get_control_block() const
bool does_point_to_layer(SlotIndex index) const __attribute__((always_inline))
StorageId storage_id_
ID of the masstree storage to grow.
ErrorCode
Enum of error codes defined in error_code.xmacro.
Per-thread reused work memory for system transactions.
bool is_keylocked() const __attribute__((always_inline))