Non-contiguous excerpt from the record-reservation sysxct source; elided stretches are marked with "...":

#include <glog/logging.h>
...
  DVLOG(0) << "Interesting. this page has been split";
...
  DVLOG(2) << "Need to expand the record.";
...
} else if (record->is_next_layer()) {
  DVLOG(0) << "Interesting. the record now points to next layer";
...
  DVLOG(0) << "Ouch. need to split for allocating a space for record expansion";
...
if (record->is_next_layer()) {
  DVLOG(0) << "Interesting. the record now points to next layer";
...
  DVLOG(1) << "We need to expand the record to make it a next-layer."
    " If this happens too often and is the bottleneck, "
    " you should have used physical_payload_hint when you initially inserted the record.";
...
  DVLOG(0) << "Ouch. need to split for allocating a space for next-layer";
...
  DVLOG(1) << "Aggressively creating a next-layer.";
...
if (root == nullptr) {
...
initial_id.set_next_layer();
...
initial_id.set_deleted();
...
  DVLOG(1) << "Ouch. need to split for allocating a space for new record";
void reserve_record_space(SlotIndex index, xct::XctId initial_owner_id, KeySlice slice, const void *suffix, KeyLength remainder_length, PayloadLength payload_count)
Installs a new physical record that doesn't exist logically (delete bit on).
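The "delete bit on" wording is the key point: the system transaction makes the record physically present while keeping it logically absent, and the user transaction flips the bit when it commits. A hedged usage sketch assembled from the signatures documented on this page (locking and surrounding checks omitted; index stands for the slot chosen earlier):

  xct::XctId initial_id = get_initial_xid();
  initial_id.set_deleted();  // physically present, logically absent
  target_->reserve_record_space(
    index, initial_id, slice_, suffix_, remainder_length_, payload_count_);
  // A later user transaction clears the delete bit, making the insert visible.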
MasstreeBorderPage *const target_
The page to install a new physical record.
const KeySlice kInfimumSlice
SlotIndex get_key_count() const __attribute__((always_inline))
Physical key count in this page (some of those keys might be deleted).
MasstreeBorderPage * allocate_new_border_page(thread::Thread *context)
DualPagePointer
Represents a pointer to another page (usually a child page).
const KeySlice slice_
The slice of the key.
memory::NumaCoreMemory * get_thread_memory() const
Returns the private memory repository of this thread.
PagePoolOffset grab_free_volatile_page()
Acquires one free volatile page from local page pool.
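The failure path is the part worth remembering: the pool hands back a compact PagePoolOffset, and offset 0 means the local pool is exhausted, which surfaces as the 0x0301 error listed below. A sketch combining the calls documented here; the resolver method name and the error-code enum name are assumptions:

  memory::NumaCoreMemory* core_memory = context_->get_thread_memory();
  memory::PagePoolOffset offset = core_memory->grab_free_volatile_page();
  if (offset == 0) {
    return kErrorCodeMemoryNoFreePages;  // assumed name for the 0x0301 error
  }
  // The local resolver converts the compact offset back into an address
  // (resolve_offset_newpage is an assumed method name).
  MasstreeBorderPage* new_page = reinterpret_cast<MasstreeBorderPage*>(
    context_->get_local_volatile_page_resolver().resolve_offset_newpage(offset));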
The first epoch (before wrap-around) that might have transactions is ep-3.
uint16_t SlotIndex
Index of a record in a (border) page.
FindKeyForReserveResult
Return value for find_key_for_reserve().
void initialize_as_layer_root_physical(VolatilePagePointer page_id, MasstreeBorderPage *parent, SlotIndex parent_index)
A physical-only method to initialize this page as a volatile page of a layer-root pointed from the gi...
PayloadLength get_max_payload_length(SlotIndex index) const __attribute__((always_inline))
bool is_locked() const __attribute__((always_inline))
foedus
Root package of FOEDUS (Fast Optimistic Engine for Data Unification Services).
bool is_moved() const __attribute__((always_inline))
ErrorCode sysxct_record_lock(xct::SysxctWorkspace *sysxct_workspace, storage::VolatilePagePointer page_id, xct::RwLockableXctId *lock)
Takes a lock for a sysxct running under this thread.
thread::Thread
Represents one thread running on one NUMA core.
uint32_t PagePoolOffset
Offset in PagePool that compactly represents the page address (unlike an 8-byte pointer).
VolatilePagePointer
Represents a pointer to a volatile page with modification count for preventing ABA.
xct::XctId
Persistent status part of Transaction ID.
xct::XctId get_initial_xid()
MasstreeBorderPage
Represents one border page in Masstree Storage.
DualPagePointer * get_next_layer(SlotIndex index) __attribute__((always_inline))
const PayloadLength payload_count_
Minimal required length of the payload.
uint64_t KeySlice
Each key slice is an 8-byte integer.
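Integer comparison of slices has to agree with byte-wise (memcmp) comparison of the original keys, so the up-to-8 bytes are packed big-endian and left-aligned. A self-contained sketch of that normalization (illustrative, not the FOEDUS routine; keys whose slices tie are further distinguished by the stored remainder):

  #include <algorithm>
  #include <cstdint>

  using KeySlice = uint64_t;

  // Pack up to 8 key bytes big-endian so that integer comparison of slices
  // matches memcmp() order of the keys; shorter keys are zero-padded.
  KeySlice make_slice(const void* key, uint32_t length) {
    const uint8_t* bytes = static_cast<const uint8_t*>(key);
    const uint32_t used = std::min<uint32_t>(length, 8);
    KeySlice slice = 0;
    for (uint32_t i = 0; i < used; ++i) {
      slice = (slice << 8) | bytes[i];
    }
    return slice << (8 * (8 - used));  // left-align, low bytes zero
  }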
thread::Thread *const context_
Thread context.
bool can_accomodate(SlotIndex new_index, KeyLength remainder_length, PayloadLength payload_count) const __attribute__((always_inline))
bool is_retired() const __attribute__((always_inline))
memory::NumaCoreMemory
Repository of memories dynamically acquired within one CPU core (thread).
void set(Epoch::EpochInteger epoch_int, uint32_t ordinal)
FindKeyForReserveResult find_key_for_reserve(SlotIndex from_index, SlotIndex to_index, KeySlice slice, const void *suffix, KeyLength remainder) const __attribute__((always_inline))
This is for the case we are looking for either the matching slot or the slot we will modify...
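A sketch of how this lookup anchors the whole sysxct: it scans from the documented hint to the current key count and classifies what it finds. The result's field and category names below are assumptions for illustration:

  FindKeyForReserveResult result = target_->find_key_for_reserve(
    hint_check_from_, target_->get_key_count(), slice_, suffix_, remainder_length_);
  // The result is assumed to carry the slot index plus a match kind --
  // exact-match record, next-layer pointer, conflicting record, or not-found --
  // and each kind selects one branch of the flow sketched above.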
VolatilePagePointer volatile_pointer_
bool out_split_needed_
[Out] Whether a page split turned out to be needed to complete the reservation.
ErrorCode sysxct_page_lock(xct::SysxctWorkspace *sysxct_workspace, storage::Page *page)
Takes a page lock on the given page for a sysxct running under this thread.
SnapshotPagePointer snapshot_pointer_
void increment_key_count() __attribute__((always_inline))
virtual ErrorCode run(xct::SysxctWorkspace *sysxct_workspace) override
Execute the system transaction.
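A system transaction's run() typically locks its target, re-validates under the lock (the page may have split or moved in the meantime), performs only physical changes, and reports an ErrorCode. A shape sketch around the documented entry point; the body is illustrative, not the actual implementation:

  ErrorCode ReserveRecords::run(xct::SysxctWorkspace* sysxct_workspace) {
    // Lock the page so no concurrent split or expansion races with us.
    CHECK_ERROR_CODE(context_->sysxct_page_lock(
      sysxct_workspace, reinterpret_cast<storage::Page*>(target_)));
    if (target_->is_moved()) {
      return kErrorCodeOk;  // someone already split this page; caller re-traverses
    }
    // ... find, expand, or reserve the record physically, as sketched above ...
    return kErrorCodeOk;
  }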
const void *const suffix_
Suffix of the key.
const SlotIndex kBorderPageMaxSlots
Maximum number of slots in one MasstreeBorderPage.
void set(uint8_t numa_node, memory::PagePoolOffset offset)
uint8_t get_layer() const __attribute__((always_inline))
Layer-0 stores the first 8-byte slice, Layer-1 the next 8 bytes...
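Layer arithmetic in one line: layer L compares bytes [8*L, 8*L+8) of the full key, so a 16-byte key uses layer 0 for bytes 0-7 and, through a next-layer pointer, layer 1 for bytes 8-15. A tiny self-contained sketch (helper name is illustrative):

  #include <cstddef>
  #include <cstdint>

  // Where layer `layer` starts reading the key, and how many bytes (at most
  // 8) that layer consumes. Illustrative, not the FOEDUS API.
  const uint8_t* layer_slice_bytes(const uint8_t* key, size_t key_length,
                                   uint8_t layer, size_t* out_length) {
    const size_t skipped = 8u * layer;  // bytes handled by upper layers
    const size_t rest = key_length > skipped ? key_length - skipped : 0;
    *out_length = rest < 8 ? rest : 8;
    return key + skipped;
  }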
const bool should_aggresively_create_next_layer_
When we CAN create a next layer for the new record, whether to make it a next-layer from the beginnin...
void assert_entries() __attribute__((always_inline))
const memory::LocalPageResolver & get_local_volatile_page_resolver() const
Returns the page resolver that converts only local page IDs to page pointers.
0x0301 : "MEMORY : Not enough free volatile pages. Check the config of MemoryOptions" ...
void initialize_volatile_page(StorageId storage_id, VolatilePagePointer page_id, uint8_t layer, KeySlice low_fence, KeySlice high_fence)
bool try_expand_record_in_page_physical(PayloadLength payload_count, SlotIndex record_index)
A physical-only method to expand a record within this page without any logical change.
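In-page expansion is attempted before anything drastic because it is purely physical and requires no logical change; only when the page has no contiguous free space does the sysxct escalate to a split, which is what the "Ouch" log lines in the excerpt mark. A hedged usage fragment from the documented signature (record_index stands for the slot found earlier):

  if (!target_->try_expand_record_in_page_physical(payload_count_, record_index)) {
    out_split_needed_ = true;  // no room in this page; caller splits and retries
  }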
#define CHECK_ERROR_CODE(x)
This macro calls x and checks its returned error code.
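The usual shape of such a macro is an early return, which is why every call site must sit in a function that itself returns ErrorCode. A sketch of that pattern (the actual FOEDUS definition may differ, e.g. in branch-prediction hints):

  #define CHECK_ERROR_CODE(x)      \
    do {                           \
      ErrorCode __e = (x);         \
      if (__e != kErrorCodeOk) {   \
        return __e;                \
      }                            \
    } while (0)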
VolatilePagePointer get_volatile_page_id() const
ThreadGroupId get_numa_node() const
const KeyLength remainder_length_
Length of the remainder.
const KeySlice kSupremumSlice
xct::RwLockableXctId * get_owner_id(SlotIndex index) __attribute__((always_inline))
const SlotIndex hint_check_from_
The in-page location from which this sysxct will look for matching records.
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
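The "warning-free, zero cost in release mode" property typically comes from swallowing the expression with sizeof: the operand is never evaluated (so even a function call inside it costs nothing), yet any variable it mentions still counts as used. A sketch of the technique, not the exact FOEDUS definition:

  #include <cassert>

  #ifndef NDEBUG
    #define ASSERT_ND(x) assert(x)
  #else
    #define ASSERT_ND(x) do { (void) sizeof(x); } while (0)
  #endif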
void reserve_initially_next_layer(SlotIndex index, xct::XctId initial_owner_id, KeySlice slice, const DualPagePointer &pointer)
For creating a record that is initially a next-layer.
void memory_fence_release()
Equivalent to std::atomic_thread_fence(std::memory_order_release).
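Release fences are what let a writer publish a fully initialized record before making it reachable. A self-contained illustration of the release/acquire pairing in portable C++11 (generic example, not FOEDUS code):

  #include <atomic>

  int payload = 0;
  std::atomic<bool> ready{false};

  void writer() {
    payload = 42;                                         // fill the record
    std::atomic_thread_fence(std::memory_order_release);  // memory_fence_release()
    ready.store(true, std::memory_order_relaxed);         // then publish
  }

  bool reader(int* out) {
    if (!ready.load(std::memory_order_relaxed)) return false;
    std::atomic_thread_fence(std::memory_order_acquire);  // pairs with the release
    *out = payload;  // guaranteed to observe 42
    return true;
  }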
bool does_point_to_layer(SlotIndex index) const __attribute__((always_inline))
ErrorCode
Enum of error codes defined in error_code.xmacro.
xct::SysxctWorkspace
Per-thread reused work memory for system transactions.