18 #ifndef FOEDUS_STORAGE_MASSTREE_MASSTREE_PAGE_IMPL_HPP_
19 #define FOEDUS_STORAGE_MASSTREE_MASSTREE_PAGE_IMPL_HPP_
251 = (kMaxIntermediateSeparators + 1U) * (kMaxIntermediateMiniSeparators + 1U);
289 ASSERT_ND(key_count <= kMaxIntermediateMiniSeparators);
290 for (uint8_t i = 0; i < key_count; ++i) {
291 if (slice < separators_[i]) {
314 ASSERT_ND(key_count <= kMaxIntermediateSeparators);
315 for (uint8_t i = 0; i < key_count; ++i) {
316 if (slice < separators_[i]) {
398 separators_[minipage_index] = new_separator;
417 MiniPage mini_pages_[10];
627 next_offset_ += length;
628 ASSERT_ND(next_offset_ <=
sizeof(data_));
634 return reinterpret_cast<const Slot*
>(
this + 1) - index - 1;
640 return reinterpret_cast<Slot*
>(
this + 1) - index - 1;
646 return reinterpret_cast<Slot*
>(
this + 1) - index - 1;
651 int64_t index =
reinterpret_cast<const Slot*
>(
this + 1) - slot - 1;
653 ASSERT_ND(index < static_cast<int64_t>(
sizeof(data_) /
sizeof(
Slot)));
663 if (consumed >
sizeof(data_)) {
666 return sizeof(data_) - consumed;
743 return record + skipped;
748 return record + skipped;
760 ASSERT_ND(record_offset <
sizeof(data_));
761 return data_ + record_offset;
765 ASSERT_ND(record_offset <
sizeof(data_));
766 return data_ + record_offset;
770 KeyLength remainder_length)
const ALWAYS_INLINE {
773 return record + skipped;
777 KeyLength remainder_length) ALWAYS_INLINE {
780 return record + skipped;
784 KeyLength remainder_length) ALWAYS_INLINE {
790 KeyLength remainder_length)
const ALWAYS_INLINE {
803 return slices_[index];
807 slices_[index] = slice;
942 if (key_count > kInitiallyFetched) {
945 uint16_t cachelines = ((key_count - kInitiallyFetched) /
sizeof(
KeySlice)) + 1;
947 reinterpret_cast<const char*>(
this) + kInitialPrefetchBytes,
1012 bool consecutive_inserts_;
1087 }
else if (index_ < page_->get_key_count()) {
1110 if (remainder_length >=
sizeof(
KeySlice)) {
1111 return remainder_length -
sizeof(
KeySlice);
1130 if (remainder <=
sizeof(
KeySlice)) {
1132 for (
SlotIndex i = 0; i < key_count; ++i) {
1134 if (
LIKELY(slice != rec_slice)) {
1139 if (klen == remainder) {
1145 for (
SlotIndex i = 0; i < key_count; ++i) {
1147 if (
LIKELY(slice != rec_slice)) {
1163 if (klen == remainder) {
1166 if (std::memcmp(record_suffix, suffix, remainder -
sizeof(
KeySlice)) == 0) {
1185 if (from_index == 0) {
1188 for (
SlotIndex i = from_index; i < to_index; ++i) {
1207 if (from_index == 0) {
1210 if (remainder <=
sizeof(
KeySlice)) {
1211 for (
SlotIndex i = from_index; i < to_index; ++i) {
1213 if (
LIKELY(slice != rec_slice)) {
1217 if (klen == remainder) {
1223 for (
SlotIndex i = from_index; i < to_index; ++i) {
1225 if (
LIKELY(slice != rec_slice)) {
1235 }
else if (klen <=
sizeof(
KeySlice)) {
1243 if (klen == remainder &&
1244 std::memcmp(record_suffix, suffix, remainder -
sizeof(
KeySlice)) == 0) {
1264 if (remainder <=
sizeof(
KeySlice)) {
1265 for (
SlotIndex i = 0; i < key_count; ++i) {
1267 if (rec_slice < slice) {
1269 }
else if (rec_slice > slice) {
1275 if (klen == remainder) {
1278 }
else if (klen < remainder) {
1286 for (
SlotIndex i = 0; i < key_count; ++i) {
1288 if (rec_slice < slice) {
1290 }
else if (rec_slice > slice) {
1311 if (klen == remainder &&
1312 std::memcmp(record_suffix, suffix, remainder -
sizeof(
KeySlice)) == 0) {
1351 next_offset_ += record_size;
1353 consecutive_inserts_ =
true;
1354 }
else if (consecutive_inserts_) {
1358 if (prev_slice > slice || (prev_slice == slice && prev_klen > remainder_length)) {
1359 consecutive_inserts_ =
false;
1364 if (suffix_length > 0) {
1366 std::memcpy(record, suffix, suffix_length);
1369 if (suffix_length_aligned > suffix_length) {
1370 std::memset(record + suffix_length, 0, suffix_length_aligned - suffix_length);
1401 next_offset_ += record_size;
1403 consecutive_inserts_ =
true;
1404 }
else if (consecutive_inserts_) {
1407 if (prev_slice > slice) {
1408 consecutive_inserts_ =
false;
1437 next_offset_ += record_size;
1489 ASSERT_ND(keys <= kMaxIntermediateSeparators);
1490 const MiniPage& minipage = mini_pages_[keys];
1492 ASSERT_ND(keys_mini <= kMaxIntermediateMiniSeparators);
1502 MiniPage& mini = mini_pages_[index];
1507 if (index_mini < kMaxIntermediateMiniSeparators) {
1512 ASSERT_ND(index == 0 || low_fence > separators_[index - 1]);
1528 ASSERT_ND(key_count < kMaxIntermediateSeparators);
1529 ASSERT_ND(mini_pages_[key_count].key_count_ == kMaxIntermediateMiniSeparators);
1531 separators_[key_count] = low_fence;
1532 MiniPage& new_minipage = mini_pages_[key_count + 1];
1549 if (index_mini == 0) {
1556 *separator_low = minipage.
separators_[index_mini - 1U];
1559 if (index == key_count) {
1565 *separator_high = minipage.
separators_[index_mini];
1574 if (new_index == 0) {
1583 return required <= available;
1590 if (new_index == 0) {
1598 return required <= available;
1607 if (remainder != klen) {
1612 if (slice != rec_slice) {
1615 if (remainder >
sizeof(
KeySlice)) {
1619 remainder -
sizeof(
KeySlice)) == 0;
1638 const char* suffix = be_key + (
get_layer() + 1) * kSliceLen;
1639 return ltgt_key(index, slice, suffix, remainder);
1651 if (slice < rec_slice) {
1653 }
else if (slice > rec_slice) {
1658 if (remainder == rec_remainder) {
1660 }
else if (remainder < rec_remainder) {
1675 KeyLength min_remainder = std::min(remainder, rec_remainder);
1677 int cmp = std::memcmp(suffix, rec_suffix, min_remainder);
1682 if (remainder == rec_remainder) {
1684 }
else if (remainder < rec_remainder) {
1712 return (remainder == rec_remainder);
1742 #endif // FOEDUS_STORAGE_MASSTREE_MASSTREE_PAGE_IMPL_HPP_
void reserve_record_space(SlotIndex index, xct::XctId initial_owner_id, KeySlice slice, const void *suffix, KeyLength remainder_length, PayloadLength payload_count)
Installs a new physical record that doesn't exist logically (delete bit on).
KeyLength get_remainder_length(SlotIndex index) const __attribute__((always_inline))
void set_slice(SlotIndex index, KeySlice slice) __attribute__((always_inline))
DataOffset physical_record_length_
Byte count this record occupies.
const KeySlice kInfimumSlice
void prefetch_additional_if_needed(SlotIndex key_count) const __attribute__((always_inline))
void write_lengthes_oneshot(SlotLengthPart new_value) __attribute__((always_inline))
Writes lengthes_ in one-shot.
KeySlice high_fence_
Inclusive high fence of this page.
SlotIndex get_key_count() const __attribute__((always_inline))
physical key count (those keys might be deleted) in this page.
FindKeyForReserveResult(SlotIndex index, MatchType match_type)
bool verify_slot_lengthes(SlotIndex index) const
uint16_t unused_
unused so far
PayloadLength get_max_payload_peek() const __attribute__((always_inline))
This might be affected by concurrent threads.
DataOffset get_offset_in_bytes(SlotIndex index) const __attribute__((always_inline))
KeySlice get_slice(SlotIndex index) const __attribute__((always_inline))
T align8(T value)
8-alignment.
void release_pages_recursive_common(const memory::GlobalVolatilePageResolver &page_resolver, memory::PageReleaseBatch *batch)
const uint32_t kCommonPageHeaderSize
Size of the base page class (MasstreePage), which is the common header for intermediate and border pa...
void release_pages_recursive(const memory::GlobalVolatilePageResolver &page_resolver, memory::PageReleaseBatch *batch)
const uint64_t kSliceLen
Shorthand for sizeof(KeySlice).
KeyLength calculate_suffix_length_aligned(KeyLength remainder_length) __attribute__((always_inline))
SlotLengthUnion lengthes_
Stores mutable length information of the record.
Represents a pointer to another page (usually a child page).
void prefetch() const __attribute__((always_inline))
prefetch upto 256th bytes.
bool is_empty_range() const __attribute__((always_inline))
An empty-range page, either intermediate or border, never has any entries.
uint16_t PayloadLength
Represents a byte-length of a payload in this package.
friend std::ostream & operator<<(std::ostream &o, const MasstreeBorderPage &v)
defined in masstree_page_debug.cpp.
Definitions of IDs in this package and a few related constant values.
PageVersion & get_version() __attribute__((always_inline))
uint16_t SlotIndex
Index of a record in a (border) page.
return value for find_key_for_reserve().
const char * get_record_payload_from_offsets(DataOffset record_offset, KeyLength remainder_length) const __attribute__((always_inline))
void initialize_snapshot_page(StorageId storage_id, SnapshotPagePointer page_id, uint8_t layer, KeySlice low_fence, KeySlice high_fence)
void initialize_as_layer_root_physical(VolatilePagePointer page_id, MasstreeBorderPage *parent, SlotIndex parent_index)
A physical-only method to initialize this page as a volatile page of a layer-root pointed from the gi...
PayloadLength get_max_payload_length(SlotIndex index) const __attribute__((always_inline))
const KeyLength kMaxKeyLength
Max length of a key.
const MiniPage & get_minipage(uint8_t index) const __attribute__((always_inline))
const xct::RwLockableXctId * get_owner_id(SlotIndex index) const __attribute__((always_inline))
MasstreePage & operator=(const MasstreePage &other)=delete
bool is_locked() const __attribute__((always_inline))
bool is_retired() const __attribute__((always_inline))
PayloadLength payload_length_
Byte length of the payload.
uint32_t StorageId
Unique ID for storage.
Root package of FOEDUS (Fast Optimistic Engine for Data Unification Services).
MatchType
Used in FindKeyForReserveResult.
Represents a record of write-access during a transaction.
bool is_moved() const __attribute__((always_inline))
bool is_border() const __attribute__((always_inline))
SnapshotPagePointer get_snapshot_page_id() const
const PageHeader & header() const
uint32_t PagePoolOffset
Offset in PagePool that compactly represents the page address (unlike 8 bytes pointer).
KeySlice low_fence_
Inclusive low fence of this page.
DataOffset get_next_offset() const
void prefetch_general() const __attribute__((always_inline))
prefetch upto keys/separators, whether this page is border or interior.
bool compare_key(SlotIndex index, const void *be_key, KeyLength key_length) const __attribute__((always_inline))
actually this method should be renamed to equal_key...
Slot * get_new_slot(SlotIndex index) __attribute__((always_inline))
uint16_t DataOffset
Byte-offset in a page.
bool is_foster_minor_null() const __attribute__((always_inline))
PageType
The following 1-byte value is stored in the common page header.
Forward declarations of classes in transaction package.
bool is_foster_major_null() const __attribute__((always_inline))
Result of track_moved_record().
KeySlice foster_fence_
Inclusive low_fence of foster child.
Represents a pointer to a volatile page with modification count for preventing ABA.
MiniPage & get_minipage(uint8_t index) __attribute__((always_inline))
Just a synonym of XctId to be used as a page lock mechanism.
Forward declarations of classes in root package.
Persistent status part of Transaction ID.
SlotLengthPart read_lengthes_oneshot() const __attribute__((always_inline))
Reads lengthes_ of this slot in one-shot.
void prefetch() const
prefetch upto separators.
Represents one border page in Masstree Storage.
bool is_consecutive_inserts() const
Whether this page is receiving only sequential inserts.
DualPagePointer * get_next_layer(SlotIndex index) __attribute__((always_inline))
void set_foster_major_offset_unsafe(memory::PagePoolOffset offset) __attribute__((always_inline))
As the name suggests, this should be used only by composer.
void prefetch() const
prefetch upto separators.
void increase_next_offset(DataOffset length)
const DataOffset kBorderPageDataPartOffset
Offset of data_ member in MasstreeBorderPage.
XctId xct_id_
the second 64bit: Persistent status part of TID.
void set_moved() __attribute__((always_inline))
HighFence(KeySlice slice, bool supremum)
void prefetch_cachelines(const void *address, int cacheline_count)
Prefetch multiple contiguous cachelines to L1 cache.
uint64_t KeySlice
Each key slice is an 8-byte integer.
bool is_layer_root() const __attribute__((always_inline))
friend std::ostream & operator<<(std::ostream &o, const MasstreeIntermediatePage &v)
defined in masstree_page_debug.cpp.
Definitions of IDs in this package and a few related constant values.
const uint16_t kMaxIntermediateMiniSeparators
Max number of separators stored in the second level of intermediate pages.
VolatilePagePointer foster_twin_[2]
Points to foster children, or tentative child pages.
const char * get_record(SlotIndex index) const __attribute__((always_inline))
uint16_t KeyLength
Represents a byte-length of a key in this package.
Common base of MasstreeIntermediatePage and MasstreeBorderPage.
#define LIKELY(x)
Hints that x is highly likely true.
char * get_record(SlotIndex index) __attribute__((always_inline))
PayloadLength get_max_payload_stable(SlotLengthPart stable_length) const __attribute__((always_inline))
This is not affected by concurrent threads.
bool can_accomodate(SlotIndex new_index, KeyLength remainder_length, PayloadLength payload_count) const __attribute__((always_inline))
VolatilePagePointer get_foster_minor() const __attribute__((always_inline))
PayloadLength get_payload_length(SlotIndex index) const __attribute__((always_inline))
Slot & operator=(const Slot &)=delete
const DualPagePointer * get_next_layer_from_offsets(DataOffset record_offset, KeyLength remainder_length) const __attribute__((always_inline))
bool is_retired() const __attribute__((always_inline))
VolatilePagePointer get_foster_major() const __attribute__((always_inline))
SlotLengthPart components
McsRwLock lock_
the first 64bit: Locking part of TID
void set_offset_unsafe(memory::PagePoolOffset offset)
This is used only in special places (snapshot composer).
KeySlice get_separator(uint8_t index) const __attribute__((always_inline))
bool does_point_to_layer() const __attribute__((always_inline))
void initialize_volatile_common(StorageId storage_id, VolatilePagePointer page_id, PageType page_type, uint8_t layer, uint8_t level, KeySlice low_fence, KeySlice high_fence)
KeyLength get_suffix_length() const __attribute__((always_inline))
const uint16_t kCachelineSize
Byte count of one cache line.
The MCS reader-writer lock variant of LockableXctId.
void append_next_layer_snapshot(xct::XctId initial_owner_id, KeySlice slice, SnapshotPagePointer pointer)
Installs a next layer pointer.
KeyLength get_suffix_length(SlotIndex index) const __attribute__((always_inline))
DualPagePointer pointers_[kMaxIntermediateMiniSeparators+1]
FindKeyForReserveResult find_key_for_reserve(SlotIndex from_index, SlotIndex to_index, KeySlice slice, const void *suffix, KeyLength remainder) const __attribute__((always_inline))
This is for the case we are looking for either the matching slot or the slot we will modify...
void append_pointer_snapshot(KeySlice low_fence, SnapshotPagePointer pointer)
Appends a new pointer and separator in an existing mini page, used only by snapshot composer...
DataOffset available_space() const
Returns usable data space in bytes.
VolatilePagePointer volatile_pointer_
bool is_moved() const __attribute__((always_inline))
MasstreeBorderPage & operator=(const MasstreeBorderPage &other)=delete
void set_high_fence_unsafe(KeySlice high_fence) __attribute__((always_inline))
As the name suggests, this should be used only by composer.
PageVersion * get_version_address() __attribute__((always_inline))
void release_pages_recursive_parallel(Engine *engine)
This method is used when we release a large number of volatile pages, most likely when we drop a stor...
void assert_entries_impl() const
defined in masstree_page_debug.cpp.
void set_retired() __attribute__((always_inline))
SlotIndex find_key_normalized(SlotIndex from_index, SlotIndex to_index, KeySlice slice) const __attribute__((always_inline))
Specialized version for 8 byte native integer search.
DataOffset original_offset_
The value of offset_ as of the creation of this record.
const DualPagePointer * get_next_layer(SlotIndex index) const __attribute__((always_inline))
Definitions of IDs in this package and a few related constant values.
Slot()=delete
only reinterpret_cast
const uint16_t kMaxIntermediateSeparators
Max number of separators stored in the first level of intermediate pages.
An exclusive-only (WW) MCS lock data structure.
static DataOffset required_data_space(KeyLength remainder_length, PayloadLength payload_length)
returns the byte size of required contiguous space in data_ to insert a new record of the given remai...
void install_foster_twin(VolatilePagePointer minor, VolatilePagePointer major, KeySlice foster_fence)
uint64_t SnapshotPagePointer
Page ID of a snapshot page.
Constants and methods related to CPU cacheline and its prefetching.
DataOffset original_physical_record_length_
The value of physical_record_length_ as of the creation of this record.
const KeyLength kInitiallyNextLayer
A special key length value that denotes the record in a border page was initially a next-layer pointe...
Database engine object that holds all resources and provides APIs.
Slot * get_slot(SlotIndex index) __attribute__((always_inline))
A system transaction to split a border page in Master-Tree.
uint8_t get_btree_level() const __attribute__((always_inline))
used only in masstree.
Fix-sized slot for each record, which is placed at the end of data region.
void initialize_snapshot_common(StorageId storage_id, SnapshotPagePointer page_id, PageType page_type, uint8_t layer, uint8_t level, KeySlice low_fence, KeySlice high_fence)
const Slot * get_slot(SlotIndex index) const __attribute__((always_inline))
SnapshotPagePointer snapshot_pointer_
void increment_key_count() __attribute__((always_inline))
const SlotIndex kBorderPageMaxSlots
Maximum number of slots in one MasstreeBorderPage.
bool is_next_layer() const __attribute__((always_inline))
const uint32_t kBorderPageDataPartSize
Byte size of the record data part (data_) in MasstreeBorderPage.
uint16_t extract_snapshot_id_from_snapshot_pointer(SnapshotPagePointer pointer)
const uint32_t kBorderPageAdditionalHeaderSize
Misc header attributes specific to MasstreeBorderPage placed after the common header.
A piece of Slot object that must be read/written in one-shot, meaning no one reads half-written value...
uint8_t get_layer() const __attribute__((always_inline))
Layer-0 stores the first 8 byte slice, Layer-1 next 8 byte...
xct::McsWwLock * get_lock_address() __attribute__((always_inline))
bool is_deleted() const __attribute__((always_inline))
void set_retired() __attribute__((always_inline))
KeySlice get_high_fence() const __attribute__((always_inline))
int ltgt_key(SlotIndex index, const char *be_key, KeyLength key_length) const __attribute__((always_inline))
compare the key.
void set_foster_twin(VolatilePagePointer minor, VolatilePagePointer major)
void set_separator(uint8_t minipage_index, KeySlice new_separator)
Place a new separator for a new minipage.
void assert_entries() __attribute__((always_inline))
bool within_foster_minor(KeySlice slice) const __attribute__((always_inline))
DualPagePointer * get_next_layer_from_offsets(DataOffset record_offset, KeyLength remainder_length) __attribute__((always_inline))
KeySlice separators_[kMaxIntermediateMiniSeparators]
Same semantics as separators_ in enclosing class.
FindKeyForReserveResult find_key_for_snapshot(KeySlice slice, const void *suffix, KeyLength remainder) const __attribute__((always_inline))
This one is used for snapshot pages.
Used only for debugging as this is not space efficient.
void initialize_layer_root(const MasstreeBorderPage *copy_from, SlotIndex copy_index)
Copy the initial record that will be the only record for a new root page.
SlotIndex to_slot_index(const Slot *slot) const __attribute__((always_inline))
const PayloadLength kMaxPayloadLength
Max length of a payload.
Forward declarations of classes in memory package.
bool has_foster_child() const __attribute__((always_inline))
KeySlice get_low_fence() const __attribute__((always_inline))
void initialize_volatile_page(StorageId storage_id, VolatilePagePointer page_id, uint8_t layer, KeySlice low_fence, KeySlice high_fence)
void set_moved() __attribute__((always_inline))
MasstreeIntermediatePage & operator=(const MasstreeIntermediatePage &other)=delete
SlotIndex find_key(KeySlice slice, const void *suffix, KeyLength remainder) const __attribute__((always_inline))
Navigates a searching key-slice to one of the record in this page.
MasstreeBorderPage()=delete
bool can_accomodate_snapshot(KeyLength remainder_length, PayloadLength payload_count) const __attribute__((always_inline))
Slightly different from can_accomodate() as follows:
bool try_expand_record_in_page_physical(PayloadLength payload_count, SlotIndex record_index)
A physical-only method to expand a record within this page without any logical change.
KeySlice get_foster_fence() const __attribute__((always_inline))
void extract_separators_volatile(uint8_t index, uint8_t index_mini, KeySlice *separator_low, KeySlice *separator_high) const
Retrieves separators defining the index, used for volatile page, which requires appropriate locks or ...
bool is_high_fence_supremum() const __attribute__((always_inline))
xct::TrackMovedRecordResult track_moved_record(Engine *engine, xct::RwLockableXctId *old_address, xct::WriteXctAccess *write_set)
void append_minipage_snapshot(KeySlice low_fence, SnapshotPagePointer pointer)
Appends a new separator and the initial pointer in new mini page, used only by snapshot composer...
xct::TrackMovedRecordResult track_moved_record_next_layer(Engine *engine, xct::RwLockableXctId *old_address)
This one further tracks it to next layer.
bool will_conflict(SlotIndex index, const char *be_key, KeyLength key_length) const __attribute__((always_inline))
Returns whether inserting the key will cause creation of a new next layer.
const PageVersion * get_version_address() const __attribute__((always_inline))
KeyLength remainder_length_
Followings are immutable.
void initialize_volatile_page(StorageId storage_id, VolatilePagePointer page_id, uint8_t layer, uint8_t level, KeySlice low_fence, KeySlice high_fence)
VolatilePagePointer get_volatile_page_id() const
char * get_record_payload_from_offsets(DataOffset record_offset, KeyLength remainder_length) __attribute__((always_inline))
friend std::ostream & operator<<(std::ostream &o, const MasstreePage &v)
defined in masstree_page_debug.cpp.
char * get_record_payload(SlotIndex index) __attribute__((always_inline))
bool is_low_fence_infimum() const __attribute__((always_inline))
void release_pages_recursive(const memory::GlobalVolatilePageResolver &page_resolver, memory::PageReleaseBatch *batch)
const uint32_t kMaxIntermediatePointers
Max number of pointers (if completely filled) stored in an intermediate pages.
static DataOffset to_record_length(KeyLength remainder_length, PayloadLength payload_length)
returns minimal physical_record_length_ for the given remainder/payload length.
void extract_separators_common(uint8_t index, uint8_t index_mini, KeySlice *separator_low, KeySlice *separator_high) const
Retrieves separators defining the index, used only by snapshot composer (or when no race) ...
#define STATIC_SIZE_CHECK(desired, actual)
const char * get_record_from_offset(DataOffset record_offset) const __attribute__((always_inline))
KeyLength get_suffix_length_aligned(SlotIndex index) const __attribute__((always_inline))
const KeySlice kSupremumSlice
const char * get_record_payload(SlotIndex index) const __attribute__((always_inline))
xct::RwLockableXctId * get_owner_id(SlotIndex index) __attribute__((always_inline))
Represents one intermediate page in Masstree Storage.
KeyLength calculate_suffix_length(KeyLength remainder_length) __attribute__((always_inline))
void verify_separators() const
Forward declarations of classes in masstree storage package.
#define UNLIKELY(x)
Hints that x is highly likely false.
bool is_full_snapshot() const
Whether this page is full of pointers, used only by snapshot composer (or when no race) ...
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
bool within_foster_major(KeySlice slice) const __attribute__((always_inline))
bool is_locked() const __attribute__((always_inline))
KeySlice slice_layer(const void *be_bytes, KeyLength key_length, Layer current_layer)
Extract a part of a big-endian byte array of given length as KeySlice.
bool equal_key(SlotIndex index, const void *be_key, KeyLength key_length) const __attribute__((always_inline))
let's gradually migrate from compare_key() to this.
void reserve_initially_next_layer(SlotIndex index, xct::XctId initial_owner_id, KeySlice slice, const DualPagePointer &pointer)
For creating a record that is initially a next-layer.
Forward declarations of classes in thread package.
MiniPage & operator=(const MiniPage &other)=delete
xct::RwLockableXctId tid_
TID of the record.
bool will_contain_next_layer(SlotIndex index, const char *be_key, KeyLength key_length) const __attribute__((always_inline))
Returns whether the record is a next-layer pointer that would contain the key.
KeyLength get_aligned_suffix_length() const __attribute__((always_inline))
#define ALWAYS_INLINE
A function suffix to hint that the function should always be inlined.
bool is_next_layer() const __attribute__((always_inline))
bool within_fences(KeySlice slice) const __attribute__((always_inline))
void replace_next_layer_snapshot(SnapshotPagePointer pointer)
Same as above, except this is used to transform an existing record at end to a next layer pointer...
bool does_point_to_layer(SlotIndex index) const __attribute__((always_inline))
MasstreeIntermediatePage()=delete
const uint16_t kPageSize
A constant defining the page size (in bytes) of both snapshot pages and volatile pages.
void set_key_count(SlotIndex count) __attribute__((always_inline))
uint8_t find_minipage(KeySlice slice) const __attribute__((always_inline))
Navigates a searching key-slice to one of the mini pages in this page.
void extract_separators_snapshot(uint8_t index, uint8_t index_mini, KeySlice *separator_low, KeySlice *separator_high) const
Retrieves separators defining the index, used only by snapshot composer, thus no race.
DataOffset offset_
Byte offset in data_ where this record starts.
uint8_t find_pointer(KeySlice slice) const __attribute__((always_inline))
Navigates a searching key-slice to one of pointers in this mini-page.
void initialize_snapshot_page(StorageId storage_id, SnapshotPagePointer page_id, uint8_t layer, uint8_t level, KeySlice low_fence, KeySlice high_fence)
char * get_record_from_offset(DataOffset record_offset) __attribute__((always_inline))
Offset versions.
void set_next_layer() __attribute__((always_inline))
const PageVersion & get_version() const __attribute__((always_inline))