libfoedus-core
FOEDUS Core Library
foedus::storage::hash::HashDataPage Class Reference (final)

Represents an individual data page in Hashtable Storage. More...

Detailed Description

Represents an individual data page in Hashtable Storage.

This is one of the page types in hash storage. A data page contains full keys and values.

Page Layout
Headers, including bloom filter for faster check
Record Data part, which grows forward
Unused part
Slot part (32 bytes per record), which grows backward
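The space accounting implied by this layout is easy to work through. The stand-alone sketch below is an illustration only, not FOEDUS code: the 128-byte header size and the 4096-byte page size are assumptions made for the example, while the 8-byte alignment and the 32-byte slot follow required_space() and the layout above.

// Illustration only (NOT FOEDUS code). kAssumedHeaderSize and kAssumedPageSize are
// stand-ins for kHashDataPageHeaderSize and kPageSize, whose values are not shown here.
#include <cstdint>

constexpr uint16_t kAssumedPageSize = 4096;    // assumption for this example
constexpr uint16_t kAssumedHeaderSize = 128;   // assumption for this example
constexpr uint16_t kSlotSize = 32;             // one slot per record, growing backward

constexpr uint16_t align8(uint16_t x) { return (x + 7U) & ~7U; }

// Bytes one new record consumes: 8-aligned key + 8-aligned payload + its slot.
constexpr uint16_t required_space(uint16_t key_length, uint16_t payload_length) {
  return align8(key_length) + align8(payload_length) + kSlotSize;
}

int main() {
  uint16_t available = kAssumedPageSize - kAssumedHeaderSize;  // empty data region
  available -= required_space(20, 100);   // 24 + 104 + 32 = 160 bytes consumed
  return available == 3808 ? 0 : 1;       // 3808 bytes left for further records
}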

Definition at line 155 of file hash_page_impl.hpp.

#include <hash_page_impl.hpp>

Classes

struct  Slot
 Fixed-size slot for each record, placed at the end of the data region. More...
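The member-function listings below read most easily with the Slot fields in mind. The following is an illustrative sketch only; every field appears in the listings on this page, but the ordering and exact declaration are assumptions, so consult hash_page_impl.hpp for the authoritative definition.

// Illustrative sketch, NOT the authoritative definition (field order is assumed).
struct Slot {
  xct::RwLockableXctId tid_;           // lock word + XctId of the record
  HashValue hash_;                     // full 64-bit hash of the key (immutable)
  uint16_t offset_;                    // where the record begins in the data region
  uint16_t physical_record_length_;    // align8(key) + align8(payload) bytes reserved
  uint16_t key_length_;                // exact key length
  uint16_t payload_length_;            // exact payload length
  uint16_t get_aligned_key_length() const { return assorted::align8(key_length_); }
};  // 16 + 8 + 4 * 2 = 32 bytes, matching the "32 bytes per record" above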
 

Public Member Functions

 HashDataPage ()=delete
 
 HashDataPage (const HashDataPage &other)=delete
 
HashDataPage & operator= (const HashDataPage &other)=delete
 
void initialize_volatile_page (StorageId storage_id, VolatilePagePointer page_id, const Page *parent, HashBin bin, uint8_t bin_shifts)
 
void initialize_snapshot_page (StorageId storage_id, SnapshotPagePointer page_id, HashBin bin, uint8_t bin_bits, uint8_t bin_shifts)
 
void release_pages_recursive (const memory::GlobalVolatilePageResolver &page_resolver, memory::PageReleaseBatch *batch)
 
PageHeader & header ()
 
const PageHeader & header () const
 
bool is_locked () const
 
VolatilePagePointer get_volatile_page_id () const
 
SnapshotPagePointer get_snapshot_page_id () const
 
const DualPagePointer & next_page () const __attribute__((always_inline))
 
DualPagePointer & next_page () __attribute__((always_inline))
 
const DualPagePointer * next_page_address () const __attribute__((always_inline))
 
DualPagePointer * next_page_address () __attribute__((always_inline))
 
const DataPageBloomFilter & bloom_filter () const __attribute__((always_inline))
 
DataPageBloomFilter & bloom_filter () __attribute__((always_inline))
 
uint16_t get_record_count () const __attribute__((always_inline))
 
const Slot & get_slot (DataPageSlotIndex record) const __attribute__((always_inline))
 
Slot & get_slot (DataPageSlotIndex record) __attribute__((always_inline))
 
Slot & get_new_slot (DataPageSlotIndex record) __attribute__((always_inline))
 
const Slot * get_slot_address (DataPageSlotIndex record) const __attribute__((always_inline))
 same as &get_slot(), but this is more explicit and easier to understand/maintain More...
 
Slot * get_slot_address (DataPageSlotIndex record) __attribute__((always_inline))
 
DataPageSlotIndex to_slot_index (const Slot *slot) const __attribute__((always_inline))
 
char * record_from_offset (uint16_t offset)
 
const char * record_from_offset (uint16_t offset) const
 
uint16_t available_space () const
 Returns usable space in bytes. More...
 
uint16_t next_offset () const
 Returns offset of a next record. More...
 
DataPageSlotIndex reserve_record (HashValue hash, const BloomFilterFingerprint &fingerprint, const void *key, uint16_t key_length, uint16_t payload_length)
 A system transaction that creates a logically deleted record in this page for the given key. More...
 
void create_record_in_snapshot (xct::XctId xct_id, HashValue hash, const BloomFilterFingerprint &fingerprint, const void *key, uint16_t key_length, const void *payload, uint16_t payload_length) __attribute__((always_inline))
 A simplified/efficient version to insert an active record, which must be used only in snapshot pages. More...
 
DataPageSlotIndex search_key_physical (HashValue hash, const BloomFilterFingerprint &fingerprint, const void *key, KeyLength key_length, DataPageSlotIndex record_count, DataPageSlotIndex check_from=0) const
 Search for a physical slot that exactly contains the given key. More...
 
bool compare_slot_key (DataPageSlotIndex index, HashValue hash, const void *key, uint16_t key_length) const
 returns whether the slot contains the exact key specified More...
 
HashBin get_bin () const
 
void assert_bin (HashBin bin) const __attribute__((always_inline))
 
void protected_set_bin_shifts (uint8_t bin_shifts)
 this method should be called only in page creation. More...
 
uint8_t get_bin_shifts () const
 
void assert_entries () const __attribute__((always_inline))
 
void assert_entries_impl () const
 defined in hash_page_debug.cpp. More...
 

Static Public Member Functions

static uint16_t required_space (uint16_t key_length, uint16_t payload_length)
 returns physical_record_length_ for a new record More...
 

Friends

struct ReserveRecords
 
std::ostream & operator<< (std::ostream &o, const HashDataPage &v)
 defined in hash_page_debug.cpp. More...
 

Constructor & Destructor Documentation

foedus::storage::hash::HashDataPage::HashDataPage ( )
delete
foedus::storage::hash::HashDataPage::HashDataPage ( const HashDataPage & other )
delete

Member Function Documentation

void foedus::storage::hash::HashDataPage::assert_bin ( HashBin  bin) const
inline

Definition at line 413 of file hash_page_impl.hpp.

References ASSERT_ND.

{ ASSERT_ND(bin_ == bin); }
void foedus::storage::hash::HashDataPage::assert_entries ( ) const
inline

Definition at line 424 of file hash_page_impl.hpp.

References assert_entries_impl().

{
#ifndef NDEBUG
  assert_entries_impl();
#endif  // NDEBUG
}

void foedus::storage::hash::HashDataPage::assert_entries_impl ( ) const

defined in hash_page_debug.cpp.

Definition at line 89 of file hash_page_debug.cpp.

References foedus::storage::hash::DataPageBloomFilter::add(), ASSERT_ND, foedus::storage::hash::DataPageBloomFilter::clear(), foedus::storage::hash::DataPageBloomFilter::extract_fingerprint(), foedus::storage::hash::HashDataPage::Slot::get_aligned_key_length(), get_bin_shifts(), get_record_count(), get_slot_address(), foedus::storage::hash::HashDataPage::Slot::hash_, foedus::storage::hash::hashinate(), foedus::storage::hash::HashDataPage::Slot::key_length_, foedus::storage::hash::kHashDataPageHeaderSize, foedus::storage::kPageSize, next_page(), foedus::storage::hash::HashDataPage::Slot::offset_, foedus::storage::hash::HashDataPage::Slot::payload_length_, foedus::storage::hash::HashDataPage::Slot::physical_record_length_, record_from_offset(), foedus::storage::PageHeader::snapshot_, and foedus::storage::hash::DataPageBloomFilter::values_.

Referenced by assert_entries().

{
  const uint8_t bin_shifts = get_bin_shifts();
  uint8_t records = get_record_count();

  if (header_.snapshot_) {
    ASSERT_ND(next_page().volatile_pointer_.is_null());
  } else {
    ASSERT_ND(next_page().snapshot_pointer_ == 0);
  }

  DataPageBloomFilter correct_filter;
  correct_filter.clear();
  for (DataPageSlotIndex i = 0; i < records; ++i) {
    const HashDataPage::Slot* slot = get_slot_address(i);
    if (i > 0) {
      const HashDataPage::Slot* pre = get_slot_address(i - 1);
      ASSERT_ND(slot->offset_ == pre->offset_ + pre->physical_record_length_);
    }
    HashValue hash = hashinate(record_from_offset(slot->offset_), slot->key_length_);
    ASSERT_ND(slot->hash_ == hash);
    HashBin bin = hash >> bin_shifts;
    ASSERT_ND(bin_ == bin);

    correct_filter.add(DataPageBloomFilter::extract_fingerprint(slot->hash_));
    ASSERT_ND(slot->physical_record_length_
      >= slot->get_aligned_key_length() + slot->payload_length_);
    uint16_t end_offset = slot->offset_ + slot->physical_record_length_;
    ASSERT_ND(end_offset + records * sizeof(Slot) <= (kPageSize - kHashDataPageHeaderSize));
  }

  for (uint16_t i = 0; i < sizeof(correct_filter.values_); ++i) {
    ASSERT_ND(correct_filter.values_[i] == bloom_filter_.values_[i]);
  }
}

uint16_t foedus::storage::hash::HashDataPage::available_space ( ) const
inline

Returns usable space in bytes.

Definition at line 281 of file hash_page_impl.hpp.

References ASSERT_ND, get_record_count(), and next_offset().

Referenced by foedus::storage::hash::ReserveRecords::append_record_to_page(), foedus::storage::hash::ReserveRecords::create_new_record_in_tail_page(), create_record_in_snapshot(), foedus::storage::hash::ReserveRecords::expand_record(), foedus::storage::hash::ReserveRecords::find_and_lock_spacious_tail(), and reserve_record().

{
  uint16_t consumed = next_offset() + get_record_count() * sizeof(Slot);
  ASSERT_ND(consumed <= sizeof(data_));
  if (consumed > sizeof(data_)) {  // just to be conservative on release build
    return 0;
  }
  return sizeof(data_) - consumed;
}

const DataPageBloomFilter& foedus::storage::hash::HashDataPage::bloom_filter ( ) const
inline

Definition at line 243 of file hash_page_impl.hpp.

Referenced by foedus::storage::hash::HashStoragePimpl::insert_record(), and foedus::storage::hash::HashStoragePimpl::upsert_record().

{ return bloom_filter_; }

DataPageBloomFilter& foedus::storage::hash::HashDataPage::bloom_filter ( )
inline

Definition at line 244 of file hash_page_impl.hpp.

{ return bloom_filter_; }
bool foedus::storage::hash::HashDataPage::compare_slot_key ( DataPageSlotIndex  index,
HashValue  hash,
const void *  key,
uint16_t  key_length 
) const
inline

returns whether the slot contains the exact key specified

Definition at line 396 of file hash_page_impl.hpp.

References ASSERT_ND, get_record_count(), get_slot(), foedus::storage::hash::HashDataPage::Slot::hash_, foedus::storage::hash::hashinate(), foedus::storage::hash::HashDataPage::Slot::key_length_, foedus::storage::hash::HashDataPage::Slot::offset_, and record_from_offset().

Referenced by foedus::storage::hash::HashStoragePimpl::locate_record(), and foedus::storage::hash::HashStoragePimpl::locate_record_in_snapshot().

{
  ASSERT_ND(index < get_record_count());  // record count purely increasing
  const Slot& slot = get_slot(index);
  ASSERT_ND(hashinate(record_from_offset(slot.offset_), slot.key_length_) == slot.hash_);
  // quick check first
  if (slot.hash_ != hash || slot.key_length_ != key_length) {
    return false;
  }
  const void* data = record_from_offset(slot.offset_);
  return std::memcmp(data, key, key_length) == 0;
}

void foedus::storage::hash::HashDataPage::create_record_in_snapshot ( xct::XctId  xct_id,
HashValue  hash,
const BloomFilterFingerprint fingerprint,
const void *  key,
uint16_t  key_length,
const void *  payload,
uint16_t  payload_length 
)
inline

A simplified/efficient version to insert an active record, which must be used only in snapshot pages.

Parameters
[in] xct_id  XctId of the record.
[in] hash  hash value of the key.
[in] fingerprint  Bloom Filter fingerprint of the key.
[in] key  full key.
[in] key_length  full key length.
[in] payload  the payload of the new record
[in] payload_length  length of payload
Precondition
the page is a snapshot page being constructed
the page doesn't have the key
required_space(key_length, payload_length) <= available_space()

On constructing snapshot pages, we don't need any concurrency control, and we can loosen all restrictions. We thus have this method as an optimized version for snapshot pages. Inlined to make it more efficient.
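A hedged sketch of how a composer-side caller might use this method follows; append_if_fits is a hypothetical helper, not a FOEDUS function, and it assumes the foedus::storage::hash headers, a snapshot page under construction, and 8-byte-aligned key/payload buffers as the assertions in the definition require.

// Hypothetical helper (not part of FOEDUS), assumed to live in namespace
// foedus::storage::hash with the library headers included.
bool append_if_fits(
  HashDataPage* page,                            // snapshot page being constructed
  xct::XctId xct_id,
  const void* key, uint16_t key_length,          // 8-byte-aligned buffer
  const void* payload, uint16_t payload_length)  // 8-byte-aligned buffer
{
  if (page->available_space() < HashDataPage::required_space(key_length, payload_length)) {
    return false;  // the composer would chain a new tail page instead
  }
  const HashValue hash = hashinate(key, key_length);
  page->create_record_in_snapshot(
    xct_id, hash, DataPageBloomFilter::extract_fingerprint(hash),
    key, key_length, payload, payload_length);
  return true;
}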

Definition at line 480 of file hash_page_impl.hpp.

References foedus::storage::hash::DataPageBloomFilter::add(), foedus::assorted::align8(), ASSERT_ND, ASSUME_ALIGNED, available_space(), get_record_count(), get_slot(), foedus::storage::hash::HashDataPage::Slot::hash_, foedus::storage::PageHeader::increment_key_count(), foedus::storage::hash::HashDataPage::Slot::key_length_, foedus::storage::kPageSize, next_offset(), foedus::storage::hash::HashDataPage::Slot::offset_, foedus::storage::hash::HashDataPage::Slot::payload_length_, foedus::storage::hash::HashDataPage::Slot::physical_record_length_, record_from_offset(), required_space(), foedus::storage::PageHeader::snapshot_, foedus::storage::hash::HashDataPage::Slot::tid_, and foedus::xct::RwLockableXctId::xct_id_.

{
  ASSERT_ND(header_.snapshot_);
  ASSERT_ND(available_space() >= required_space(key_length, payload_length));
  ASSERT_ND(reinterpret_cast<uintptr_t>(this) % kPageSize == 0);
  ASSERT_ND(reinterpret_cast<uintptr_t>(key_arg) % 8 == 0);
  ASSERT_ND(reinterpret_cast<uintptr_t>(payload_arg) % 8 == 0);

  const void* key = ASSUME_ALIGNED(key_arg, 8U);
  const void* payload = ASSUME_ALIGNED(payload_arg, 8U);
  DataPageSlotIndex index = get_record_count();
  Slot& slot = get_slot(index);
  slot.tid_.xct_id_ = xct_id;
  slot.offset_ = next_offset();
  slot.hash_ = hash;
  slot.key_length_ = key_length;
  uint16_t aligned_key_length = assorted::align8(key_length);
  slot.physical_record_length_ = aligned_key_length + assorted::align8(payload_length);
  slot.payload_length_ = payload_length;

  ASSERT_ND(reinterpret_cast<uintptr_t>(record_from_offset(slot.offset_)) % 8 == 0);
  char* record = reinterpret_cast<char*>(ASSUME_ALIGNED(record_from_offset(slot.offset_), 8U));

  ASSERT_ND(key_length > 0);
  std::memcpy(record, key, key_length);
  if (key_length != aligned_key_length) {
    std::memset(record + key_length, 0, aligned_key_length - key_length % 8);
  }

  if (payload_length > 0) {
    char* dest = reinterpret_cast<char*>(ASSUME_ALIGNED(record + aligned_key_length, 8U));
    uint16_t aligned_payload_length = assorted::align8(payload_length);
    std::memcpy(dest, payload, payload_length);
    if (key_length != aligned_payload_length) {
      std::memset(dest + payload_length, 0, aligned_payload_length - payload_length);
    }
  }

  bloom_filter_.add(fingerprint);

  header_.increment_key_count();
}

uint8_t foedus::storage::hash::HashDataPage::get_bin_shifts ( ) const
inline

Definition at line 422 of file hash_page_impl.hpp.

References foedus::storage::PageHeader::masstree_layer_.

Referenced by assert_entries_impl(), foedus::storage::hash::ReserveRecords::create_new_tail_page(), and initialize_volatile_page().

{ return header_.masstree_layer_; }

Slot& foedus::storage::hash::HashDataPage::get_new_slot ( DataPageSlotIndex  record)
inline

Definition at line 254 of file hash_page_impl.hpp.

Referenced by foedus::storage::hash::ReserveRecords::append_record_to_page().

{
  // Only in this method, record is okay to be == key_count
  return reinterpret_cast<Slot*>(this + 1)[-record - 1];
}

const Slot& foedus::storage::hash::HashDataPage::get_slot ( DataPageSlotIndex  record) const
inline

Definition at line 248 of file hash_page_impl.hpp.

Referenced by compare_slot_key(), create_record_in_snapshot(), foedus::storage::hash::ReserveRecords::expand_record(), foedus::storage::hash::ReserveRecords::find_or_create_or_expand(), next_offset(), reserve_record(), and search_key_physical().

{
  return reinterpret_cast<const Slot*>(this + 1)[-record - 1];
}

Slot& foedus::storage::hash::HashDataPage::get_slot ( DataPageSlotIndex  record)
inline

Definition at line 251 of file hash_page_impl.hpp.

{
  return reinterpret_cast<Slot*>(this + 1)[-record - 1];
}
const Slot* foedus::storage::hash::HashDataPage::get_slot_address ( DataPageSlotIndex  record) const
inline

same as &get_slot(), but this is more explicit and easier to understand/maintain

Definition at line 259 of file hash_page_impl.hpp.

Referenced by assert_entries_impl(), foedus::storage::hash::operator<<(), foedus::storage::hash::RecordLocation::populate_logical(), foedus::storage::hash::RecordLocation::populate_physical(), foedus::storage::hash::HashStoragePimpl::register_record_write_log(), and foedus::storage::hash::HashStoragePimpl::track_moved_record_search().

{
  return reinterpret_cast<const Slot*>(this + 1) - record - 1;
}

Slot* foedus::storage::hash::HashDataPage::get_slot_address ( DataPageSlotIndex  record)
inline

Definition at line 262 of file hash_page_impl.hpp.

{
  return reinterpret_cast<Slot*>(this + 1) - record - 1;
}
SnapshotPagePointer foedus::storage::hash::HashDataPage::get_snapshot_page_id ( ) const
inline

Definition at line 233 of file hash_page_impl.hpp.

References ASSERT_ND, foedus::storage::PageHeader::page_id_, and foedus::storage::PageHeader::snapshot_.

{
  ASSERT_ND(header_.snapshot_);
  return static_cast<SnapshotPagePointer>(header_.page_id_);
}
VolatilePagePointer foedus::storage::hash::HashDataPage::get_volatile_page_id ( ) const
inline

Definition at line 229 of file hash_page_impl.hpp.

References ASSERT_ND, foedus::storage::PageHeader::page_id_, and foedus::storage::PageHeader::snapshot_.

Referenced by foedus::storage::hash::ReserveRecords::create_new_record_in_tail_page(), foedus::storage::hash::ReserveRecords::expand_record(), and foedus::storage::hash::ReserveRecords::find_and_lock_spacious_tail().

{
  ASSERT_ND(!header_.snapshot_);
  return VolatilePagePointer(header_.page_id_);
}

const PageHeader& foedus::storage::hash::HashDataPage::header ( ) const
inline

Definition at line 226 of file hash_page_impl.hpp.

{ return header_; }
void foedus::storage::hash::HashDataPage::initialize_snapshot_page ( StorageId  storage_id,
SnapshotPagePointer  page_id,
HashBin  bin,
uint8_t  bin_bits,
uint8_t  bin_shifts 
)

Definition at line 93 of file hash_page_impl.cpp.

References ASSERT_ND, foedus::storage::PageHeader::init_snapshot(), foedus::storage::kHashDataPageType, foedus::storage::kPageSize, and protected_set_bin_shifts().

{
  ASSERT_ND(bin_bits + bin_shifts == 64U);
  std::memset(this, 0, kPageSize);
  header_.init_snapshot(page_id, storage_id, kHashDataPageType);
  bin_ = bin;
  protected_set_bin_shifts(bin_shifts);
}

void foedus::storage::hash::HashDataPage::initialize_volatile_page ( StorageId  storage_id,
VolatilePagePointer  page_id,
const Page parent,
HashBin  bin,
uint8_t  bin_shifts 
)

Definition at line 70 of file hash_page_impl.cpp.

References ASSERT_ND, foedus::storage::hash::HashBinRange::contains(), get_bin(), foedus::storage::hash::HashIntermediatePage::get_bin_range(), get_bin_shifts(), foedus::storage::Page::get_header(), foedus::storage::hash::HashIntermediatePage::get_level(), foedus::storage::PageHeader::get_page_type(), foedus::storage::PageHeader::init_volatile(), foedus::storage::kHashDataPageType, foedus::storage::kHashIntermediatePageType, foedus::storage::kPageSize, and protected_set_bin_shifts().

Referenced by foedus::storage::hash::ReserveRecords::create_new_tail_page(), and foedus::storage::hash::hash_data_volatile_page_init().

{
  std::memset(this, 0, kPageSize);
  header_.init_volatile(page_id, storage_id, kHashDataPageType);
  bin_ = bin;
  protected_set_bin_shifts(bin_shifts);
  ASSERT_ND(parent);
  if (parent->get_header().get_page_type() == kHashIntermediatePageType) {
    const HashIntermediatePage* parent_casted
      = reinterpret_cast<const HashIntermediatePage*>(parent);
    ASSERT_ND(parent_casted->get_level() == 0);
    ASSERT_ND(parent_casted->get_bin_range().contains(bin));
  } else {
    const HashDataPage* parent_casted = reinterpret_cast<const HashDataPage*>(parent);
    ASSERT_ND(parent_casted->get_bin() == bin);
    ASSERT_ND(parent_casted->get_bin_shifts() == bin_shifts);
  }
}

bool foedus::storage::hash::HashDataPage::is_locked ( ) const
inline

Definition at line 227 of file hash_page_impl.hpp.

References foedus::storage::PageVersion::is_locked(), and foedus::storage::PageHeader::page_version_.

Referenced by foedus::storage::hash::ReserveRecords::create_new_record_in_tail_page(), foedus::storage::hash::ReserveRecords::create_new_tail_page(), foedus::storage::hash::ReserveRecords::expand_record(), and foedus::storage::hash::ReserveRecords::find_or_create_or_expand().

{ return header_.page_version_.is_locked(); }

uint16_t foedus::storage::hash::HashDataPage::next_offset ( ) const
inline

Returns offset of a next record.

Definition at line 291 of file hash_page_impl.hpp.

References ASSERT_ND, get_record_count(), get_slot(), foedus::storage::hash::HashDataPage::Slot::offset_, and foedus::storage::hash::HashDataPage::Slot::physical_record_length_.

Referenced by foedus::storage::hash::ReserveRecords::append_record_to_page(), available_space(), create_record_in_snapshot(), and reserve_record().

{
  DataPageSlotIndex count = get_record_count();
  if (count == 0) {
    return 0;
  }
  const Slot& last_slot = get_slot(count - 1);
  ASSERT_ND((last_slot.offset_ + last_slot.physical_record_length_) % 8 == 0);
  return last_slot.offset_ + last_slot.physical_record_length_;
}

DualPagePointer& foedus::storage::hash::HashDataPage::next_page ( )
inline

Definition at line 239 of file hash_page_impl.hpp.

{ return next_page_; }
const DualPagePointer* foedus::storage::hash::HashDataPage::next_page_address ( ) const
inline
DualPagePointer* foedus::storage::hash::HashDataPage::next_page_address ( )
inline

Definition at line 241 of file hash_page_impl.hpp.

{ return &next_page_; }
HashDataPage& foedus::storage::hash::HashDataPage::operator= ( const HashDataPage other)
delete
void foedus::storage::hash::HashDataPage::protected_set_bin_shifts ( uint8_t  bin_shifts)
inline

this method should be called only in page creation.

bin_shifts is immutable after that. We currently reuse header_.masstree_layer_ (which is otherwise not applicable to hash storage) for bin_shifts to squeeze space.
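Since bin_shifts fixes how a record's bin is derived from its hash (assert_entries_impl() checks bin_ == hash >> bin_shifts and initialize_snapshot_page() asserts bin_bits + bin_shifts == 64), the arithmetic boils down to taking the high bin_bits bits of the 64-bit hash. A tiny stand-alone illustration, not FOEDUS code:

// Illustration only: a bin is the high bin_bits bits of the 64-bit hash value.
#include <cstdint>
int main() {
  const unsigned bin_bits = 16;
  const unsigned bin_shifts = 64 - bin_bits;      // bin_bits + bin_shifts == 64
  const uint64_t hash = 0x1234567890ABCDEFULL;    // HashValue of some key
  const uint64_t bin = hash >> bin_shifts;        // HashBin that owns this record
  return bin == 0x1234u ? 0 : 1;
}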

Definition at line 421 of file hash_page_impl.hpp.

References foedus::storage::PageHeader::masstree_layer_.

Referenced by initialize_snapshot_page(), and initialize_volatile_page().

{ header_.masstree_layer_ = bin_shifts; }

const char* foedus::storage::hash::HashDataPage::record_from_offset ( uint16_t  offset) const
inline

Definition at line 276 of file hash_page_impl.hpp.

{ return data_ + offset; }
void foedus::storage::hash::HashDataPage::release_pages_recursive ( const memory::GlobalVolatilePageResolver page_resolver,
memory::PageReleaseBatch batch 
)

Definition at line 329 of file hash_page_impl.cpp.

References ASSERT_ND, foedus::storage::VolatilePagePointer::clear(), get_bin(), foedus::storage::PageHeader::get_in_layer_level(), header(), foedus::storage::VolatilePagePointer::is_null(), foedus::storage::PageHeader::page_id_, foedus::memory::PageReleaseBatch::release(), release_pages_recursive(), foedus::memory::GlobalVolatilePageResolver::resolve_offset(), foedus::storage::DualPagePointer::volatile_pointer_, and foedus::storage::VolatilePagePointer::word.

Referenced by foedus::storage::hash::HashIntermediatePage::release_pages_recursive(), and release_pages_recursive().

{
  if (!next_page_.volatile_pointer_.is_null()) {
    HashDataPage* next = reinterpret_cast<HashDataPage*>(
      page_resolver.resolve_offset(next_page_.volatile_pointer_));
    ASSERT_ND(next->header().get_in_layer_level() == 0);
    ASSERT_ND(next->get_bin() == get_bin());
    next->release_pages_recursive(page_resolver, batch);
    next_page_.volatile_pointer_.clear();
  }

  VolatilePagePointer volatile_id;
  volatile_id.word = header().page_id_;
  batch->release(volatile_id);
}

static uint16_t foedus::storage::hash::HashDataPage::required_space ( uint16_t  key_length,
uint16_t  payload_length 
)
inlinestatic

returns physical_record_length_ for a new record

Definition at line 302 of file hash_page_impl.hpp.

References foedus::assorted::align8().

Referenced by foedus::storage::hash::ReserveRecords::append_record_to_page(), foedus::storage::hash::ReserveRecords::create_new_record_in_tail_page(), create_record_in_snapshot(), foedus::storage::hash::ReserveRecords::expand_record(), foedus::storage::hash::ReserveRecords::find_and_lock_spacious_tail(), and reserve_record().

{
  return assorted::align8(key_length) + assorted::align8(payload_length) + sizeof(Slot);
}

DataPageSlotIndex foedus::storage::hash::HashDataPage::reserve_record ( HashValue  hash,
const BloomFilterFingerprint fingerprint,
const void *  key,
uint16_t  key_length,
uint16_t  payload_length 
)

A system transaction that creates a logically deleted record in this page for the given key.

Parameters
[in] hash  hash value of the key.
[in] fingerprint  Bloom Filter fingerprint of the key.
[in] key  full key.
[in] key_length  full key length.
[in] payload_length  the new record can contain at least this length of payload
Returns
slot index of the newly created record
Precondition
the page is locked
the page must not already contain a record with the exact key, unless that record is moved
required_space(key_length, payload_length) <= available_space()

This method also adds the fingerprint to the bloom filter.
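A hedged caller sketch follows; try_reserve is a hypothetical helper, not a FOEDUS function, and it assumes the foedus::storage::hash headers plus a page that the caller has already locked, per the preconditions above.

// Hypothetical helper (not part of FOEDUS): reserve a slot in a locked page if it fits.
DataPageSlotIndex try_reserve(
  HashDataPage* page,
  HashValue hash,
  const BloomFilterFingerprint& fingerprint,
  const void* key, uint16_t key_length,
  uint16_t payload_length)
{
  ASSERT_ND(page->is_locked());  // precondition: the caller holds the page lock
  if (page->available_space() < HashDataPage::required_space(key_length, payload_length)) {
    return kSlotNotFound;        // no room; the caller moves on to the next/new tail page
  }
  // Creates a logically deleted record and registers the fingerprint in the bloom filter.
  return page->reserve_record(hash, fingerprint, key, key_length, payload_length);
}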

Definition at line 170 of file hash_page_impl.cpp.

References foedus::storage::hash::DataPageBloomFilter::add(), foedus::assorted::align8(), ASSERT_ND, available_space(), get_record_count(), get_slot(), foedus::storage::hash::HashDataPage::Slot::hash_, foedus::storage::PageHeader::increment_key_count(), foedus::storage::PageVersion::is_locked(), foedus::Epoch::kEpochInitialCurrent, foedus::storage::hash::HashDataPage::Slot::key_length_, foedus::assorted::memory_fence_release(), next_offset(), foedus::storage::hash::HashDataPage::Slot::offset_, foedus::storage::PageHeader::page_version_, foedus::storage::hash::HashDataPage::Slot::payload_length_, foedus::storage::hash::HashDataPage::Slot::physical_record_length_, record_from_offset(), required_space(), foedus::xct::RwLockableXctId::reset(), foedus::xct::XctId::set(), foedus::xct::XctId::set_deleted(), foedus::storage::hash::HashDataPage::Slot::tid_, and foedus::xct::RwLockableXctId::xct_id_.

{
  ASSERT_ND(header_.page_version_.is_locked());
  ASSERT_ND(available_space() >= HashDataPage::required_space(key_length, payload_length));
  DataPageSlotIndex index = get_record_count();
  Slot& slot = get_slot(index);
  slot.offset_ = next_offset();
  slot.hash_ = hash;
  slot.key_length_ = key_length;
  slot.physical_record_length_ = assorted::align8(key_length) + assorted::align8(payload_length);
  slot.payload_length_ = 0;
  char* record = record_from_offset(slot.offset_);
  std::memcpy(record, key, key_length);
  if (key_length % 8 != 0) {
    std::memset(record + key_length, 0, 8 - (key_length % 8));
  }
  xct::XctId initial_id;
  initial_id.set(
    Epoch::kEpochInitialCurrent,  // TODO(Hideaki) this should be something else
    0);
  initial_id.set_deleted();
  // FIXME(tzwang): do this in a more meaningful place,
  // e.g., have something like get_new_slot.
  slot.tid_.reset();
  slot.tid_.xct_id_ = initial_id;

  // we install the fingerprint to bloom filter BEFORE we increment key count.
  // it's okay for concurrent reads to see false positives, but false negatives are wrong!
  bloom_filter_.add(fingerprint);

  // we increment key count AFTER installing the key because otherwise the optimistic read
  // might see the record but find that the key doesn't match. we need a fence to prevent it.
  assorted::memory_fence_release();
  header_.increment_key_count();

  return index;
}

DataPageSlotIndex foedus::storage::hash::HashDataPage::search_key_physical ( HashValue  hash,
const BloomFilterFingerprint fingerprint,
const void *  key,
KeyLength  key_length,
DataPageSlotIndex  record_count,
DataPageSlotIndex  check_from = 0 
) const

Search for a physical slot that exactly contains the given key.

Parameters
[in] hash  hash value of the key.
[in] fingerprint  Bloom Filter fingerprint of the key.
[in] key  full key.
[in] key_length  full key length.
[in] record_count  how many records this page supposedly contains. See below.
[in] check_from  From which record we start searching. If you know that records before this index definitely do not contain the key, this speeds up the search. The caller must guarantee that none of the records before this index has the key, or that they have been moved.
Returns
index of the slot that has the key. kSlotNotFound if not found (including the case where an exactly-matched record's TID says it's "moved").
Invariant
hash == hashinate(key, key_length)

If you have acquired record_count in a protected way (after a page lock, which you still hold), this method is an exact search. Otherwise, a concurrent thread might be inserting right now, so this method can have a false negative (which is why you should take a PageVersion read-set for a miss-search). However, no false positive is possible.

This method searches for a physical slot no matter whether the record is logically deleted. However, "moved" records are completely ignored.

This method is physical-only. The returned record might be being modified or moved right now. The only contract is that the returned slot (if not kSlotNotFound) points to a physical record whose key is exactly the same as the given one. This is trivially guaranteed because the key/hash of a physical record in our hash pages is immutable.
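A hedged lookup sketch that ties these pieces together; find_exact is a hypothetical helper, not a FOEDUS function, and it assumes the foedus::storage::hash headers.

// Hypothetical helper (not part of FOEDUS): physical-only exact-key search in one data page.
DataPageSlotIndex find_exact(const HashDataPage* page, const void* key, uint16_t key_length) {
  const HashValue hash = hashinate(key, key_length);
  const BloomFilterFingerprint fingerprint = DataPageBloomFilter::extract_fingerprint(hash);
  // Read the key count first; concurrent inserts only append, so searching up to this
  // count may miss a brand-new record (false negative) but never returns a wrong slot.
  const DataPageSlotIndex count = page->get_record_count();
  return page->search_key_physical(hash, fingerprint, key, key_length, count);
}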

Definition at line 106 of file hash_page_impl.cpp.

References ASSERT_ND, foedus::storage::hash::DataPageBloomFilter::contains(), foedus::storage::hash::DataPageBloomFilter::extract_fingerprint(), get_record_count(), get_slot(), foedus::storage::hash::HashDataPage::Slot::hash_, foedus::storage::hash::hashinate(), foedus::xct::XctId::is_moved(), foedus::storage::hash::HashDataPage::Slot::key_length_, foedus::storage::hash::kSlotNotFound, LIKELY, foedus::storage::hash::HashDataPage::Slot::offset_, record_from_offset(), foedus::storage::hash::HashDataPage::Slot::tid_, and foedus::xct::RwLockableXctId::xct_id_.

Referenced by foedus::storage::hash::HashStoragePimpl::locate_record(), foedus::storage::hash::HashStoragePimpl::locate_record_in_snapshot(), foedus::storage::hash::ReserveRecords::search_within_page(), and foedus::storage::hash::HashStoragePimpl::track_moved_record_search().

{
  // invariant checks
  ASSERT_ND(hash == hashinate(key, key_length));
  ASSERT_ND(fingerprint == DataPageBloomFilter::extract_fingerprint(hash));
  ASSERT_ND(record_count <= get_record_count());  // it must be increasing.

#ifndef NDEBUG
  // Check the invariant on check_from
  for (uint16_t i = 0; i < check_from; ++i) {
    const Slot& s = get_slot(i);
    if (LIKELY(s.hash_ != hash) || s.key_length_ != key_length) {
      continue;
    } else if (s.tid_.xct_id_.is_moved()) {
      continue;
    }

    const char* data = record_from_offset(s.offset_);
    ASSERT_ND(s.key_length_ != key_length || std::memcmp(data, key, key_length) != 0);
  }
#endif  // NDEBUG

  // check bloom filter first.
  if (!bloom_filter_.contains(fingerprint)) {
    return kSlotNotFound;
  }

  // then most likely this page contains it. let's check one by one.
  for (uint16_t i = check_from; i < record_count; ++i) {
    const Slot& s = get_slot(i);
    if (LIKELY(s.hash_ != hash) || s.key_length_ != key_length) {
      continue;
    }
    // At this point, we don't take read-set (this is a physical search).
    // We thus mind seeing being-written. The logical check will follow.
    // We can also simply check is_moved because the flag is guaranteed to remain once set.
    // But, remember, it might be NOW being moved, so a logical read-set must follow.
    if (s.tid_.xct_id_.is_moved()) {
      // not so rare. this happens.
      DVLOG(1) << "Hash matched, but the record was moved";
      continue;
    }

    const char* data = record_from_offset(s.offset_);
    if (s.key_length_ == key_length && std::memcmp(data, key, key_length) == 0) {
      return i;
    }
    // hash matched, but key didn't match? wow, that's rare
    DLOG(INFO) << "Hash matched, but key didn't match. interesting. hash="
      << assorted::Hex(hash, 16) << ", key="
      << assorted::HexString(std::string(reinterpret_cast<const char*>(key), key_length))
      << ", key_slot=" << assorted::HexString(std::string(data, s.key_length_));
  }

  // should be 1~2%
  DVLOG(0) << "Nope, bloom filter contained it, but key not found in this page. false positive";
  return kSlotNotFound;
}

DataPageSlotIndex foedus::storage::hash::HashDataPage::to_slot_index ( const Slot slot) const
inline

Definition at line 266 of file hash_page_impl.hpp.

References ASSERT_ND.

{
  ASSERT_ND(this);
  ASSERT_ND(slot);
  int64_t index = reinterpret_cast<const Slot*>(this + 1) - slot - 1;
  ASSERT_ND(index >= 0);
  ASSERT_ND(index < static_cast<int64_t>(sizeof(data_) / sizeof(Slot)));
  return static_cast<DataPageSlotIndex>(index);
}

Friends And Related Function Documentation

std::ostream& operator<< ( std::ostream &  o,
const HashDataPage v 
)
friend

defined in hash_page_debug.cpp.

Definition at line 52 of file hash_page_debug.cpp.

{
  o << "<HashDataPage>";
  o << std::endl << " <vaddr>" << assorted::Hex(reinterpret_cast<uintptr_t>(&v), 16) << "</vaddr>";
  o << std::endl << v.header();
  o << std::endl << " <bin>" << assorted::Hex(v.get_bin()) << "</bin>";
  uint16_t records = v.get_record_count();
  o << std::endl << " <record_count>" << records << "</record_count>";
  o << std::endl << " <next_page>" << v.next_page() << "</next_page>";
  o << std::endl << " <records>";
  for (DataPageSlotIndex i = 0; i < records; ++i) {
    const HashDataPage::Slot* slot = v.get_slot_address(i);
    o << std::endl << "  <record index=\"" << i
      << "\" hash=\"" << assorted::Hex(slot->hash_, 16)
      << "\" physical_record_len=\"" << slot->physical_record_length_
      << "\" key_length_=\"" << slot->key_length_
      << "\" offset=\"" << slot->offset_
      << "\" payload_len=\"" << slot->payload_length_
      << "\">";
    const char* data = v.record_from_offset(slot->offset_);
    std::string key(data, slot->key_length_);
    o << "<key>" << assorted::HexString(key) << "</key>";
    if (slot->payload_length_ > 0) {
      std::string payload(data + slot->get_aligned_key_length(), slot->payload_length_);
      o << "<payload>" << assorted::HexString(payload) << "</payload>";
    }
    BloomFilterFingerprint fingerprint = DataPageBloomFilter::extract_fingerprint(slot->hash_);
    o << fingerprint;
    o << slot->tid_;
    o << "</record>";
  }
  o << std::endl << " </records>";
  o << std::endl << " <BloomFilter>" << v.bloom_filter_ << "</BloomFilter>";
  o << "</HashDataPage>";
  return o;
}
friend struct ReserveRecords
friend

Definition at line 157 of file hash_page_impl.hpp.


The documentation for this class was generated from the following files:
hash_page_impl.hpp
hash_page_impl.cpp
hash_page_debug.cpp