libfoedus-core
FOEDUS Core Library
masstree_id.hpp
/*
 * Copyright (c) 2014-2015, Hewlett-Packard Development Company, LP.
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details. You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * HP designates this particular file as subject to the "Classpath" exception
 * as provided by HP in the LICENSE.txt file that accompanied this code.
 */
#ifndef FOEDUS_STORAGE_MASSTREE_MASSTREE_ID_HPP_
#define FOEDUS_STORAGE_MASSTREE_MASSTREE_ID_HPP_

#include <stdint.h>

#include <cstring>
#include <limits>

#include "foedus/compiler.hpp"                // ASSUME_ALIGNED
#include "foedus/assorted/endianness.hpp"     // assorted::read_bigendian()
#include "foedus/storage/storage_id.hpp"      // kPageSize

namespace foedus {
namespace storage {
namespace masstree {
/** Represents the depth of a B-trie layer. */
typedef uint8_t Layer;

/** Maximum value for Layer. */
const Layer kMaxLayer = 63U;

/** Represents the depth of a B-tree node in a B-trie layer. */
typedef uint8_t InLayerLevel;

/** If InLayerLevel exceeds this value, there must be something wrong. */
const InLayerLevel kMaxSaneInLayerLevel = 15U;

/** Represents a byte-length of a key in this package. */
typedef uint16_t KeyLength;

/** Max length of a key. */
const KeyLength kMaxKeyLength = 1024U;

/**
 * A special key length value that denotes the record in a border page
 * was initially a next-layer pointer.
 */
const KeyLength kInitiallyNextLayer = 0xFFFFU;

/** Represents a byte-length of a payload in this package. */
typedef uint16_t PayloadLength;

/** Max length of a payload. */
const PayloadLength kMaxPayloadLength = 1024U;

/** Byte-offset in a page. */
typedef uint16_t DataOffset;

/** Index of a record in a (border) page. */
typedef uint16_t SlotIndex;

/** Each key slice is an 8-byte integer. */
typedef uint64_t KeySlice;

/** Shorthand for sizeof(KeySlice). */
const uint64_t kSliceLen = sizeof(KeySlice);

// The infimum slice can simply be 0 because the low fence is inclusive.
const KeySlice kInfimumSlice = 0;
// Be aware that this might be used as a valid key slice of an actual record.
// As a fence, however, it is always used as a supremum.
// So, for a record key, the sanity check is "slice < high_fence || (slice == high_fence == supremum)".
// For a fence, the sanity check is "slice < high_fence".
const KeySlice kSupremumSlice = 0xFFFFFFFFFFFFFFFFULL;

/**
 * Size of the base page class (MasstreePage), which is the common header
 * for intermediate and border pages.
 */
const uint32_t kCommonPageHeaderSize = 80U;

/**
 * Misc header attributes specific to MasstreeBorderPage placed after the common header.
 */
const uint32_t kBorderPageAdditionalHeaderSize = 8U;

/**
 * Byte size of one slot in MasstreeBorderPage excluding slice information.
 */
const uint32_t kBorderPageSlotSize = 32U;

/**
 * Maximum number of slots in one MasstreeBorderPage.
 */
const SlotIndex kBorderPageMaxSlots
  = (kPageSize - kCommonPageHeaderSize - kBorderPageAdditionalHeaderSize)
  / (kBorderPageSlotSize + sizeof(KeySlice));

/**
 * Byte size of the record data part (data_) in MasstreeBorderPage.
 */
const uint32_t kBorderPageDataPartSize
  = kPageSize
  - kCommonPageHeaderSize
  - kBorderPageAdditionalHeaderSize
  - kBorderPageMaxSlots * sizeof(KeySlice);

/** Offset of data_ member in MasstreeBorderPage. */
const DataOffset kBorderPageDataPartOffset
  = kCommonPageHeaderSize
  + 8U  // next_offset_, consecutive_inserts_, dummy_
  + kBorderPageMaxSlots * sizeof(KeySlice);  // slices_
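
// Illustrative arithmetic (not part of the original header), assuming the FOEDUS page size
// of 4096 bytes for kPageSize in storage_id.hpp:
//   kBorderPageMaxSlots       = (4096 - 80 - 8) / (32 + 8)  = 100 records per border page
//   kBorderPageDataPartSize   = 4096 - 80 - 8 - 100 * 8     = 3208 bytes for records and slots
//   kBorderPageDataPartOffset = 80 + 8 + 100 * 8            = 888 bytes from the start of the page
// so the data part exactly fills the rest of the page (888 + 3208 = 4096).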

/** Order-preserving normalization for primitive key types. */
template<typename T>
inline KeySlice normalize_primitive(T value) {
  if (std::numeric_limits<T>::is_signed) {
    return static_cast<uint64_t>(value) - (1ULL << 63);
  } else {
    return static_cast<uint64_t>(value);
  }
}

/** Opposite of normalize_primitive(). */
template<typename T>
inline T denormalize_primitive(KeySlice value) {
  if (std::numeric_limits<T>::is_signed) {
    return static_cast<T>(value - (1ULL << 63));
  } else {
    return static_cast<T>(value);
  }
}
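
// Usage sketch (illustrative, not part of the original header): normalization keeps the
// numeric order of signed values when slices are compared as unsigned 64-bit integers.
//   KeySlice a = normalize_primitive<int32_t>(-5);
//   KeySlice b = normalize_primitive<int32_t>(3);
//   // a < b holds, mirroring -5 < 3.
//   int32_t back = denormalize_primitive<int32_t>(a);  // back == -5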

/**
 * Convert an aligned big-endian byte array of at least 8-bytes-length to KeySlice.
 */
inline KeySlice normalize_be_bytes_full_aligned(const void* be_bytes) {
  // the input is guaranteed to be 8-byte aligned, so we can read it directly and efficiently.
  return assorted::read_bigendian<uint64_t>(be_bytes);
}

/**
 * Convert a big-endian byte array of at least 8-bytes-length to KeySlice.
 */
inline KeySlice normalize_be_bytes_full(const void* be_bytes) {
  // alignment is not guaranteed here, so we have to copy to an aligned local variable first
  uint64_t tmp;  // an 8-byte stack variable is guaranteed to be 8-byte aligned, at least in GCC.
  std::memcpy(&tmp, be_bytes, 8);
  return assorted::read_bigendian<uint64_t>(&tmp);
}

/**
 * Convert a big-endian byte array of given length to KeySlice.
 */
inline KeySlice normalize_be_bytes_fragment(const void* be_bytes, uint32_t length) {
  if (length >= 8) {
    return normalize_be_bytes_full(be_bytes);
  }
  uint64_t tmp = 0;
  std::memcpy(&tmp, be_bytes, length);
  return assorted::read_bigendian<uint64_t>(&tmp);
}
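
// Usage sketch (illustrative, not part of the original header): a short fragment is
// zero-padded on the right, so the two bytes "AB" become 0x4142000000000000 as a KeySlice.
//   const char fragment[2] = {'A', 'B'};
//   KeySlice slice = normalize_be_bytes_fragment(fragment, 2);
//   // slice == 0x4142000000000000ULL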

/**
 * Extract a part of a big-endian byte array of given length as KeySlice.
 */
inline KeySlice slice_key(const void* be_bytes, KeyLength slice_length) {
  if (slice_length >= sizeof(KeySlice)) {
    return normalize_be_bytes_full(be_bytes);
  } else {
    return normalize_be_bytes_fragment(be_bytes, slice_length);
  }
}

/**
 * Extract a part of a big-endian byte array of given length as KeySlice,
 * skipping the slices consumed by the preceding layers.
 */
inline KeySlice slice_layer(const void* be_bytes, KeyLength key_length, Layer current_layer) {
  const KeyLength skipped = current_layer * sizeof(KeySlice);
  const KeyLength remainder_length = key_length - skipped;
  const char* casted = reinterpret_cast<const char*>(be_bytes);
  if (remainder_length >= sizeof(KeySlice)) {
    return normalize_be_bytes_full(casted + skipped);
  } else {
    return normalize_be_bytes_fragment(casted + skipped, remainder_length);
  }
}
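
// Usage sketch (illustrative, not part of the original header): a 10-byte key spans two
// layers; layer 0 takes the first 8 bytes and layer 1 takes the remaining 2-byte fragment.
//   const char key[10] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'};
//   KeySlice layer0 = slice_layer(key, 10, 0);  // slice of bytes [0, 8)
//   KeySlice layer1 = slice_layer(key, 10, 1);  // slice of bytes [8, 10), zero-padded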

/**
 * Returns the number of 8-byte slices that the two strings share as prefix.
 */
inline uint16_t count_common_slices(const void* left, const void* right, uint16_t max_slices) {
  const uint64_t* left_casted = reinterpret_cast<const uint64_t*>(ASSUME_ALIGNED(left, 8));
  const uint64_t* right_casted = reinterpret_cast<const uint64_t*>(ASSUME_ALIGNED(right, 8));
  for (uint16_t slices = 0; slices < max_slices; ++slices) {
    if (left_casted[slices] != right_casted[slices]) {
      return slices;
    }
  }
  return max_slices;
}
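
// Usage sketch (illustrative, not part of the original header): both inputs must be
// 8-byte aligned because of ASSUME_ALIGNED; max_slices caps how far we compare.
//   alignas(8) const char left[16]  = "aaaaaaaabbbbbbb";
//   alignas(8) const char right[16] = "aaaaaaaaccccccc";
//   uint16_t common = count_common_slices(left, right, 2);  // == 1: only the first slice matches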

/**
 * Returns whether the given key is 8-byte aligned and also zero-padded to 8 bytes
 * for easier slicing.
 */
inline bool is_key_aligned_and_zero_padded(const char* key, KeyLength key_length) {
  uintptr_t int_address = reinterpret_cast<uintptr_t>(key);
  if (int_address % 8 != 0) {
    return false;
  }
  if (key_length % 8 != 0) {
    uint16_t paddings = 8 - (key_length % 8);
    for (uint16_t i = 0; i < paddings; ++i) {
      if (key[key_length + i] != 0) {
        return false;
      }
    }
  }
  return true;
}
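
// Usage sketch (illustrative, not part of the original header): the buffer must be padded
// with zero bytes up to the next 8-byte boundary beyond key_length.
//   alignas(8) char buffer[8] = {'k', 'e', 'y', 0, 0, 0, 0, 0};
//   bool ok = is_key_aligned_and_zero_padded(buffer, 3);  // true: aligned and zero-padded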

}  // namespace masstree
}  // namespace storage
}  // namespace foedus
#endif  // FOEDUS_STORAGE_MASSTREE_MASSTREE_ID_HPP_