libfoedus-core
FOEDUS Core Library
masstree_storage_prefetch.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2014-2015, Hewlett-Packard Development Company, LP.
3  * This program is free software; you can redistribute it and/or modify it
4  * under the terms of the GNU General Public License as published by the Free
5  * Software Foundation; either version 2 of the License, or (at your option)
6  * any later version.
7  *
8  * This program is distributed in the hope that it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11  * more details. You should have received a copy of the GNU General Public
12  * License along with this program; if not, write to the Free Software
13  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14  *
15  * HP designates this particular file as subject to the "Classpath" exception
16  * as provided by HP in the LICENSE.txt file that accompanied this code.
17  */
19 
20 #include <glog/logging.h>
21 
22 #include "foedus/assert_nd.hpp"
25 
26 namespace foedus {
27 namespace storage {
28 namespace masstree {
29 
31  thread::Thread* context,
32  bool vol_on,
33  bool snp_on,
34  KeySlice from,
35  KeySlice to) {
    // Entry point (MasstreeStorage::prefetch_pages_normalized, per the declaration below):
    // warms CPU caches for all pages that cover the normalized key range [from, to].
    // vol_on maps to install_volatile, snp_on to cache_snapshot in the public signature.
37  VLOG(0) << "Thread-" << context->get_thread_id()
38  << " prefetching " << get_name() << " from=" << from << ", to=" << to;
39 
    // NOTE(review): "watch" used below is declared on a line elided from this listing
    // (orig line 36; presumably a debugging::StopWatch per the stop_watch.hpp references) — confirm.
40  MasstreeIntermediatePage* root_page;
    // Read-only access to the first root (for_write=false), then warm it into L2/L3
    // before recursing over the requested range.
41  CHECK_ERROR_CODE(get_first_root(context, false, &root_page));
42  prefetch_page_l2(root_page);
43  CHECK_ERROR_CODE(prefetch_pages_normalized_recurse(context, vol_on, snp_on, from, to, root_page));
44 
45  watch.stop();
46  VLOG(0) << "Thread-" << context->get_thread_id()
47  << " prefetched " << get_name() << " in " << watch.elapsed_us() << "us";
48  return kErrorCodeOk;
49 }
50 
52  thread::Thread* context,
53  bool vol_on,
54  bool snp_on,
55  KeySlice from,
56  KeySlice to,
57  MasstreePage* p) {
    // Recursive worker: prefetches the subtree rooted at p that overlaps [from, to].
    // An empty-range page has no entries by definition; nothing to prefetch.
58  if (p->is_empty_range()) {
59  return kErrorCodeOk;
60  }
61 
    // A page with foster twins is mid-split: the real records live in the foster
    // minor/major pages, so recurse into both with the unchanged range.
62  if (p->has_foster_child()) {
63  MasstreePage* minor = context->resolve_cast<MasstreePage>(p->get_foster_minor());
64  MasstreePage* major = context->resolve_cast<MasstreePage>(p->get_foster_major());
65  CHECK_ERROR_CODE(prefetch_pages_normalized_recurse(context, vol_on, snp_on, from, to, minor));
66  CHECK_ERROR_CODE(prefetch_pages_normalized_recurse(context, vol_on, snp_on, from, to, major));
67  return kErrorCodeOk;
68  }
69 
70  uint8_t count = p->get_key_count();
71  if (p->is_border()) {
72  MasstreeBorderPage* page = reinterpret_cast<MasstreeBorderPage*>(p);
73  for (uint8_t i = 0; i < count; ++i) {
    // Border page: follow only slots that point to a next layer and whose slice
    // falls inside [from, to]. Plain records need no further descent.
74  if (page->does_point_to_layer(i) && page->get_slice(i) >= from && page->get_slice(i) <= to) {
75  DualPagePointer* pointer = page->get_next_layer(i);
76  // next layer. exhaustively read
    // NOTE(review): the prefetch_pages_follow(...) call head is elided from this
    // listing (orig line 77); only its argument lines remain below.
78  context,
79  pointer,
80  vol_on,
81  snp_on,
84  }
85  }
86  } else {
87  MasstreeIntermediatePage* page = reinterpret_cast<MasstreeIntermediatePage*>(p);
    // Intermediate page: count separators imply count+1 children, hence i <= count.
88  for (uint8_t i = 0; i <= count; ++i) {
    // NOTE(review): this skip condition looks inverted — child i covers keys BELOW
    // get_separator(i), so skipping when separator >= from would skip in-range
    // children. Upstream source is expected to read "< from"; verify (same for the
    // minipage loop below). The paired break condition ("> to") is consistent with
    // "< from", not ">= from".
89  if (i < count && page->get_separator(i) >= from) {
90  continue;
91  }
    // Once the previous separator exceeds to, no later child can overlap the range.
92  if (i > 0 && page->get_separator(i - 1) > to) {
93  break;
94  }
    // NOTE(review): the minipage declaration is elided from this listing (orig line
    // 95; presumably "MasstreeIntermediatePage::MiniPage& minipage = page->get_minipage(i);").
96  for (uint8_t j = 0; j <= minipage.key_count_; ++j) {
97  if (j < minipage.key_count_ && minipage.separators_[j] >= from) {
98  continue;
99  }
100  if (j > 0 && minipage.separators_[j - 1] > to) {
101  break;
102  }
103  DualPagePointer* pointer = &minipage.pointers_[j];
    // Child page within range: follow the pointer. (The original "next layer"
    // comment here appears copy-pasted from the border branch — these are child
    // pages of an intermediate page, not next-layer roots.)
105  CHECK_ERROR_CODE(prefetch_pages_follow(context, pointer, vol_on, snp_on, from, to));
106  }
107  }
108  }
109  return kErrorCodeOk;
110 }
111 
113  thread::Thread* context,
114  DualPagePointer* pointer,
115  bool vol_on,
116  bool snp_on,
117  KeySlice from,
118  KeySlice to) {
    // Follows one dual pointer: optionally caches the snapshot page, optionally
    // installs a volatile copy of it, then recurses into the volatile child.
119  // first, do we have to cache snapshot page?
120  if (pointer->snapshot_pointer_ != 0) {
121  if (snp_on) {
122  MasstreePage* child;
    // NOTE(review): the call head is elided from this listing (orig line 123;
    // presumably "CHECK_ERROR_CODE(context->find_or_read_a_snapshot_page(" per the
    // thread.cpp declaration below) — it reads the snapshot page into the cache.
124  pointer->snapshot_pointer_,
125  reinterpret_cast<Page**>(&child)));
126  prefetch_page_l2(child);
    // Recurse into the snapshot subtree. Note vol_on is passed as false here:
    // snapshot pages are immutable, so no volatile installation happens below
    // this point. (The recursive call head is elided from this listing, orig line 127.)
128  context,
129  false,
130  snp_on,
131  from,
132  to,
133  child));
134  }
135  // do we have to install volatile page based on it?
136  if (pointer->volatile_pointer_.is_null() && vol_on) {
    // The dual pointer itself must live in a volatile page — presumably we never
    // install volatile pages under a snapshot page (confirm against to_page() semantics).
137  ASSERT_ND(!to_page(pointer)->get_header().snapshot_);
138  Page* child;
139  CHECK_ERROR_CODE(context->install_a_volatile_page(pointer, &child));
140  }
141  }
142 
143  // then go down
    // Descend into the volatile child (possibly the one just installed above),
    // keeping the caller's vol_on/snp_on flags.
144  if (!pointer->volatile_pointer_.is_null() && vol_on) {
145  ASSERT_ND(!to_page(pointer)->get_header().snapshot_);
146  MasstreePage* child = context->resolve_cast<MasstreePage>(pointer->volatile_pointer_);
147  prefetch_page_l2(child);
    // NOTE(review): the recursive prefetch_pages_normalized_recurse(... call head is
    // elided from this listing (orig line 148); only its argument lines remain.
149  context,
150  vol_on,
151  snp_on,
152  from,
153  to,
154  child));
155  }
156  return kErrorCodeOk;
157 }
158 
159 } // namespace masstree
160 } // namespace storage
161 } // namespace foedus
ErrorCode prefetch_pages_follow(thread::Thread *context, DualPagePointer *pointer, bool vol_on, bool snp_on, KeySlice from, KeySlice to)
const KeySlice kInfimumSlice
ErrorCode find_or_read_a_snapshot_page(storage::SnapshotPagePointer page_id, storage::Page **out)
Find the given page in snapshot cache, reading it if not found.
Definition: thread.cpp:95
SlotIndex get_key_count() const __attribute__((always_inline))
Physical key count in this page (some of these keys might be deleted).
KeySlice get_slice(SlotIndex index) const __attribute__((always_inline))
ErrorCode install_a_volatile_page(storage::DualPagePointer *pointer, storage::Page **installed_page)
Installs a volatile page to the given dual pointer as a copy of the snapshot page.
Definition: thread.cpp:107
Represents a pointer to another page (usually a child page).
Definition: storage_id.hpp:271
bool is_empty_range() const __attribute__((always_inline))
An empty-range page, either intermediate or border, never has any entries.
Page * to_page(const void *address)
Super-dirty way to obtain the Page that the given address belongs to.
Definition: page.hpp:395
Root package of FOEDUS (Fast Optimistic Engine for Data Unification Services).
Definition: assert_nd.hpp:44
bool is_border() const __attribute__((always_inline))
Represents one thread running on one NUMA core.
Definition: thread.hpp:48
ThreadId get_thread_id() const
Definition: thread.cpp:53
MiniPage & get_minipage(uint8_t index) __attribute__((always_inline))
Represents one border page in Masstree Storage.
DualPagePointer * get_next_layer(SlotIndex index) __attribute__((always_inline))
ErrorCode prefetch_pages_normalized(thread::Thread *context, bool install_volatile, bool cache_snapshot, KeySlice from, KeySlice to)
defined in masstree_storage_prefetch.cpp
uint64_t KeySlice
Each key slice is an 8-byte integer.
Common base of MasstreeIntermediatePage and MasstreeBorderPage.
VolatilePagePointer get_foster_minor() const __attribute__((always_inline))
VolatilePagePointer get_foster_major() const __attribute__((always_inline))
KeySlice get_separator(uint8_t index) const __attribute__((always_inline))
DualPagePointer pointers_[kMaxIntermediateMiniSeparators+1]
VolatilePagePointer volatile_pointer_
Definition: storage_id.hpp:308
0 means no-error.
Definition: error_code.hpp:87
ErrorCode get_first_root(thread::Thread *context, bool for_write, MasstreeIntermediatePage **root)
Root-node related, such as a method to retrieve 1st-root, to grow, etc.
uint64_t stop()
Take another current time tick.
Definition: stop_watch.cpp:35
void prefetch_page_l2(const void *page)
Prefetch a page to L2/L3 cache.
SnapshotPagePointer snapshot_pointer_
Definition: storage_id.hpp:307
Just a marker to denote that the memory region represents a data page.
Definition: page.hpp:334
P * resolve_cast(storage::VolatilePagePointer ptr) const
resolve() plus reinterpret_cast
Definition: thread.hpp:110
KeySlice separators_[kMaxIntermediateMiniSeparators]
Same semantics as separators_ in enclosing class.
bool has_foster_child() const __attribute__((always_inline))
ErrorCode prefetch_pages_normalized_recurse(thread::Thread *context, bool install_volatile, bool cache_snapshot, KeySlice from, KeySlice to, MasstreePage *page)
double elapsed_us() const
Definition: stop_watch.hpp:45
#define CHECK_ERROR_CODE(x)
This macro calls x and checks its returned error code.
Definition: error_code.hpp:155
const KeySlice kSupremumSlice
Represents one intermediate page in Masstree Storage.
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
Definition: assert_nd.hpp:72
A high-resolution stop watch.
Definition: stop_watch.hpp:30
bool does_point_to_layer(SlotIndex index) const __attribute__((always_inline))
ErrorCode
Enum of error codes defined in error_code.xmacro.
Definition: error_code.hpp:85