20 #include <glog/logging.h>
35 uint32_t count = end - begin;
37 std::memcpy(chunk_ + size_, begin, count *
sizeof(
PagePoolOffset));
42 std::memcpy(destination, chunk_ + (size_ - count), count *
sizeof(
PagePoolOffset));
66 const OffsetAndEpoch* result = std::lower_bound(chunk_, chunk_ + size_, dummy, CompareEpoch());
69 return result - chunk_;
75 for (uint32_t i = 0; i < count; ++i) {
76 destination[i] = chunk_[i].
offset_;
81 std::memmove(chunk_, chunk_ + count, (size_ - count) *
sizeof(
OffsetAndEpoch));
88 for (uint32_t i = 1; i < size_; ++i) {
90 if (
Epoch(chunk_[i - 1U].safe_epoch_) >
Epoch(chunk_[i].safe_epoch_)) {
104 uint64_t memory_size,
106 bool rigorous_page_boundary_check) {
107 pimpl_->
attach(control_block, memory, memory_size, owns, rigorous_page_boundary_check);
130 return pimpl_->
grab(desired_grab_count, chunk);
135 pimpl_->
release(desired_release_count, chunk);
138 pimpl_->
release(desired_release_count, chunk);
141 pimpl_->
release(desired_release_count, chunk);
154 : engine_(engine), numa_node_count_(engine->get_options().thread_.group_count_) {
155 std::memset(chunks_, 0,
sizeof(
ChunkPtr) * 256);
165 chunks_[i] =
nullptr;
171 if (chunks_[numa_node]->full()) {
180 if (chunks_[numa_node]->empty()) {
185 chunks_[numa_node]->size(), chunks_[numa_node]);
195 : engine_(engine), numa_node_count_(engine->get_options().thread_.group_count_), current_node_(0) {
203 if (chunk_.
empty()) {
207 if (current_node_ >= numa_node_count_) {
212 uint32_t grab_count = std::min<uint32_t>(
221 LOG(WARNING) <<
"NUMA node-" << current_node_ <<
" has no more free pages."
222 <<
" trying another node..";
223 if (current_node_ == old) {
225 LOG(FATAL) <<
"No NUMA node has any free pages. This situation is so far "
226 " not handled. Aborting";
229 LOG(FATAL) <<
"Unexpected error code.. wtf error="
242 if (chunk_.
empty()) {
247 chunk_.
size(), &chunk_);
253 : engine_(engine), node_count_(engine->get_options().thread_.group_count_) {
255 for (uint16_t node = 0; node < node_count_; ++node) {
269 if (chunks_[node].empty()) {
271 uint32_t grab_count = std::min<uint32_t>(
276 LOG(FATAL) <<
"NUMA node " <<
static_cast<int>(node) <<
" has no free pages. This situation "
277 " is so far not handled in DivvyupPageGrabBatch. Aborting";
279 LOG(FATAL) <<
"Unexpected error code.. wtf error="
284 ret.
set(node, chunks_[node].pop_back());
290 if (node_count_ == 1U) {
293 node = cur * node_count_ / total;
299 for (uint16_t node = 0; node < node_count_; ++node) {
300 if (chunks_[node].empty()) {
304 chunks_[node].size(), chunks_ + node);
void attach(PagePoolControlBlock *control_block, void *memory, uint64_t memory_size, bool owns, bool rigorous_page_boundary_check)
void attach(PagePoolControlBlock *control_block, void *memory, uint64_t memory_size, bool owns, bool rigorous_page_boundary_check)
~RoundRobinPageGrabBatch()
Epoch::EpochInteger safe_epoch_
void move_to(PagePoolOffset *destination, uint32_t count)
std::string get_debug_pool_name() const
Page pool for volatile read/write store (VolatilePage) and the read-only bufferpool (SnapshotPage)...
Root package of FOEDUS (Fast Optimistic Engine for Data Unification Services).
ErrorCode grab_one(PagePoolOffset *offset)
Grabs only one page.
uint32_t PagePoolOffset
Offset in PagePool that compactly represents the page address (unlike 8 bytes pointer).
void set_debug_pool_name(const std::string &name)
void release_one(PagePoolOffset offset)
Returns only one page.
Represents a pointer to a volatile page with modification count for preventing ABA.
storage::VolatilePagePointer grab(thread::ThreadGroupId node)
Grabs an in-memory page in the specified NUMA node.
ErrorStack uninitialize() override final
Typical implementation of Initializable::uninitialize() that provides uninitialize-once semantics...
storage::Page * pool_base_
Just an auxiliary variable to the beginning of the pool.
Brings error stacktrace information as return value of functions.
uint64_t free_pool_capacity_
Size of free_pool_.
void release_one(PagePoolOffset offset)
ErrorCode grab(uint32_t desired_grab_count, PagePoolOffsetChunk *chunk)
Adds the specified number of free pages to the chunk.
ErrorCode grab(uint32_t desired_grab_count, PagePoolOffsetChunk *chunk)
Max number of pointers to pack.
ErrorStack uninitialize() override
An idempotent method to release all resources of this object, if any.
uint32_t get_recommended_pages_per_grab() const
const char * get_error_name(ErrorCode code)
Returns the names of ErrorCode enum defined in error_code.xmacro.
PagePool::Stat get_stat() const
storage::Page * get_base() const
ErrorStack initialize() override
Acquires resources in this object, usually called right after constructor.
DivvyupPageGrabBatch()=delete
void move_to(PagePoolOffset *destination, uint32_t count)
ErrorStack initialize() override final
Typical implementation of Initializable::initialize() that provides initialize-once semantics...
RoundRobinPageGrabBatch()=delete
Pimpl object of PagePool.
Database engine object that holds all resources and provides APIs.
void release(uint32_t desired_release_count, PagePoolOffsetChunk *chunk)
NumaNodeMemoryRef * get_node_memory(foedus::thread::ThreadGroupId group) const
Just a marker to denote that the memory region represents a data page.
ErrorCode grab_one(PagePoolOffset *offset)
void set(uint8_t numa_node, memory::PagePoolOffset offset)
void release_all()
Called at the end to return all remaining pages to their pools.
std::ostream & operator<<(std::ostream &o, const AlignedMemory &v)
0x0301 : "MEMORY : Not enough free volatile pages. Check the config of MemoryOptions" ...
void release(uint32_t desired_release_count, PagePoolOffsetChunk *chunk)
Returns the specified number of free pages from the chunk.
Used to point to an already existing array.
void release_all()
Called at the end to return all remaining pages to their pools.
void move_to(PagePoolOffset *destination, uint32_t count)
Note that the destination is PagePoolOffset* because that's the only use case.
Shared data in PagePoolPimpl.
PagePoolOffset pop_back()
const LocalPageResolver & get_resolver() const
void push_back(PagePoolOffset pointer)
storage::VolatilePagePointer grab()
Grabs an in-memory page in some NUMA node.
To reduce the overhead of grabbing/releasing pages from pool, we pack this many pointers for each gra...
std::string print_backtrace()
Prints out backtrace.
uint64_t get_memory_size() const
PagePool * get_volatile_pool()
bool is_initialized() const override
Returns whether the object has been already initialized or not.
Used to store an epoch value with each entry in PagePoolOffsetChunk.
const LocalPageResolver & get_resolver() const
Gives an object to resolve an offset in this page pool (thus local) to an actual pointer and vice ver...
uint32_t capacity() const
uint64_t get_free_pool_capacity() const
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
uint64_t get_free_pool_capacity() const
uint8_t ThreadGroupId
Typedef for an ID of ThreadGroup (NUMA node).
uint32_t get_safe_offset_count(const Epoch &threshold) const
Returns the number of offsets (always from index-0) whose safe_epoch_ is strictly-before the given ep...
memory::EngineMemory * get_memory_manager() const
See Memory Manager.
ErrorCode
Enum of error codes defined in error_code.xmacro.
bool is_initialized() const override final
Returns whether the object has been already initialized or not.
std::string get_debug_pool_name() const
uint64_t memory_size_
Byte size of this page pool.
storage::VolatilePagePointer grab_evenly(uint64_t cur, uint64_t total)
Grabs an in-memory page evenly and contiguously from each NUMA node.
void set_debug_pool_name(const std::string &name)
Call this anytime after attach()
EpochInteger value() const
Returns the raw integer representation.