21 #include <glog/logging.h>
44 ASSERT_ND(logical_buckets <= physical_buckets);
45 ASSERT_ND((logical_buckets & (logical_buckets - 1U)) != 0);
46 return logical_buckets;
51 const uint32_t kOverflowFraction = 128U;
52 const uint32_t kOverflowMinSize = 256U;
53 uint32_t overflow_size = physical_buckets / kOverflowFraction;
54 if (overflow_size <= kOverflowMinSize) {
55 overflow_size = kOverflowMinSize;
62 physical_buckets_(physical_buckets),
63 bucket_div_(logical_buckets_) {
67 : numa_node_(numa_node),
69 hash_func_(physical_buckets),
105 if (i < overflow_buckets_count_ - 1U) {
125 new_bucket.
reset(content, tag);
130 if (!
buckets_[bucket].is_content_set()) {
141 DVLOG(0) <<
"Ohhh, we have to add this to overflow list! This should be really rare."
152 if (new_overflow_entry == 0) {
153 LOG(ERROR) <<
"Oh my god. we consumed all overflow entries, which means we have too many"
172 LOG(INFO) <<
"Snapshot-Cache eviction starts at node-" <<
numa_node_
180 cur = (cur >> 5) << 5;
189 const uint16_t kMaxLoops = 16;
190 for (loops = 0; loops < kMaxLoops; ++loops) {
207 LOG(INFO) <<
"Snapshot-Cache eviction completed at node-" <<
numa_node_
209 <<
", looped-over the whole hashtable for " << loops <<
" times";
218 const uint16_t decrements = 1U << loop;
226 uint32_t cur_cacheline = cur >> 5;
229 const uint16_t kL1PrefetchBatch = 8;
230 const uint16_t kL1PrefetchAhead = 16;
231 for (; cur_cacheline < end_cacheline; ++cur_cacheline) {
232 if (cur_cacheline / kL1PrefetchBatch == 0) {
234 refcounts_ + ((cur_cacheline + kL1PrefetchAhead) << 5),
238 BucketId bucket = cur_cacheline << 5;
241 bool all_zeros =
true;
242 for (uint16_t i = 0; i < 8U; ++i) {
254 for (uint16_t i = 0; i < 32U; ++i) {
255 if (base[i].count_ > 0) {
256 bool still_non_zero = base[i].
decrement(decrements);
257 if (!still_non_zero) {
271 LOG(INFO) <<
"Snapshot-Cache eviction main_loop at node-" <<
numa_node_ <<
", checked "
274 return cur_cacheline << 5;
278 const uint16_t decrements = 1U << loop;
279 uint32_t checked_count = 0;
299 if (!still_non_zero) {
302 prev_entry->
next_ = next;
304 cur_entry->
next_ = evicted_head;
316 if (!still_non_zero) {
320 cur_entry->
next_ = evicted_head;
327 LOG(INFO) <<
"Snapshot-Cache eviction overflow_loop at node-" <<
numa_node_ <<
", checked "
328 << (checked_count) <<
" buckets in " << watch.
elapsed_us() <<
"us";
357 for (
BucketId i = 0; i < end; ++i) {
380 if (batch_size == 0) {
390 for (uint16_t b = 0; b < batch_size; ++b) {
391 if (page_ids[b] == 0) {
393 }
else if (b > 0 && page_ids[b - 1] == page_ids[b]) {
403 for (uint16_t b = 0; b < batch_size; ++b) {
405 if (page_ids[b] == 0) {
407 }
else if (b > 0 && page_ids[b - 1] == page_ids[b]) {
414 BucketId bucket_number = bucket_numbers[b];
void evict_overflow_loop(EvictArgs *args, uint16_t loop)
0x0002 : "GENERAL: Invalid parameter given".
numa_alloc_onnode() and numa_free().
BucketId clockhand_
We previously stopped eviction here for usual buckets.
soc::SharedMutex overflow_free_buckets_mutex_
The mutex to protect free overflow entries.
CacheOverflowEntry * overflow_buckets_
An entry in the overflow linked list for snapshot cache.
uint32_t BucketId
Offset in hashtable bucket.
memory::AlignedMemory buckets_memory_
ErrorCode install(storage::SnapshotPagePointer page_id, ContentId content)
Called when a cached page is not found.
memory::AlignedMemory refcounts_memory_
Root package of FOEDUS (Fast Optimistic Engine for Data Unification Services).
static PageIdTag get_tag(storage::SnapshotPagePointer page_id) __attribute__((always_inline))
Returns another hash value used to differentiate IDs with similar hash values.
const BucketId logical_buckets_
Number of logical (actually used) buckets in this table.
BucketId get_physical_buckets() const __attribute__((always_inline))
std::ostream & operator<<(std::ostream &o, const HashFunc &v)
uint32_t OverflowPointer
Position in the overflow buckets.
BucketId determine_logical_buckets(BucketId physical_buckets)
Brings error stacktrace information as return value of functions.
void alloc(uint64_t size, uint64_t alignment, AllocType alloc_type, int numa_node) noexcept
Allocate a memory, releasing the current memory if exists.
void add_evicted(ContentId content)
HashFunc(BucketId physical_buckets)
void prefetch_cachelines(const void *address, int cacheline_count)
Prefetch multiple contiguous cachelines to L1 cache.
A simple hash function logic used in snapshot cache.
#define ASSUME_ALIGNED(x, y)
Wraps GCC's __builtin_assume_aligned.
uint32_t determine_overflow_list_size(BucketId physical_buckets)
void increment() __attribute__((always_inline))
#define LIKELY(x)
Hints that x is highly likely true.
OverflowPointer overflow_free_buckets_head_
This forms another singly-linked list of free overflow entries.
PageIdTag get_tag() const
uint64_t evicted_count_
[Out] Number of entries that were actually evicted
const uint16_t kCachelineSize
Byte count of one cache line.
CacheHashtable(BucketId physical_buckets, uint16_t numa_node)
void evict(EvictArgs *args)
Evict some entries from the hashtable.
ErrorCode find_batch(uint16_t batch_size, const storage::SnapshotPagePointer *page_ids, ContentId *out) const
Batched version of find().
bool decrement(uint16_t subtract) __attribute__((always_inline))
returns whether the counter is still non-zero
Max size for find_batch()
BucketId evict_main_loop(EvictArgs *args, BucketId cur, uint16_t loop)
uint32_t PageIdTag
This is a lossy-compressed representation of SnapshotPagePointer used to quickly identify whether the...
ErrorStack verify_single_thread() const
only for debugging.
uint64_t SnapshotPagePointer
Page ID of a snapshot page.
memory::PagePoolOffset ContentId
Offset of the actual page image in PagePool for snapshot pages.
uint64_t stop()
Take another current time tick.
Auto-lock scope object for SharedMutex.
const uint32_t overflow_buckets_count_
CacheRefCount * refcounts_
memory::AlignedMemory overflow_buckets_memory_
void * get_block() const
Returns the memory block.
const uint16_t kHopNeighbors
Starting from the given position, we consider this many buckets for the entry.
uint64_t target_count_
[In] Evicts entries up to about target_count (maybe a bit more or less)
ContentId get_content_id() const
double elapsed_us() const
Hash bucket in cache table.
BucketId get_logical_buckets() const __attribute__((always_inline))
uint32_t overflow_entries_
uint64_t generate_almost_prime_below(uint64_t threshold)
Generate a prime, or a number that is almost prime, that is less than the given threshold.
const ErrorStack kRetOk
Normal return value for no-error case.
const uint16_t numa_node_
Convenient way of writing hex integers to stream.
Stat get_stat_single_thread() const
only for debugging.
void reset(ContentId id=0, PageIdTag tag=0)
#define UNLIKELY(x)
Hints that x is highly likely false.
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
A high-resolution stop watch.
const BucketId physical_buckets_
Number of buckets that are physically allocated, at least kHopNeighbors more than logical...
BucketId get_bucket_number(storage::SnapshotPagePointer page_id) const __attribute__((always_inline))
Returns a bucket number the given page ID should belong to.
void memory_fence_release()
Equivalent to std::atomic_thread_fence(std::memory_order_release).
OverflowPointer next_
Note that we don't have to atomically maintain/follow this pointer thanks to the loose requirements...
OverflowPointer overflow_buckets_head_
This forms a singly-linked list of active overflow entries.
0x0903 : "SPCACHE: Hashtable for snapshot cache got too many overflow entries".
ErrorCode
Enum of error codes defined in error_code.xmacro.
A loosely maintained reference count for CLOCK algorithm.