#ifndef FOEDUS_XCT_XCT_ID_HPP_
#define FOEDUS_XCT_XCT_ID_HPP_
uint64_t word = thread_id;
return static_cast<uint32_t>((word >> 32) & 0xFFFFFFFFUL);
return static_cast<McsBlockIndex>(word & 0xFFFFFFFFUL);
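A standalone sketch of this 64-bit packing (high 32 bits = thread id, low 32 bits = block index, as documented for McsWwBlockData::word_ below). The combine() body is inferred from the fragments here; names mirror the header, but this is an illustration rather than the FOEDUS source.

#include <cassert>
#include <cstdint>

typedef uint32_t McsBlockIndex;

uint64_t combine(uint32_t thread_id, McsBlockIndex block) {
  uint64_t word = thread_id;   // inferred from the fragment above
  word <<= 32;
  word |= block;
  return word;
}

uint32_t decompose_thread_id(uint64_t word) {
  return static_cast<uint32_t>((word >> 32) & 0xFFFFFFFFUL);
}

McsBlockIndex decompose_block(uint64_t word) {
  return static_cast<McsBlockIndex>(word & 0xFFFFFFFFUL);
}

int main() {
  uint64_t word = combine(7, 42);
  assert(decompose_thread_id(word) == 7);
  assert(decompose_block(word) == 42);
  return 0;
}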
return assorted::atomic_load_acquire<uint64_t>(&word_);
return assorted::atomic_load_consume<uint64_t>(&word_);
return assorted::atomic_load_seq_cst<uint64_t>(&word_);
assorted::atomic_store_seq_cst<uint64_t>(&word_, word);
assorted::atomic_store_release<uint64_t>(&word_, word);
successor_thread_id_ = 0;
successor_block_index_ = 0;
assorted::raw_atomic_fetch_and_bitwise_and<uint8_t>(
    /* ... */ static_cast<uint8_t>(~kStateBlockedMask));
assorted::raw_atomic_fetch_and_bitwise_or<uint8_t>(/* ... */);
assorted::raw_atomic_fetch_and_bitwise_and<uint8_t>(/* ... */);
uint64_t *address = reinterpret_cast<uint64_t*>(this);
uint64_t mask = *reinterpret_cast<uint64_t*>(&tmp);
assorted::raw_atomic_fetch_and_bitwise_or<uint64_t>(address, mask);
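The three lines above publish the successor fields in one shot: a temporary block whose only non-zero bytes are the successor fields is reinterpreted as a raw 64-bit word and OR-ed into *this atomically. A std::atomic-based sketch of the same idea with simplified field types; it is correct only because the successor bytes are zero beforehand.

#include <atomic>
#include <cstdint>
#include <cstring>

struct SuccessorImage {    // illustrative stand-in for the real block layout
  uint16_t thread_id;
  uint16_t padding;
  uint32_t block_index;
};
static_assert(sizeof(SuccessorImage) == 8, "must fit one atomic word");

// Publish both successor fields at once; valid only if they were zero.
void set_successor_atomic(std::atomic<uint64_t>* word,
                          uint16_t thread_id, uint32_t block_index) {
  SuccessorImage tmp = {thread_id, 0, block_index};
  uint64_t mask;
  std::memcpy(&mask, &tmp, sizeof(mask));  // avoids strict-aliasing issues
  word->fetch_or(mask, std::memory_order_acq_rel);
}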
return assorted::atomic_load_acquire<uint8_t>(/* ... */);
assorted::raw_atomic_fetch_and_bitwise_or<uint32_t>(/* ... */);
assorted::raw_atomic_fetch_and_bitwise_or<uint32_t>(/* ... */);
assorted::raw_atomic_fetch_and_bitwise_or<uint32_t>(/* ... */);
assorted::raw_atomic_fetch_and_bitwise_or<uint32_t>(/* ... */);
assorted::raw_atomic_fetch_and_bitwise_or<uint32_t>(/* ... */);
assorted::raw_atomic_fetch_and_bitwise_or<uint32_t>(/* ... */);
assorted::raw_atomic_fetch_and_bitwise_or<uint32_t>(/* ... */);
assorted::raw_atomic_exchange<uint16_t>(
    /* ... */ static_cast<uint16_t>(kSuccFlagLeaving));
assorted::raw_atomic_fetch_and_bitwise_and<uint32_t>(/* ... */);
assorted::atomic_store_release<uint64_t>(&next_.data_, next);
return assorted::raw_atomic_compare_exchange_weak<uint64_t>(
    &next_.data_, &expected, desired);
return assorted::raw_atomic_compare_exchange_strong<uint64_t>(
    &next_.data_, &expected, desired);
assorted::raw_atomic_fetch_and_bitwise_or<uint32_t>(/* ... */);
assorted::raw_atomic_fetch_and_bitwise_and<uint32_t>(/* ... */);
assorted::raw_atomic_compare_exchange_strong<uint32_t>(/* ... */);
assorted::raw_atomic_compare_exchange_weak<uint32_t>(/* ... */);
assorted::raw_atomic_compare_exchange_strong<uint64_t>(
    &next_.data_, &expected, desired);
assorted::raw_atomic_compare_exchange_weak<uint64_t>(
    &next_.data_, &expected, desired);
return assorted::raw_atomic_exchange<uint32_t>(&next_.components_.id_, id);
return assorted::raw_atomic_compare_exchange_strong<uint32_t>(/* ... */);
return assorted::raw_atomic_compare_exchange_weak<uint32_t>(/* ... */);
return assorted::raw_atomic_compare_exchange_weak<uint32_t>(/* ... */);
return assorted::raw_atomic_compare_exchange_strong<uint32_t>(/* ... */);
assorted::raw_atomic_compare_exchange_weak<uint32_t>(/* ... */);
return assorted::atomic_load_acquire<uint32_t>(&pred_.components_.id_);
return assorted::atomic_load_acquire<uint32_t>(&next_.components_.id_);
return assorted::atomic_load_acquire<uint64_t>(&next_.data_);
assorted::atomic_store_release<uint32_t>(&pred_.components_.id_, id);
assorted::atomic_store_release<uint32_t>(&next_.components_.id_, id);
return assorted::raw_atomic_exchange<uint32_t>(&pred_.components_.id_, id);
assorted::raw_atomic_fetch_add<uint16_t>(&nreaders_, 1);
return assorted::raw_atomic_fetch_add<uint16_t>(&nreaders_, -1);
return assorted::atomic_load_acquire<uint16_t>(&nreaders_);
return assorted::atomic_load_acquire<thread::ThreadId>(&next_writer_) != kNextWriterNone;
return assorted::atomic_load_acquire<thread::ThreadId>(&next_writer_);
return assorted::raw_atomic_exchange<thread::ThreadId>(&next_writer_, id);
return assorted::raw_atomic_compare_exchange_weak<thread::ThreadId>(/* ... */);
return assorted::raw_atomic_compare_exchange_strong<thread::ThreadId>(/* ... */);
return assorted::raw_atomic_exchange<uint32_t>(&tail_, new_tail);
return assorted::raw_atomic_compare_exchange_strong<uint32_t>(&tail_, &expected, desired);
return assorted::raw_atomic_compare_exchange_weak<uint32_t>(&tail_, &expected, desired);
static uint32_t to_tail_int(thread::ThreadId tail_waiter, McsBlockIndex tail_waiter_block) {
  /* ... */
  return static_cast<uint32_t>(tail_waiter) << 16 | (tail_waiter_block & 0xFFFFU);
}
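A quick sanity check of the tail-int layout above: the 16-bit ThreadId sits in the high half of the 32-bit tail word, the block index in the low half. The values below are arbitrary.

#include <cassert>
#include <cstdint>

int main() {
  uint16_t tail_waiter = 3;        // thread::ThreadId
  uint32_t tail_waiter_block = 9;  // McsBlockIndex; only the low 16 bits survive
  uint32_t tail_int =
    static_cast<uint32_t>(tail_waiter) << 16 | (tail_waiter_block & 0xFFFFU);
  assert((tail_int >> 16) == 3U);      // recover the waiter thread
  assert((tail_int & 0xFFFFU) == 9U);  // recover the block index
  return 0;
}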
return assorted::atomic_load_acquire<uint32_t>(&tail_);
lock_id_(lock_id), block_index_(block) {}
data_ = static_cast<uint64_t>(epoch_int) << 32 | ordinal;
return (data_ & kXctIdMaskEpoch) >> 32;
data_ = (data_ & ~kXctIdMaskEpoch) | (static_cast<uint64_t>(epoch_int) << 32);
return static_cast<uint32_t>(data_);
data_ = (data_ & (~kXctIdMaskOrdinal)) | ordinal;
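Taken together, these setters and getters treat data_ as epoch-in-the-upper-32-bits, ordinal-in-the-lower-32. A self-contained sketch with hypothetical mask values; the real kXctIdMaskEpoch/kXctIdMaskOrdinal also reserve the top bits for the status flags (being_written/deleted/moved/next_layer).

#include <cassert>
#include <cstdint>

const uint64_t kMaskEpoch   = 0x0FFFFFFF00000000ULL;  // hypothetical value
const uint64_t kMaskOrdinal = 0x00000000FFFFFFFFULL;  // hypothetical value

int main() {
  uint64_t data = static_cast<uint64_t>(123) << 32 | 45;  // set(123, 45)
  assert(((data & kMaskEpoch) >> 32) == 123);             // get_epoch_int()
  assert(static_cast<uint32_t>(data) == 45);              // get_ordinal()
  data = (data & (~kMaskOrdinal)) | 46;                   // set_ordinal(46)
  assert(static_cast<uint32_t>(data) == 46);
  return 0;
}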
Epoch other_epoch = other.get_epoch();
/* ... */
if (this_epoch < other_epoch) {
return (assorted::atomic_load_acquire<uint64_t>(&data_) & kXctIdBeingWrittenBit) != 0; }
return (data_ & (kXctIdMovedBit | kXctIdNextLayerBit)) != 0;
if (!other.is_valid()) {
bool acquire_now = true,
bool non_racy_acquire = false);
void acquire(bool non_racy_acquire);
uint64_t copied_data = assorted::atomic_load_acquire<uint64_t>(&data_);
if (UNLIKELY(copied_data & kXctIdBeingWrittenBit)) {
  while (copied_data & kXctIdBeingWrittenBit) {
    copied_data = assorted::atomic_load_acquire<uint64_t>(&data_);
  }
}
/* ... */
ret.data_ = copied_data;
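The same wait-until-stable read expressed with plain std::atomic, for illustration only (the assorted:: wrappers exist so FOEDUS also compiles without C++11; the bit position of the being_written flag is a placeholder here).

#include <atomic>
#include <cstdint>

const uint64_t kBeingWrittenBit = 1ULL << 63;  // placeholder bit position

uint64_t spin_while_being_written(const std::atomic<uint64_t>& data) {
  uint64_t copied = data.load(std::memory_order_acquire);
  while (copied & kBeingWrittenBit) {           // a writer is still in progress
    copied = data.load(std::memory_order_acquire);
  }
  return copied;                                // a stable snapshot
}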
uintptr_t lock_ptr);
uint64_t local_page_index,
uintptr_t lock_ptr) {
  /* ... */
  return (numa_node << 48) | (local_page_index * kLockPageSize + in_page_offset);
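Numerically, the UniversalLockId places the NUMA node in the top 16 bits and a byte address (page index times page size, plus the lock's offset within the page) below it. A sketch; in_page_offset is presumably derived from lock_ptr and the page base, which the fragment above elides, so it is a hypothetical input here.

#include <cassert>
#include <cstdint>

const uint64_t kLockPageSize = 1 << 12;  // assumed 4KB, "same as storage::kPageSize"

int main() {
  uint64_t numa_node = 2;
  uint64_t local_page_index = 10;
  uint64_t in_page_offset = 128;  // hypothetical: lock_ptr minus the page base address
  uint64_t id = (numa_node << 48) | (local_page_index * kLockPageSize + in_page_offset);
  assert((id >> 48) == 2);
  assert((id & ((1ULL << 48) - 1)) == 10 * kLockPageSize + 128);
  return 0;
}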
const UniversalLockId universal_lock_id);
#endif  // FOEDUS_XCT_XCT_ID_HPP_
bool operator!=(const McsWwBlockData &other) const
void reset_atomic() __attribute__((always_inline))
bool cas_pred_id_strong(uint32_t expected, uint32_t desired)
void reset_release() __attribute__((always_inline))
uint32_t cas_val_next_flag_strong(uint32_t expected, uint32_t desired)
uint64_t cas_val_next_strong(uint64_t expected, uint64_t desired)
bool operator==(const XctId &other) const __attribute__((always_inline))
static const uint8_t kSuccessorClassNone
void reset_guest_id_release()
void reset(thread::ThreadId tail_waiter, McsBlockIndex tail_waiter_block) __attribute__((always_inline))
used only for initial_lock()
void reset() __attribute__((always_inline))
used only during page initialization
static McsBlockIndex decompose_block(uint64_t word) __attribute__((always_inline))
void set_next_flag_reader_successor()
taken_mode_: we took a read-lock, not write-lock yet.
static const uint8_t kStateClassWriterFlag
bool has_successor_atomic() const __attribute__((always_inline))
uint64_t cas_val_next_weak(uint64_t expected, uint64_t desired)
void reset_atomic(thread::ThreadId tail_waiter, McsBlockIndex tail_waiter_block) __attribute__((always_inline))
void set_epoch(Epoch epoch) __attribute__((always_inline))
bool is_guest_acquire() const __attribute__((always_inline))
thread::ThreadId xchg_next_writer(thread::ThreadId id)
uint32_t read_next_flags()
void set_successor_class_writer()
static uint32_t to_tail_int(thread::ThreadId tail_waiter, McsBlockIndex tail_waiter_block)
void set_moved() __attribute__((always_inline))
uint64_t get_word_consume() const __attribute__((always_inline))
void init_common() __attribute__((always_inline))
void reset() __attribute__((always_inline))
used only during page initialization
void clear_successor_atomic() __attribute__((always_inline))
McsWwBlockData get_tail_atomic() const __attribute__((always_inline))
McsWwLock * get_key_lock() __attribute__((always_inline))
void ownerless_initial_lock()
The following are implemented in thread_pimpl.cpp along with the above methods, but these don't use ...
Exclusive-only (WW) MCS lock classes.
const uint64_t kXctIdBeingWrittenBit
void ownerless_release_lock()
bool is_moved() const __attribute__((always_inline))
void set_deleted() __attribute__((always_inline))
const uint64_t kXctIdMovedBit
bool next_flag_is_granted()
bool cas_next_writer_weak(thread::ThreadId expected, thread::ThreadId desired)
#define CXX11_NULLPTR
Used in public headers in place of "nullptr" of C++11.
void reset_release(thread::ThreadId tail_waiter, McsBlockIndex tail_waiter_block) __attribute__((always_inline))
void set_next_writer(thread::ThreadId thread_id)
static const uint32_t kPredFlagClassMask
bool is_valid_acquire() const __attribute__((always_inline))
RwLockableXctId * new_owner_address_
bool next_flag_is_waiting()
bool timeout_granted(int32_t timeout)
Epoch values wrap around at this value.
uint64_t get_word_acquire() const __attribute__((always_inline))
uint64_t get_word_atomic() const __attribute__((always_inline))
bool has_writer_successor()
static const uint32_t kSuccFlagWaiting
bool operator!=(const XctId &other) const __attribute__((always_inline))
bool has_successor_relaxed() const __attribute__((always_inline))
setter/getter for successor_.
McsWwBlockData copy_consume() const __attribute__((always_inline))
Root package of FOEDUS (Fast Optimistic Engine for Data Unification Services).
thread::ThreadId get_tail_waiter() const __attribute__((always_inline))
This is a "relaxed" check.
struct foedus::xct::McsRwExtendedBlock::Field::Components components_
Transaction ID, a 128-bit data to manage record versions and provide locking mechanism.
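As described by the lock_/xct_id_ members listed here, the 128-bit TID is a 64-bit lock word followed by the 64-bit persistent XctId. A shape-only sketch with simplified field types standing in for McsWwLock/XctId:

#include <cstdint>

struct LockableXctIdSketch {
  uint64_t lock_;    // the first 64 bits: locking part of the TID
  uint64_t xct_id_;  // the second 64 bits: persistent status part of the TID
};
static_assert(sizeof(LockableXctIdSketch) == 16, "TID must be 128 bits");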
uint32_t get_successor_thread_id_relaxed() const __attribute__((always_inline))
Carefully use this! In some places you must call copy_once() then call this on the copy...
McsRwLock & operator=(const McsRwLock &other)=delete
union foedus::xct::McsRwSimpleBlock::Self self_
void set_notdeleted() __attribute__((always_inline))
void hotter(thread::Thread *context) const
TrackMovedRecordResult(RwLockableXctId *new_owner_address, char *new_payload_address)
Represents one thread running on one NUMA core.
Typedefs of ID types used in thread package.
RwLockableXctId * from_universal_lock_id(const memory::GlobalVolatilePageResolver &resolver, const UniversalLockId universal_lock_id)
Always use this method rather than doing the conversion yourself.
Epoch::EpochInteger get_epoch_int() const __attribute__((always_inline))
const uint64_t kXctIdMaskSerializer
void set_successor_release(thread::ThreadId thread_id, McsBlockIndex block) __attribute__((always_inline))
bool pred_flag_is_granted()
static const uint8_t kStateFinalizedMask
static const uint32_t kSuccFlagBusy
uint32_t EpochInteger
Unsigned integer representation of epoch.
void set_pred_flag_granted()
Result of track_moved_record().
bool before(const XctId &other) const __attribute__((always_inline))
Returns if this XctId is before other in serialization order, meaning this is either an invalid (unus...
bool successor_is_ready()
void set_release(uint32_t thread_id, McsBlockIndex block) __attribute__((always_inline))
static const int32_t kTimeoutZero
Forward declarations of classes in root package.
Persistent status part of Transaction ID.
bool is_keylocked() const __attribute__((always_inline))
static const uint64_t kSuccReleased
void unset_next_flag_busy()
static const uint32_t kPredFlagWaiting
Pred flags layout (bit 31 down to bit 0): |my class|empty|waiting|.
void set_next_flag_granted()
Reader-writer (RW) MCS lock classes.
const LockListPosition kLockListPositionInvalid
void release()
Release the lock if acquired.
XctId xct_id_
the second 64bit: Persistent status part of TID.
void set_next_flag_no_successor()
uint32_t get_ordinal() const __attribute__((always_inline))
Snapshot isolation (SI), meaning the transaction reads a consistent and complete image of the databas...
McsBlockIndex block_index_
the queue node we pushed.
void set_next_flag_busy_granted()
void set_relaxed(uint32_t thread_id, McsBlockIndex block) __attribute__((always_inline))
bool is_blocked() __attribute__((always_inline))
bool next_flag_has_successor()
uint32_t cas_val_pred_id_weak(uint32_t expected, uint32_t desired)
void increment_nreaders()
bool next_flag_is_leaving_granted()
bool is_guest_relaxed() const __attribute__((always_inline))
bool cas_next_id_weak(uint32_t expected, uint32_t desired)
const uint64_t kXctIdDeletedBit
struct foedus::xct::McsRwSimpleBlock::Self::Components components_
bool has_next_writer() const
bool needs_track_moved() const __attribute__((always_inline))
bool is_valid() const __attribute__((always_inline))
uint32_t get_thread_id_relaxed() const __attribute__((always_inline))
Carefully use this! In some places you must call get_word_once() then call this on the copy...
A few macros and helper methods related to byte endian-ness.
uint32_t xchg_pred_id(uint32_t id)
uint32_t read_pred_flags()
bool acquired_
whether we immediately acquired the lock or not
bool cas_next_strong(uint64_t expected, uint64_t desired)
McsWwBlockData copy_acquire() const __attribute__((always_inline))
McsBlockIndex get_tail_waiter_block() const
void unblock() __attribute__((always_inline))
static const uint8_t kStateBlockedMask
taken_mode_: Not taken the lock yet.
UniversalLockId rw_lock_to_universal_lock_id(const memory::GlobalVolatilePageResolver &resolver, McsRwLock *lock)
void set_next_id(uint32_t id)
void clear() __attribute__((always_inline))
McsRwLock lock_
the first 64bit: Locking part of TID
uintptr_t UniversalLockId
Universally ordered identifier of each lock.
static const uint32_t kSuccFlagSuccessorNone
McsWwBlockData get_tail_once() const __attribute__((always_inline))
bool cas_next_writer_strong(thread::ThreadId expected, thread::ThreadId desired)
bool pred_flag_is_waiting()
static const uint32_t kSuccFlagSuccessorReader
void set(Epoch::EpochInteger epoch_int, uint32_t ordinal)
Zero is always reserved for invalid epoch.
The MCS reader-writer lock variant of LockableXctId.
bool is_deleted() const __attribute__((always_inline))
friend std::ostream & operator<<(std::ostream &o, const XctId &v)
void clear_successor_release() __attribute__((always_inline))
thread::ThreadId successor_thread_id_
bool is_next_layer() const __attribute__((always_inline))
void set_write_complete() __attribute__((always_inline))
bool cas_tail_strong(uint32_t expected, uint32_t desired)
McsWwBlockData get_tail_relaxed() const __attribute__((always_inline))
thread::ThreadId next_writer_
XctId xct_id_
the second 64bit: Persistent status part of TID.
UniversalLockId to_universal_lock_id(storage::VolatilePagePointer page_id, uintptr_t addr)
static uint32_t decompose_thread_id(uint64_t word) __attribute__((always_inline))
const uint64_t kLockPageSize
Must be same as storage::kPageSize.
bool is_valid_relaxed() const __attribute__((always_inline))
bool is_moved() const __attribute__((always_inline))
thread::ThreadId get_next_writer()
static uint64_t combine(uint32_t thread_id, McsBlockIndex block) __attribute__((always_inline))
static const uint32_t kPredFlagWriter
Return value of acquire_async_rw.
UniversalLockId xct_id_to_universal_lock_id(const memory::GlobalVolatilePageResolver &resolver, RwLockableXctId *lock)
friend std::ostream & operator<<(std::ostream &o, const McsRwLock &v)
void store_max(const XctId &other) __attribute__((always_inline))
Kind of std::max(this, other).
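A sketch of the store_max() idea with a simplified (epoch, ordinal) stamp standing in for XctId; the real method uses the epoch/ordinal ordering of before(), which also handles epoch wrap-around, so the lexicographic comparison below is a simplification.

#include <cassert>
#include <cstdint>
#include <utility>

typedef std::pair<uint32_t, uint32_t> Stamp;  // (epoch_int, ordinal), simplified

void store_max(Stamp* self, const Stamp& other) {
  if (*self < other) {  // lexicographic: epoch first, then in-epoch ordinal
    *self = other;      // keep whichever stamp is later
  }
}

int main() {
  Stamp s(5, 1);
  store_max(&s, Stamp(5, 2));
  assert(s == Stamp(5, 2));
  store_max(&s, Stamp(4, 9));  // older epoch: no change
  assert(s == Stamp(5, 2));
  return 0;
}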
bool cas_next_id_strong(uint32_t expected, uint32_t desired)
Forward declarations of classes in storage package.
Pre-allocated MCS block for WW-locks.
McsRwLock * get_key_lock() __attribute__((always_inline))
bool has_successor_acquire() const __attribute__((always_inline))
An exclusive-only (WW) MCS lock data structure.
bool is_being_written() const __attribute__((always_inline))
void acquire(bool non_racy_acquire)
Acquires the lock.
McsWwBlockData copy_once() const __attribute__((always_inline))
uint32_t LockListPosition
Index in a lock-list, either RLL or CLL.
Pre-allocated MCS block for extended version of RW-locks.
static const uint32_t kPredIdAcquired
bool timeout_granted(int32_t timeout)
MCS block classes.
static const uint32_t kSuccIdSuccessorLeaving
bool is_valid_atomic() const __attribute__((always_inline))
void reset() __attribute__((always_inline))
used only during page initialization
McsWwLock & operator=(const McsWwLock &other)=delete
bool is_locked_by_me() const
taken_mode_: we took a write-lock.
void clear_release() __attribute__((always_inline))
Epoch get_epoch() const __attribute__((always_inline))
static const uint32_t kPredFlagGranted
McsBlockIndex get_successor_block_relaxed() const __attribute__((always_inline))
Carefully use this! In some places you must call copy_once() then call this on the copy...
bool is_next_layer() const __attribute__((always_inline))
static const uint32_t kSuccFlagSuccessorWriter
static const uint32_t kSuccFlagMask
const uint64_t kMaxXctOrdinal
Maximum value of in-epoch ordinal.
const uint64_t kXctIdMaskOrdinal
bool is_reader() __attribute__((always_inline))
bool is_deleted() const __attribute__((always_inline))
const uint64_t kMcsGuestId
A special value meaning the lock is held by a non-regular guest that doesn't have a context...
bool is_guest_atomic() const __attribute__((always_inline))
static const uint8_t kSuccessorClassWriter
McsRwAsyncMapping(UniversalLockId lock_id, McsBlockIndex block)
friend std::ostream & operator<<(std::ostream &o, const LockableXctId &v)
friend std::ostream & operator<<(std::ostream &o, const McsWwLock &v)
Debug out operators.
bool is_deleted() const __attribute__((always_inline))
static const uint32_t kPredFlagReader
static const int32_t kTimeoutNever
void set_ordinal(uint32_t ordinal) __attribute__((always_inline))
void increment_ordinal() __attribute__((always_inline))
bool is_guest_consume() const __attribute__((always_inline))
IsolationLevel
Specifies the level of isolation during transaction processing.
bool next_flag_is_direct_granted()
static const uint32_t kSuccFlagLeavingGranted
bool next_flag_has_reader_successor()
void clear_atomic() __attribute__((always_inline))
uint16_t make_blocked_with_reader_successor_state()
void set_combined_release(uint64_t word) __attribute__((always_inline))
static const uint32_t kSuccFlagDirectGranted
bool is_locked() const
This is a "relaxed" check.
#define CXX11_FUNC_DELETE
Used in public headers in place of " = delete" of C++11.
static const uint8_t kStateClassMask
uint32_t make_next_flag_waiting_with_reader_successor()
bool is_valid_consume() const __attribute__((always_inline))
uint32_t McsBlockIndex
Index in thread-local MCS block.
static const uint32_t kSuccFlagLeaving
uint32_t cas_val_next_flag_weak(uint32_t expected, uint32_t desired)
bool needs_track_moved() const __attribute__((always_inline))
is_moved() || is_next_layer()
void set_epoch_int(Epoch::EpochInteger epoch_int) __attribute__((always_inline))
uint16_t ThreadId
Typedef for a global ID of Thread (core), which is unique across NUMA nodes.
static const uint8_t kSuccessorClassReader
bool is_being_written() const __attribute__((always_inline))
static const uint8_t kStateBlockedFlag
McsBlockIndex block_index_
bool cas_next_weak(uint64_t expected, uint64_t desired)
McsWwBlockData get_tail_consume() const __attribute__((always_inline))
void set_combined_atomic(uint64_t word) __attribute__((always_inline))
bool needs_track_moved() const __attribute__((always_inline))
bool cas_tail_weak(uint32_t expected, uint32_t desired)
LockMode
Represents a mode of lock.
void set_successor_atomic(thread::ThreadId thread_id, McsBlockIndex block) __attribute__((always_inline))
static const int32_t kTimeoutNever
void set_atomic(uint32_t thread_id, McsBlockIndex block) __attribute__((always_inline))
bool operator==(const McsWwBlockData &other) const
McsWwLock(thread::ThreadId tail_waiter, McsBlockIndex tail_waiter_block)
Atomic fence methods and load/store with fences that work for both C++11/non-C++11 code...
bool is_being_written() const __attribute__((always_inline))
uint32_t make_next_flag_waiting_with_no_successor()
XctId spin_while_being_written() const __attribute__((always_inline))
Returns a version of this XctId whose being_written flag is off.
char padding_[16-sizeof(McsBlockIndex)-sizeof(UniversalLockId)]
bool has_successor_consume() const __attribute__((always_inline))
bool next_flag_has_writer_successor()
#define STATIC_SIZE_CHECK(desired, actual)
void set_being_written() __attribute__((always_inline))
McsWwBlockData get_tail_acquire() const __attribute__((always_inline))
No guarantee at all for reads, for the sake of best performance and scalability.
const uint64_t kXctIdMaskEpoch
McsBlockIndex get_tail_waiter_block() const __attribute__((always_inline))
This is a "relaxed" check.
uint32_t xchg_tail(uint32_t new_tail)
void ownerless_acquire_lock()
static const thread::ThreadId kNextWriterNone
void set_successor_next_only(thread::ThreadId thread_id, McsBlockIndex block_index)
static const uint32_t kSuccIdNoSuccessor
McsBlockIndex get_block_relaxed() const __attribute__((always_inline))
Carefully use this! In some places you must call get_word_once() then call this on the copy...
#define UNLIKELY(x)
Hints that x is highly likely false.
bool is_moved() const __attribute__((always_inline))
void set_next(uint64_t next)
bool next_flag_is_leaving()
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
McsWwLock lock_
the first 64bit: Locking part of TID
Forward declarations of classes in thread package.
McsWwBlockData copy_atomic() const __attribute__((always_inline))
uint64_t word_
The high 32-bits is thread_id, the low 32-bit is block-index.
McsWwBlockData(uint64_t word)
bool before(const Epoch &other) const
Returns if this epoch is before the given epoch in the sense of distance defined in RFC 1982...
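RFC 1982 serial-number arithmetic compares values by signed distance so the ordering survives wrap-around. A minimal illustration over a 32-bit space; the real Epoch::before() must additionally treat zero as the reserved invalid epoch, which this sketch omits.

#include <cassert>
#include <cstdint>

bool before(uint32_t a, uint32_t b) {
  return static_cast<int32_t>(b - a) > 0;  // positive signed distance
}

int main() {
  assert(before(1U, 2U));
  assert(before(0xFFFFFFFFU, 1U));   // still "before" across the wrap-around
  assert(!before(1U, 0xFFFFFFFFU));
  return 0;
}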
#define ALWAYS_INLINE
A function suffix to hint that the function should always be inlined.
Raw atomic operations that work for both C++11 and non-C++11 code.
void memory_fence_release()
Equivalent to std::atomic_thread_fence(std::memory_order_release).
McsBlockIndex successor_block_index_
bool is_next_layer() const __attribute__((always_inline))
McsWwBlockData(uint32_t thread_id, McsBlockIndex block)
bool is_hot(thread::Thread *context) const
char * new_payload_address_
bool cas_pred_id_weak(uint32_t expected, uint32_t desired)
uint16_t make_blocked_with_no_successor_state()
bool has_reader_successor()
uint64_t get_word_once() const __attribute__((always_inline))
The access_once semantics, which is widely used in Linux.
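The access-once idiom forces the compiler to emit exactly one load, with no ordering guarantees, by reading through a volatile-qualified pointer. A sketch of the general technique, not necessarily the exact assorted:: implementation:

#include <cstdint>

uint64_t get_word_once(const uint64_t* word) {
  // volatile forbids the compiler from caching, fusing, or re-reading the value.
  return *static_cast<const volatile uint64_t*>(word);
}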
McsWwBlockData successor_
The successor of MCS lock queue after this thread (in other words, the thread that is waiting for thi...
An MCS reader-writer lock data structure.
void set_next_flag_writer_successor()
uint32_t xchg_next_id(uint32_t id)
int compare_epoch_and_orginal(const XctId &other) const __attribute__((always_inline))
Returns -1, 0, 1 when this is less than, equal to, or larger than other in terms of epoch/ordinal.
void set_pred_id(uint32_t id)
friend std::ostream & operator<<(std::ostream &o, const RwLockableXctId &v)
bool is_keylocked() const __attribute__((always_inline))
const UniversalLockId kNullUniversalLockId
This never points to a valid lock, and also evaluates less than any valid lock. ...
Protects against all anomalies in all situations.
static const uint32_t kSuccFlagSuccessorClassMask
void set_next_flag_busy()
thread::ThreadId get_tail_waiter() const
uint16_t decrement_nreaders()
const uint64_t kXctIdNextLayerBit
static const uint8_t kStateClassReaderFlag
void set_next_flag_leaving()
void set_next_layer() __attribute__((always_inline))