libfoedus-core
FOEDUS Core Library
xct_id.hpp
/*
 * Copyright (c) 2014-2015, Hewlett-Packard Development Company, LP.
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details. You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * HP designates this particular file as subject to the "Classpath" exception
 * as provided by HP in the LICENSE.txt file that accompanied this code.
 */
#ifndef FOEDUS_XCT_XCT_ID_HPP_
#define FOEDUS_XCT_XCT_ID_HPP_
#include <stdint.h>

#include <iosfwd>

#include "foedus/assert_nd.hpp"
#include "foedus/compiler.hpp"
#include "foedus/cxx11.hpp"
#include "foedus/epoch.hpp"
#include "foedus/fwd.hpp"
#include "foedus/assorted/atomic_fences.hpp"
#include "foedus/assorted/endianness.hpp"
#include "foedus/assorted/raw_atomics.hpp"
#include "foedus/memory/fwd.hpp"
#include "foedus/storage/fwd.hpp"
#include "foedus/thread/fwd.hpp"
#include "foedus/thread/thread_id.hpp"

/**
 * @file foedus/xct/xct_id.hpp
 * @brief Definitions of IDs in this package and a few related constant values.
 */
namespace foedus {
namespace xct {
/**
 * @brief Specifies the level of isolation during transaction processing.
 */
enum IsolationLevel {
  /** No guarantee at all for reads, for the sake of best performance and scalability. */
  kDirtyRead,
  /**
   * Snapshot isolation (SI), meaning the transaction reads a consistent
   * and complete image of the database.
   */
  kSnapshot,
  /** Protects against all anomalies in all situations. */
  kSerializable,
};

/**
 * @brief Represents a mode of lock.
 */
enum LockMode {
  /** taken_mode_: Not taken the lock yet. */
  kNoLock = 0,
  /** taken_mode_: we took a read-lock, not write-lock yet. */
  kReadLock,
  /** taken_mode_: we took a write-lock. */
  kWriteLock,
};

/**
 * @brief Universally ordered identifier of each lock.
 */
typedef uintptr_t UniversalLockId;

/** This never points to a valid lock, and also evaluates less than any valid locks. */
const UniversalLockId kNullUniversalLockId = 0;

/**
 * @brief Index in a lock-list, either RLL or CLL.
 */
typedef uint32_t LockListPosition;
const LockListPosition kLockListPositionInvalid = 0;

/** Index in thread-local MCS block. */
typedef uint32_t McsBlockIndex;
/**
 * A special value meaning the lock is held by a non-regular guest
 * that doesn't have a context.
 */
const uint64_t kMcsGuestId = -1;

/** Return value of acquire_async_rw. */
struct AcquireAsyncRet {
  /** whether we immediately acquired the lock or not */
  bool acquired_;
  /** the queue node we pushed. */
  McsBlockIndex block_index_;
};

/**
 * Exclusive-only (WW) MCS lock classes.
 */
struct McsWwBlockData {
  /** The high 32-bits is thread_id, the low 32-bit is block-index. */
  uint64_t word_;

  static uint64_t combine(uint32_t thread_id, McsBlockIndex block) ALWAYS_INLINE {
    uint64_t word = thread_id;
    word <<= 32;
    word |= block;
    return word;
  }
  static uint32_t decompose_thread_id(uint64_t word) ALWAYS_INLINE {
    return static_cast<uint32_t>((word >> 32) & 0xFFFFFFFFUL);
  }
  static McsBlockIndex decompose_block(uint64_t word) ALWAYS_INLINE {
    return static_cast<McsBlockIndex>(word & 0xFFFFFFFFUL);
  }

  McsWwBlockData() : word_(0) {}
  explicit McsWwBlockData(uint64_t word) : word_(word) {}
  McsWwBlockData(uint32_t thread_id, McsBlockIndex block) : word_(combine(thread_id, block)) {}

  bool operator==(const McsWwBlockData& other) const { return word_ == other.word_; }
  bool operator!=(const McsWwBlockData& other) const { return word_ != other.word_; }

  uint64_t get_word_acquire() const ALWAYS_INLINE {
    return assorted::atomic_load_acquire<uint64_t>(&word_);
  }
  uint64_t get_word_consume() const ALWAYS_INLINE {
    return assorted::atomic_load_consume<uint64_t>(&word_);
  }
  uint64_t get_word_atomic() const ALWAYS_INLINE {
    return assorted::atomic_load_seq_cst<uint64_t>(&word_);
  }
  /** The access_once semantics, which is widely used in linux. */
  uint64_t get_word_once() const ALWAYS_INLINE { return *(&word_); }
  McsWwBlockData copy_once() const ALWAYS_INLINE { return McsWwBlockData(get_word_once()); }
  McsWwBlockData copy_consume() const ALWAYS_INLINE { return McsWwBlockData(get_word_consume()); }
  McsWwBlockData copy_acquire() const ALWAYS_INLINE { return McsWwBlockData(get_word_acquire()); }
  McsWwBlockData copy_atomic() const ALWAYS_INLINE { return McsWwBlockData(get_word_atomic()); }

  bool is_valid_relaxed() const ALWAYS_INLINE { return word_ != 0; }
  bool is_valid_consume() const ALWAYS_INLINE { return get_word_consume() != 0; }
  bool is_valid_acquire() const ALWAYS_INLINE { return get_word_acquire() != 0; }
  bool is_valid_atomic() const ALWAYS_INLINE { return get_word_atomic() != 0; }

  bool is_guest_relaxed() const ALWAYS_INLINE { return word_ == kMcsGuestId; }
  bool is_guest_acquire() const ALWAYS_INLINE { return get_word_acquire() == kMcsGuestId; }
  bool is_guest_consume() const ALWAYS_INLINE { return get_word_consume() == kMcsGuestId; }
  bool is_guest_atomic() const ALWAYS_INLINE { return get_word_atomic() == kMcsGuestId; }
  /**
   * Carefully use this! In some places you must call get_word_once()
   * and then call this on the copy.
   */
  inline uint32_t get_thread_id_relaxed() const ALWAYS_INLINE {
    return McsWwBlockData::decompose_thread_id(word_);
  }
  /**
   * Carefully use this! In some places you must call get_word_once()
   * and then call this on the copy.
   */
  inline McsBlockIndex get_block_relaxed() const ALWAYS_INLINE {
    return McsWwBlockData::decompose_block(word_);
  }
  void clear() ALWAYS_INLINE { word_ = 0; }
  void clear_atomic() ALWAYS_INLINE { assorted::atomic_store_seq_cst<uint64_t>(&word_, 0); }
  void clear_release() ALWAYS_INLINE { assorted::atomic_store_release<uint64_t>(&word_, 0); }
  void set_relaxed(uint32_t thread_id, McsBlockIndex block) ALWAYS_INLINE {
    word_ = McsWwBlockData::combine(thread_id, block);
  }
  void set_atomic(uint32_t thread_id, McsBlockIndex block) ALWAYS_INLINE {
    set_combined_atomic(McsWwBlockData::combine(thread_id, block));
  }
  void set_release(uint32_t thread_id, McsBlockIndex block) ALWAYS_INLINE {
    set_combined_release(McsWwBlockData::combine(thread_id, block));
  }
  void set_combined_atomic(uint64_t word) ALWAYS_INLINE {
    assorted::atomic_store_seq_cst<uint64_t>(&word_, word);
  }
  void set_combined_release(uint64_t word) ALWAYS_INLINE {
    assorted::atomic_store_release<uint64_t>(&word_, word);
  }
};
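
// Usage sketch (illustrative only, not part of the original header; the
// helper name example_ww_block_data_packing is hypothetical). It shows how
// the single 64-bit word_ above packs a waiter's <thread_id, block> pair.
inline void example_ww_block_data_packing() {
  McsWwBlockData data(3U, 42U);                   // thread 3, queue node 42
  ASSERT_ND(data.get_thread_id_relaxed() == 3U);  // high 32 bits
  ASSERT_ND(data.get_block_relaxed() == 42U);     // low 32 bits
  ASSERT_ND(data.is_valid_relaxed());             // non-zero word: someone holds/waits
  data.clear();
  ASSERT_ND(!data.is_valid_relaxed());            // zero word: vacant
}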

/** Pre-allocated MCS block for WW-locks. */
struct McsWwBlock {
  /**
   * The successor of the MCS lock queue after this thread
   * (in other words, the thread that is waiting for this thread).
   */
  McsWwBlockData successor_;

  /** setter/getter for successor_. */
  inline bool has_successor_relaxed() const ALWAYS_INLINE { return successor_.is_valid_relaxed(); }
  inline bool has_successor_consume() const ALWAYS_INLINE { return successor_.is_valid_consume(); }
  inline bool has_successor_acquire() const ALWAYS_INLINE { return successor_.is_valid_acquire(); }
  inline bool has_successor_atomic() const ALWAYS_INLINE { return successor_.is_valid_atomic(); }
  /** Carefully use this! In some places you must call copy_once() and then call this on the copy. */
  inline uint32_t get_successor_thread_id_relaxed() const ALWAYS_INLINE {
    return successor_.get_thread_id_relaxed();
  }
  /** Carefully use this! In some places you must call copy_once() and then call this on the copy. */
  inline McsBlockIndex get_successor_block_relaxed() const ALWAYS_INLINE {
    return successor_.get_block_relaxed();
  }
  inline void clear_successor_atomic() ALWAYS_INLINE { successor_.clear_atomic(); }
  inline void clear_successor_release() ALWAYS_INLINE { successor_.clear_release(); }
  inline void set_successor_atomic(thread::ThreadId thread_id, McsBlockIndex block) ALWAYS_INLINE {
    successor_.set_atomic(thread_id, block);
  }
  inline void set_successor_release(thread::ThreadId thread_id, McsBlockIndex block) ALWAYS_INLINE {
    successor_.set_release(thread_id, block);
  }
};

/**
 * @brief An exclusive-only (WW) MCS lock data structure.
 */
struct McsWwLock {
  McsWwLock() { reset(); }
  McsWwLock(thread::ThreadId tail_waiter, McsBlockIndex tail_waiter_block) {
    tail_.set_relaxed(tail_waiter, tail_waiter_block);
  }

  McsWwLock(const McsWwLock& other) CXX11_FUNC_DELETE;
  McsWwLock& operator=(const McsWwLock& other) CXX11_FUNC_DELETE;

  /** This is a "relaxed" check. */
  bool is_locked() const { return tail_.is_valid_relaxed(); }

  /**
   * The followings are implemented in thread_pimpl.cpp along with the
   * above methods, but these don't use any Thread context.
   */
  void ownerless_initial_lock();
  void ownerless_acquire_lock();
  void ownerless_release_lock();

  /** This is a "relaxed" check. */
  thread::ThreadId get_tail_waiter() const ALWAYS_INLINE { return tail_.get_thread_id_relaxed(); }
  /** This is a "relaxed" check. */
  McsBlockIndex get_tail_waiter_block() const ALWAYS_INLINE { return tail_.get_block_relaxed(); }

  McsWwBlockData get_tail_relaxed() const ALWAYS_INLINE { return tail_; }
  McsWwBlockData get_tail_once() const ALWAYS_INLINE { return tail_.copy_once(); }
  McsWwBlockData get_tail_consume() const ALWAYS_INLINE { return tail_.copy_consume(); }
  McsWwBlockData get_tail_acquire() const ALWAYS_INLINE { return tail_.copy_acquire(); }
  McsWwBlockData get_tail_atomic() const ALWAYS_INLINE { return tail_.copy_atomic(); }

  /** used only while page initialization */
  void reset() ALWAYS_INLINE { tail_.clear(); }

  void reset_guest_id_release() {
    tail_.set_combined_release(kMcsGuestId);
  }

  /** used only for initial_lock() */
  void reset(thread::ThreadId tail_waiter, McsBlockIndex tail_waiter_block) ALWAYS_INLINE {
    tail_.set_relaxed(tail_waiter, tail_waiter_block);
  }

  void reset_atomic() ALWAYS_INLINE { reset_atomic(0, 0); }
  void reset_atomic(thread::ThreadId tail_waiter, McsBlockIndex tail_waiter_block) ALWAYS_INLINE {
    tail_.set_atomic(tail_waiter, tail_waiter_block);
  }
  void reset_release() ALWAYS_INLINE { reset_release(0, 0); }
  void reset_release(thread::ThreadId tail_waiter, McsBlockIndex tail_waiter_block) ALWAYS_INLINE {
    tail_.set_release(tail_waiter, tail_waiter_block);
  }

  friend std::ostream& operator<<(std::ostream& o, const McsWwLock& v);

  McsWwBlockData tail_;
};
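
// Usage sketch (illustrative only, not part of the original header; the
// helper name is hypothetical). A locked McsWwLock's tail_ holds either a
// regular waiter's <thread, block> pair or kMcsGuestId; an unlocked one is 0.
inline void example_ww_lock_introspection(const McsWwLock& lock) {
  if (lock.is_locked()) {
    // Relaxed snapshot of the tail waiter's queue node. For a guest holder
    // (tail_ == kMcsGuestId) this is not a meaningful block index.
    McsBlockIndex block = lock.get_tail_waiter_block();
    (void) block;
  }
}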

/**
 * Reader-writer (RW) MCS lock classes.
 */
struct McsRwSimpleBlock {
  static const uint8_t kStateClassMask       = 3U;        // [LSB + 1, LSB + 2]
  static const uint8_t kStateClassReaderFlag = 1U;        // LSB binary = 01
  static const uint8_t kStateClassWriterFlag = 2U;        // LSB binary = 10

  static const uint8_t kStateBlockedFlag     = 1U << 7U;  // MSB binary = 1
  static const uint8_t kStateBlockedMask     = 1U << 7U;

  static const uint8_t kStateFinalizedMask   = 4U;

  static const uint8_t kSuccessorClassReader = 1U;
  static const uint8_t kSuccessorClassWriter = 2U;
  static const uint8_t kSuccessorClassNone   = 3U;        // LSB binary 11

  static const int32_t kTimeoutNever = 0xFFFFFFFF;

  union Self {
    uint16_t data_;                       // +2 => 2
    struct Components {
      uint8_t successor_class_;
      // state_ covers:
      // Bit 0-1: my **own** class (am I a reader or writer?)
      // Bit 2: whether we have checked the successor ("finalized", for readers only)
      // Bit 7: blocked (am I waiting for the lock or acquired?)
      uint8_t state_;
    } components_;
  } self_;
  // TODO(tzwang): make these two fields 8 bytes by themselves. Now we need
  // to worry about sub-word writes (ie have to use atomic ops even when
  // changing only these two fields because they are in the same byte as data_).
  thread::ThreadId successor_thread_id_;  // +2 => 4
  McsBlockIndex successor_block_index_;   // +4 => 8

  inline void init_reader() {
    self_.components_.state_ = kStateClassReaderFlag | kStateBlockedFlag;
    init_common();
  }
  inline void init_writer() {
    self_.components_.state_ = kStateClassWriterFlag | kStateBlockedFlag;
    init_common();
  }
  inline void init_common() ALWAYS_INLINE {
    self_.components_.successor_class_ = kSuccessorClassNone;
    successor_thread_id_ = 0;
    successor_block_index_ = 0;
    assorted::memory_fence_release();
  }

  inline bool is_reader() ALWAYS_INLINE {
    return (self_.components_.state_ & kStateClassMask) == kStateClassReaderFlag;
  }
  inline uint8_t read_state() {
    return assorted::atomic_load_acquire<uint8_t>(&self_.components_.state_);
  }
  inline void unblock() ALWAYS_INLINE {
    ASSERT_ND(read_state() & kStateBlockedFlag);
    assorted::raw_atomic_fetch_and_bitwise_and<uint8_t>(
      &self_.components_.state_,
      static_cast<uint8_t>(~kStateBlockedMask));
  }
  inline bool is_blocked() ALWAYS_INLINE {
    return read_state() & kStateBlockedMask;
  }
  inline bool is_granted() {
    return !is_blocked();
  }
  inline void set_finalized() {
    ASSERT_ND(is_reader());
    assorted::raw_atomic_fetch_and_bitwise_or<uint8_t>(
      &self_.components_.state_, kStateFinalizedMask);
  }
  inline bool is_finalized() {
    ASSERT_ND(is_reader());
    return read_state() & kStateFinalizedMask;
  }
  bool timeout_granted(int32_t timeout);
  inline void set_successor_class_writer() {
    // In case the caller is a reader appending after a writer or waiting reader,
    // the requester should have already set the successor class to "reader" by CASing
    // self_.data_ from [no-successor, blocked] to [reader successor, blocked].
    ASSERT_ND(self_.components_.successor_class_ == kSuccessorClassNone);
    assorted::raw_atomic_fetch_and_bitwise_and<uint8_t>(
      &self_.components_.successor_class_, kSuccessorClassWriter);
  }
  inline void set_successor_next_only(thread::ThreadId thread_id, McsBlockIndex block_index) {
    McsRwSimpleBlock tmp;
    tmp.self_.data_ = 0;
    tmp.successor_thread_id_ = thread_id;
    tmp.successor_block_index_ = block_index;
    ASSERT_ND(successor_thread_id_ == 0);
    ASSERT_ND(successor_block_index_ == 0);
    uint64_t *address = reinterpret_cast<uint64_t*>(this);
    uint64_t mask = *reinterpret_cast<uint64_t*>(&tmp);
    assorted::raw_atomic_fetch_and_bitwise_or<uint64_t>(address, mask);
  }
  inline bool has_successor() {
    return assorted::atomic_load_acquire<uint8_t>(
      &self_.components_.successor_class_) != kSuccessorClassNone;
  }
  inline bool successor_is_ready() {
    // Check block index only - thread ID could be 0
    return assorted::atomic_load_acquire<McsBlockIndex>(&successor_block_index_) != 0;
  }
  inline bool has_reader_successor() {
    uint8_t s = assorted::atomic_load_acquire<uint8_t>(&self_.components_.successor_class_);
    return s == kSuccessorClassReader;
  }
  inline bool has_writer_successor() {
    uint8_t s = assorted::atomic_load_acquire<uint8_t>(&self_.components_.successor_class_);
    return s == kSuccessorClassWriter;
  }

  uint16_t make_blocked_with_reader_successor_state() {
    // Only using the class bit, which doesn't change, so no need to use atomic ops.
    uint8_t state = self_.components_.state_ | kStateBlockedFlag;
    return (uint16_t)state << 8 | kSuccessorClassReader;
  }
  uint16_t make_blocked_with_no_successor_state() {
    uint8_t state = self_.components_.state_ | kStateBlockedFlag;
    return (uint16_t)state << 8 | kSuccessorClassNone;
  }
};
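
// Usage sketch (illustrative only, not part of the original header; the
// helper name is hypothetical). It walks the state-byte life-cycle of a
// simple RW block: a requester starts blocked, the grantor unblocks it.
inline void example_rw_simple_block_states() {
  McsRwSimpleBlock block;
  block.init_reader();
  ASSERT_ND(block.is_reader());
  ASSERT_ND(block.is_blocked());      // initialized to the waiting state
  block.unblock();                    // grantor's side: clear the blocked bit
  ASSERT_ND(block.is_granted());
  ASSERT_ND(!block.has_successor());  // successor class is still "none"
}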

/**
 * Pre-allocated MCS block for extended version of RW-locks.
 */
struct McsRwExtendedBlock {
  /**
   * Pred flags:
   * |---31---|-----|---0---|
   * |my class|empty|waiting|
   */
  static const uint32_t kPredFlagWaiting   = 0U;
  static const uint32_t kPredFlagGranted   = 1U;
  static const uint32_t kPredFlagReader    = 0U;
  static const uint32_t kPredFlagWriter    = 1U << 31;
  static const uint32_t kPredFlagClassMask = 1U << 31;

  static const uint32_t kSuccFlagWaiting        = 0U;
  static const uint32_t kSuccFlagLeaving        = 1U;
  static const uint32_t kSuccFlagDirectGranted  = 2U;
  static const uint32_t kSuccFlagLeavingGranted = 3U;
  static const uint32_t kSuccFlagMask           = 3U;

  static const uint32_t kSuccFlagBusy = 4U;

  static const uint32_t kSuccFlagSuccessorClassMask = 3U << 30;
  static const uint32_t kSuccFlagSuccessorReader    = 3U << 30;
  static const uint32_t kSuccFlagSuccessorNone      = 0U;
  static const uint32_t kSuccFlagSuccessorWriter    = 1U << 30;

  static const uint32_t kSuccIdSuccessorLeaving = 0xFFFFFFFFU;
  static const uint32_t kSuccIdNoSuccessor      = 0xFFFFFFFEU;

  static const uint32_t kPredIdAcquired = 0xFFFFFFFFU;

#ifndef NDEBUG
  static const uint64_t kSuccReleased = 0xFFFFFFFFFFFFFFFFU;
#endif

  /* Special timeouts for instant return and unconditional acquire */
  static const int32_t kTimeoutNever = 0xFFFFFFFFU;
  static const int32_t kTimeoutZero  = 0U;

  union Field {
    uint64_t data_;
    struct Components {
      uint32_t flags_;
      uint32_t id_;
    } components_;
  };

  Field pred_;
  Field next_;

#ifndef NDEBUG
  inline void mark_released() {
    set_next(kSuccReleased);
  }

  inline bool is_released() {
    return get_next() == kSuccReleased;
  }
#endif

  inline uint32_t read_pred_flags() {
    return assorted::atomic_load_acquire<uint32_t>(&pred_.components_.flags_);
  }
  inline uint32_t read_next_flags() {
    return assorted::atomic_load_acquire<uint32_t>(&next_.components_.flags_);
  }
  inline bool is_writer() { return read_pred_flags() & kPredFlagWriter; }
  inline bool is_reader() { return !is_writer(); }
  inline bool pred_flag_is_waiting() {
    return !pred_flag_is_granted();
  }
  inline bool pred_flag_is_granted() {
    return read_pred_flags() & kPredFlagGranted;
  }
  inline bool next_flag_is_direct_granted() {
    uint32_t f = (read_next_flags() & kSuccFlagMask);
    return f == kSuccFlagDirectGranted;
  }
  inline bool next_flag_is_leaving_granted() {
    uint32_t f = (read_next_flags() & kSuccFlagMask);
    return f == kSuccFlagLeavingGranted;
  }
  inline bool next_flag_is_granted() {
    uint32_t f = (read_next_flags() & kSuccFlagMask);
    return f == kSuccFlagLeavingGranted || f == kSuccFlagDirectGranted;
  }
  inline bool next_flag_is_leaving() {
    return (read_next_flags() & kSuccFlagMask) == kSuccFlagLeaving;
  }
  inline bool next_flag_is_waiting() {
    return (read_next_flags() & kSuccFlagMask) == kSuccFlagWaiting;
  }
  inline void set_next_flag_writer_successor() {
    assorted::raw_atomic_fetch_and_bitwise_or<uint32_t>(
      &next_.components_.flags_, kSuccFlagSuccessorWriter);
  }
  inline void set_next_flag_reader_successor() {
    assorted::raw_atomic_fetch_and_bitwise_or<uint32_t>(
      &next_.components_.flags_, kSuccFlagSuccessorReader);
  }
  inline void set_pred_flag_granted() {
    assorted::raw_atomic_fetch_and_bitwise_or<uint32_t>(
      &pred_.components_.flags_, kPredFlagGranted);
  }
  inline void set_next_flag_granted() {
    if (next_flag_is_waiting()) {
      assorted::raw_atomic_fetch_and_bitwise_or<uint32_t>(
        &next_.components_.flags_, kSuccFlagDirectGranted);
    } else {
      assorted::raw_atomic_fetch_and_bitwise_or<uint32_t>(
        &next_.components_.flags_, kSuccFlagLeavingGranted);
    }
  }
  /* (name reconstructed from context: grants the lock and sets the busy flag in one step) */
  inline void set_next_flag_granted_with_busy() {
    if (next_flag_is_waiting()) {
      assorted::raw_atomic_fetch_and_bitwise_or<uint32_t>(
        &next_.components_.flags_, kSuccFlagDirectGranted | kSuccFlagBusy);
    } else {
      assorted::raw_atomic_fetch_and_bitwise_or<uint32_t>(
        &next_.components_.flags_, kSuccFlagLeavingGranted | kSuccFlagBusy);
    }
  }
  inline void set_next_flag_leaving() {
    assorted::raw_atomic_exchange<uint16_t>(
      reinterpret_cast<uint16_t*>(&next_.components_.flags_),
      static_cast<uint16_t>(kSuccFlagLeaving));
  }
  /* (name reconstructed from context: clears the successor-class bits back to "none") */
  inline void unset_next_flag_successor_class() {
    assorted::raw_atomic_fetch_and_bitwise_and<uint32_t>(
      &next_.components_.flags_, ~kSuccFlagSuccessorClassMask);
  }
  inline void set_next(uint64_t next) {
    assorted::atomic_store_release<uint64_t>(&next_.data_, next);
  }
  inline bool cas_next_weak(uint64_t expected, uint64_t desired) {
    return assorted::raw_atomic_compare_exchange_weak<uint64_t>(
      &next_.data_, &expected, desired);
  }
  inline bool cas_next_strong(uint64_t expected, uint64_t desired) {
    return assorted::raw_atomic_compare_exchange_strong<uint64_t>(
      &next_.data_, &expected, desired);
  }
  inline void set_flags_granted() {
    set_pred_flag_granted();
    set_next_flag_granted();
  }
  inline bool next_flag_has_successor() {
    return (read_next_flags() & kSuccFlagSuccessorClassMask) != kSuccFlagSuccessorNone;
  }
  inline bool next_flag_has_reader_successor() {
    return (read_next_flags() & kSuccFlagSuccessorClassMask) == kSuccFlagSuccessorReader;
  }
  inline bool next_flag_has_writer_successor() {
    return (read_next_flags() & kSuccFlagSuccessorClassMask) == kSuccFlagSuccessorWriter;
  }
  inline bool next_flag_is_busy() {
    return (read_next_flags() & kSuccFlagBusy) == kSuccFlagBusy;
  }
  inline void set_next_flag_busy() {
    assorted::raw_atomic_fetch_and_bitwise_or<uint32_t>(
      &next_.components_.flags_, kSuccFlagBusy);
  }
  inline void unset_next_flag_busy() {
    assorted::raw_atomic_fetch_and_bitwise_and<uint32_t>(
      &next_.components_.flags_, ~kSuccFlagBusy);
  }
  inline uint32_t cas_val_next_flag_strong(uint32_t expected, uint32_t desired) {
    assorted::raw_atomic_compare_exchange_strong<uint32_t>(
      &next_.components_.flags_, &expected, desired);
    return expected;
  }
  inline uint32_t cas_val_next_flag_weak(uint32_t expected, uint32_t desired) {
    assorted::raw_atomic_compare_exchange_weak<uint32_t>(
      &next_.components_.flags_, &expected, desired);
    return expected;
  }
  inline uint64_t cas_val_next_strong(uint64_t expected, uint64_t desired) {
    assorted::raw_atomic_compare_exchange_strong<uint64_t>(
      &next_.data_, &expected, desired);
    return expected;
  }
  inline uint64_t cas_val_next_weak(uint64_t expected, uint64_t desired) {
    assorted::raw_atomic_compare_exchange_weak<uint64_t>(
      &next_.data_, &expected, desired);
    return expected;
  }
  inline uint32_t xchg_next_id(uint32_t id) {
    return assorted::raw_atomic_exchange<uint32_t>(&next_.components_.id_, id);
  }
  inline bool cas_next_id_strong(uint32_t expected, uint32_t desired) {
    return assorted::raw_atomic_compare_exchange_strong<uint32_t>(
      &next_.components_.id_, &expected, desired);
  }
  inline bool cas_next_id_weak(uint32_t expected, uint32_t desired) {
    return assorted::raw_atomic_compare_exchange_weak<uint32_t>(
      &next_.components_.id_, &expected, desired);
  }
  inline bool cas_pred_id_weak(uint32_t expected, uint32_t desired) {
    return assorted::raw_atomic_compare_exchange_weak<uint32_t>(
      &pred_.components_.id_, &expected, desired);
  }
  inline bool cas_pred_id_strong(uint32_t expected, uint32_t desired) {
    return assorted::raw_atomic_compare_exchange_strong<uint32_t>(
      &pred_.components_.id_, &expected, desired);
  }
  inline uint32_t cas_val_pred_id_weak(uint32_t expected, uint32_t desired) {
    assorted::raw_atomic_compare_exchange_weak<uint32_t>(
      &pred_.components_.id_, &expected, desired);
    return expected;
  }
  inline uint32_t make_next_flag_waiting_with_no_successor() { return kSuccFlagWaiting; }
  inline uint32_t make_next_flag_waiting_with_reader_successor() {
    return kSuccFlagWaiting | kSuccFlagSuccessorReader;
  }
  inline uint32_t get_pred_id() {
    return assorted::atomic_load_acquire<uint32_t>(&pred_.components_.id_);
  }
  inline uint32_t get_next_id() {
    return assorted::atomic_load_acquire<uint32_t>(&next_.components_.id_);
  }
  inline uint64_t get_next() {
    return assorted::atomic_load_acquire<uint64_t>(&next_.data_);
  }
  inline void set_pred_id(uint32_t id) {
    assorted::atomic_store_release<uint32_t>(&pred_.components_.id_, id);
  }
  inline void set_next_id(uint32_t id) {
    assorted::atomic_store_release<uint32_t>(&next_.components_.id_, id);
  }
  inline uint32_t xchg_pred_id(uint32_t id) {
    return assorted::raw_atomic_exchange<uint32_t>(&pred_.components_.id_, id);
  }
  inline void init_reader() {
    pred_.components_.flags_ = kPredFlagReader;
    next_.components_.flags_ = 0;
    pred_.components_.id_ = next_.components_.id_ = 0;
    ASSERT_ND(is_reader());
  }
  inline void init_writer() {
    pred_.components_.flags_ = kPredFlagWriter;
    next_.components_.flags_ = 0;
    pred_.components_.id_ = next_.components_.id_ = 0;
    ASSERT_ND(is_writer());
  }
  bool timeout_granted(int32_t timeout);
};
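
// Usage sketch (illustrative only, not part of the original header; the
// helper name is hypothetical). Each Field above packs a 32-bit flags word
// and a 32-bit id, and the cas_val_* helpers return the value observed by
// the CAS rather than a bool, so callers can branch on what was there.
inline void example_rw_extended_block_fields() {
  McsRwExtendedBlock block;
  block.init_writer();
  ASSERT_ND(block.is_writer());
  ASSERT_ND(block.pred_flag_is_waiting());
  uint32_t observed = block.cas_val_pred_id_weak(0, McsRwExtendedBlock::kPredIdAcquired);
  (void) observed;  // 0 if the CAS succeeded (or spuriously failed while seeing 0)
}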

/**
 * @brief An MCS reader-writer lock data structure.
 */
struct McsRwLock {
  static const thread::ThreadId kNextWriterNone = 0xFFFFU;

  McsRwLock() { reset(); }

  McsRwLock(const McsRwLock& other) CXX11_FUNC_DELETE;
  McsRwLock& operator=(const McsRwLock& other) CXX11_FUNC_DELETE;

  inline void reset() {
    tail_ = nreaders_ = 0;
    set_next_writer(kNextWriterNone);
  }
  inline void increment_nreaders() {
    assorted::raw_atomic_fetch_add<uint16_t>(&nreaders_, 1);
  }
  inline uint16_t decrement_nreaders() {
    return assorted::raw_atomic_fetch_add<uint16_t>(&nreaders_, -1);
  }
  inline uint16_t nreaders() {
    return assorted::atomic_load_acquire<uint16_t>(&nreaders_);
  }
  inline McsBlockIndex get_tail_waiter_block() const { return tail_ & 0xFFFFU; }
  inline thread::ThreadId get_tail_waiter() const { return tail_ >> 16U; }
  inline bool has_next_writer() const {
    return assorted::atomic_load_acquire<thread::ThreadId>(&next_writer_) != kNextWriterNone;
  }
  inline void set_next_writer(thread::ThreadId thread_id) {
    xchg_next_writer(thread_id);  // sub-word access...
  }
  inline thread::ThreadId get_next_writer() {
    return assorted::atomic_load_acquire<thread::ThreadId>(&next_writer_);
  }
  inline thread::ThreadId xchg_next_writer(thread::ThreadId id) {
    return assorted::raw_atomic_exchange<thread::ThreadId>(&next_writer_, id);
  }
  inline bool cas_next_writer_weak(thread::ThreadId expected, thread::ThreadId desired) {
    return assorted::raw_atomic_compare_exchange_weak<thread::ThreadId>(
      &next_writer_, &expected, desired);
  }
  inline bool cas_next_writer_strong(thread::ThreadId expected, thread::ThreadId desired) {
    return assorted::raw_atomic_compare_exchange_strong<thread::ThreadId>(
      &next_writer_, &expected, desired);
  }
  inline uint32_t xchg_tail(uint32_t new_tail) {
    return assorted::raw_atomic_exchange<uint32_t>(&tail_, new_tail);
  }
  inline bool cas_tail_strong(uint32_t expected, uint32_t desired) {
    return assorted::raw_atomic_compare_exchange_strong<uint32_t>(&tail_, &expected, desired);
  }
  inline bool cas_tail_weak(uint32_t expected, uint32_t desired) {
    return assorted::raw_atomic_compare_exchange_weak<uint32_t>(&tail_, &expected, desired);
  }
  static inline uint32_t to_tail_int(
    thread::ThreadId tail_waiter,
    McsBlockIndex tail_waiter_block) {
    ASSERT_ND(tail_waiter_block <= 0xFFFFU);
    return static_cast<uint32_t>(tail_waiter) << 16 | (tail_waiter_block & 0xFFFFU);
  }
  inline uint32_t get_tail_int() {
    return assorted::atomic_load_acquire<uint32_t>(&tail_);
  }
  bool is_locked() const {
    return (tail_ & 0xFFFFU) != 0 || nreaders_ > 0;
  }

  uint32_t tail_;                 // +4 => 4
  /* Note that threadId starts from 0, so we use 0xFFFF as the "invalid"
   * marker, unless we make the lock even larger than 8 bytes. This essentially
   * limits the largest allowed number of cores we support to 256 sockets x 256
   * cores per socket - 1.
   */
  thread::ThreadId next_writer_;  // +2 => 6
  uint16_t nreaders_;             // +2 => 8

  friend std::ostream& operator<<(std::ostream& o, const McsRwLock& v);
};
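
// Usage sketch (illustrative only, not part of the original header; the
// helper name is hypothetical). The RW-lock tail combines <thread, block> in
// one 32-bit word, so enqueueing a requester is a single atomic exchange on
// tail_. Precondition: my_block fits in 16 bits, as to_tail_int() asserts.
inline uint32_t example_rw_lock_tail_packing(thread::ThreadId me, McsBlockIndex my_block) {
  uint32_t tail_int = McsRwLock::to_tail_int(me, my_block);
  ASSERT_ND((tail_int >> 16) == me);
  ASSERT_ND((tail_int & 0xFFFFU) == my_block);
  return tail_int;
}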

struct McsRwAsyncMapping {
  UniversalLockId lock_id_;
  McsBlockIndex block_index_;
  char padding_[16 - sizeof(McsBlockIndex) - sizeof(UniversalLockId)];

  McsRwAsyncMapping(UniversalLockId lock_id, McsBlockIndex block)
    : lock_id_(lock_id), block_index_(block) {}
  McsRwAsyncMapping() : lock_id_(kNullUniversalLockId), block_index_(0) {}
};

const uint64_t kXctIdDeletedBit      = 1ULL << 63;
const uint64_t kXctIdMovedBit        = 1ULL << 62;
const uint64_t kXctIdBeingWrittenBit = 1ULL << 61;
const uint64_t kXctIdNextLayerBit    = 1ULL << 60;
const uint64_t kXctIdMaskSerializer  = 0x0FFFFFFFFFFFFFFFULL;
const uint64_t kXctIdMaskEpoch       = 0x0FFFFFFF00000000ULL;
const uint64_t kXctIdMaskOrdinal     = 0x00000000FFFFFFFFULL;

/**
 * @brief Maximum value of in-epoch ordinal.
 */
const uint64_t kMaxXctOrdinal = (1ULL << 24) - 1U;
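
// Sanity sketch (illustrative only, not part of the original header; the
// helper name is hypothetical). These identities follow directly from the
// constants above: the serializer mask is exactly epoch|ordinal, and the
// four status bits occupy the remaining top nibble.
inline void example_xct_id_mask_identities() {
  ASSERT_ND(kXctIdMaskSerializer == (kXctIdMaskEpoch | kXctIdMaskOrdinal));
  ASSERT_ND((kXctIdDeletedBit | kXctIdMovedBit | kXctIdBeingWrittenBit | kXctIdNextLayerBit)
    == ~kXctIdMaskSerializer);
}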
/**
 * @brief Persistent status part of Transaction ID.
 */
struct XctId {
  XctId() : data_(0) {}

  void set(Epoch::EpochInteger epoch_int, uint32_t ordinal) {
    ASSERT_ND(epoch_int < Epoch::kEpochIntOverflow);
    ASSERT_ND(ordinal <= kMaxXctOrdinal);
    data_ = static_cast<uint64_t>(epoch_int) << 32 | ordinal;
  }

  Epoch get_epoch() const ALWAYS_INLINE { return Epoch(get_epoch_int()); }
  void set_epoch(Epoch epoch) ALWAYS_INLINE { set_epoch_int(epoch.value()); }
  Epoch::EpochInteger get_epoch_int() const ALWAYS_INLINE {
    return (data_ & kXctIdMaskEpoch) >> 32;
  }
  void set_epoch_int(Epoch::EpochInteger epoch_int) ALWAYS_INLINE {
    ASSERT_ND(epoch_int < Epoch::kEpochIntOverflow);
    data_ = (data_ & ~kXctIdMaskEpoch) | (static_cast<uint64_t>(epoch_int) << 32);
  }
  bool is_valid() const ALWAYS_INLINE { return get_epoch_int() != Epoch::kEpochInvalid; }

  uint32_t get_ordinal() const ALWAYS_INLINE {
    ASSERT_ND(static_cast<uint32_t>(data_) <= kMaxXctOrdinal);
    return static_cast<uint32_t>(data_);
  }
  void set_ordinal(uint32_t ordinal) ALWAYS_INLINE {
    ASSERT_ND(ordinal <= kMaxXctOrdinal);
    data_ = (data_ & (~kXctIdMaskOrdinal)) | ordinal;
  }
  void increment_ordinal() ALWAYS_INLINE {
    uint32_t ordinal = get_ordinal();
    set_ordinal(ordinal + 1U);
  }
  /**
   * Returns -1, 0, 1 when this is less than, same, larger than other
   * in terms of epoch/ordinal.
   */
  int compare_epoch_and_orginal(const XctId& other) const ALWAYS_INLINE {
    // compare epoch
    if (get_epoch_int() != other.get_epoch_int()) {
      Epoch this_epoch = get_epoch();
      Epoch other_epoch = other.get_epoch();
      ASSERT_ND(this_epoch.is_valid());
      ASSERT_ND(other_epoch.is_valid());
      if (this_epoch < other_epoch) {
        return -1;
      } else {
        ASSERT_ND(this_epoch > other_epoch);
        return 1;
      }
    }

    // if the epoch is the same, compare in_epoch_ordinal_.
    ASSERT_ND(get_epoch() == other.get_epoch());
    if (get_ordinal() < other.get_ordinal()) {
      return -1;
    } else if (get_ordinal() > other.get_ordinal()) {
      return 1;
    } else {
      return 0;
    }
  }

  void set_being_written() ALWAYS_INLINE { data_ |= kXctIdBeingWrittenBit; }
  void set_write_complete() ALWAYS_INLINE { data_ &= (~kXctIdBeingWrittenBit); }
  void set_deleted() ALWAYS_INLINE { data_ |= kXctIdDeletedBit; }
  void set_notdeleted() ALWAYS_INLINE { data_ &= (~kXctIdDeletedBit); }
  void set_moved() ALWAYS_INLINE { data_ |= kXctIdMovedBit; }
  void set_next_layer() ALWAYS_INLINE {
    // Delete-bit has no meaning for a next-layer record. To avoid confusion, turn it off.
    data_ = (data_ & (~kXctIdDeletedBit)) | kXctIdNextLayerBit;
  }
  // note, we should not need this method because becoming a next-layer-pointer is permanent.
  // we never revert it, which simplifies the concurrency control.
  // void set_not_next_layer() ALWAYS_INLINE { data_ &= (~kXctIdNextLayerBit); }

  bool is_being_written() const ALWAYS_INLINE {
    return (assorted::atomic_load_acquire<uint64_t>(&data_) & kXctIdBeingWrittenBit) != 0;
  }
  bool is_deleted() const ALWAYS_INLINE { return (data_ & kXctIdDeletedBit) != 0; }
  bool is_moved() const ALWAYS_INLINE { return (data_ & kXctIdMovedBit) != 0; }
  bool is_next_layer() const ALWAYS_INLINE { return (data_ & kXctIdNextLayerBit) != 0; }
  /** is_moved() || is_next_layer() */
  bool needs_track_moved() const ALWAYS_INLINE {
    return (data_ & (kXctIdMovedBit | kXctIdNextLayerBit)) != 0;
  }

  bool operator==(const XctId &other) const ALWAYS_INLINE { return data_ == other.data_; }
  bool operator!=(const XctId &other) const ALWAYS_INLINE { return data_ != other.data_; }

  /**
   * Kind of std::max(this, other).
   */
  void store_max(const XctId& other) ALWAYS_INLINE {
    if (!other.is_valid()) {
      return;
    }

    if (before(other)) {
      operator=(other);
    }
  }

  /**
   * Returns if this XctId is before other in serialization order,
   * meaning this has either an invalid (unused) epoch or is strictly less than the other.
   */
  bool before(const XctId &other) const ALWAYS_INLINE {
    ASSERT_ND(other.is_valid());
    // compare epoch, then ordinal
    if (get_epoch_int() != other.get_epoch_int()) {
      return get_epoch().before(other.get_epoch());
    }
    return get_ordinal() < other.get_ordinal();
  }

  /** Returns a version of this XctId whose being_written flag is off. */
  XctId spin_while_being_written() const ALWAYS_INLINE;

  void clear_status_bits() { data_ &= kXctIdMaskSerializer; }

  friend std::ostream& operator<<(std::ostream& o, const XctId& v);

  uint64_t data_;
};
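
// Usage sketch (illustrative only, not part of the original header; the
// helper name is hypothetical). Epoch goes to the high half of data_,
// ordinal to the low half, and before() gives the serialization order.
inline void example_xct_id_ordering() {
  XctId id1;
  id1.set(5U, 1U);             // epoch 5, in-epoch ordinal 1
  XctId id2 = id1;
  id2.increment_ordinal();     // same epoch, ordinal 2
  ASSERT_ND(id1.get_epoch_int() == 5U);
  ASSERT_ND(id2.get_ordinal() == 2U);
  ASSERT_ND(id1.before(id2));  // same epoch: ordered by ordinal
  id1.store_max(id2);          // id1 becomes the larger of the two
  ASSERT_ND(id1 == id2);
}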

/**
 * @brief Transaction ID, a 128-bit data to manage record versions and provide locking mechanism.
 */
struct LockableXctId {
  /** the first 64bit: Locking part of TID */
  McsWwLock lock_;
  /** the second 64bit: Persistent status part of TID. */
  XctId xct_id_;

  McsWwLock* get_key_lock() ALWAYS_INLINE { return &lock_; }
  bool is_keylocked() const ALWAYS_INLINE { return lock_.is_locked(); }
  bool is_deleted() const ALWAYS_INLINE { return xct_id_.is_deleted(); }
  bool is_moved() const ALWAYS_INLINE { return xct_id_.is_moved(); }
  bool is_next_layer() const ALWAYS_INLINE { return xct_id_.is_next_layer(); }
  bool needs_track_moved() const ALWAYS_INLINE { return xct_id_.needs_track_moved(); }
  bool is_being_written() const ALWAYS_INLINE { return xct_id_.is_being_written(); }

  /** used only while page initialization */
  void reset() ALWAYS_INLINE {
    lock_.reset();
    xct_id_.data_ = 0;
  }
  friend std::ostream& operator<<(std::ostream& o, const LockableXctId& v);
};

/** The MCS reader-writer lock variant of LockableXctId. */
struct RwLockableXctId {
  /** the first 64bit: Locking part of TID */
  McsRwLock lock_;
  /** the second 64bit: Persistent status part of TID. */
  XctId xct_id_;

  McsRwLock* get_key_lock() ALWAYS_INLINE { return &lock_; }
  bool is_keylocked() const ALWAYS_INLINE { return lock_.is_locked(); }
  bool is_deleted() const ALWAYS_INLINE { return xct_id_.is_deleted(); }
  bool is_moved() const ALWAYS_INLINE { return xct_id_.is_moved(); }
  bool is_next_layer() const ALWAYS_INLINE { return xct_id_.is_next_layer(); }
  bool needs_track_moved() const ALWAYS_INLINE { return xct_id_.needs_track_moved(); }
  bool is_being_written() const ALWAYS_INLINE { return xct_id_.is_being_written(); }
  bool is_hot(thread::Thread* context) const;
  void hotter(thread::Thread* context) const;

  /** used only while page initialization */
  void reset() ALWAYS_INLINE {
    lock_.reset();
    xct_id_.data_ = 0;
  }
  friend std::ostream& operator<<(std::ostream& o, const RwLockableXctId& v);
};

class McsOwnerlessLockScope {
 public:
  McsOwnerlessLockScope();
  explicit McsOwnerlessLockScope(
    McsWwLock* lock,
    bool acquire_now = true,
    bool non_racy_acquire = false);
  ~McsOwnerlessLockScope();

  bool is_valid() const { return lock_; }
  bool is_locked_by_me() const { return locked_by_me_; }

  /** Acquires the lock. */
  void acquire(bool non_racy_acquire);
  /** Release the lock if acquired. */
  void release();

 private:
  McsWwLock* lock_;
  bool locked_by_me_;
};

/** Result of track_moved_record(). */
struct TrackMovedRecordResult {
  TrackMovedRecordResult()
    : new_owner_address_(CXX11_NULLPTR), new_payload_address_(CXX11_NULLPTR) {}
  TrackMovedRecordResult(RwLockableXctId* new_owner_address, char* new_payload_address)
    : new_owner_address_(new_owner_address), new_payload_address_(new_payload_address) {}

  RwLockableXctId* new_owner_address_;
  char* new_payload_address_;
};

inline XctId XctId::spin_while_being_written() const {
  uint64_t copied_data = assorted::atomic_load_acquire<uint64_t>(&data_);
  if (UNLIKELY(copied_data & kXctIdBeingWrittenBit)) {
    while (copied_data & kXctIdBeingWrittenBit) {
      copied_data = assorted::atomic_load_acquire<uint64_t>(&data_);
    }
  }
  XctId ret;
  ret.data_ = copied_data;
  return ret;
}
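
// Usage sketch (illustrative only, not part of the original header; the
// helper name is hypothetical). An optimistic read takes a stable snapshot
// of the TID before reading the record body; the post-read verification
// step is omitted here.
inline XctId example_stable_tid_snapshot(const RwLockableXctId* tid) {
  // Waits out a concurrent writer's in-flight update, then returns a copy
  // whose being_written bit is guaranteed to be off.
  XctId observed = tid->xct_id_.spin_while_being_written();
  ASSERT_ND(!observed.is_being_written());
  return observed;
}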

UniversalLockId to_universal_lock_id(
  const memory::GlobalVolatilePageResolver& resolver,
  uintptr_t lock_ptr);

/** Must be same as storage::kPageSize. */
const uint64_t kLockPageSize = 1 << 12;

inline UniversalLockId to_universal_lock_id(
  uint64_t numa_node,
  uint64_t local_page_index,
  uintptr_t lock_ptr) {
  const uint64_t in_page_offset = lock_ptr % kLockPageSize;
  return (numa_node << 48) | (local_page_index * kLockPageSize + in_page_offset);
}

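// Usage sketch (illustrative only, not part of the original header; the
// helper name is hypothetical). With the 3-argument overload above, a
// universal lock ID orders first by NUMA node (bits 48+), then by page,
// then by the offset within the 4KB lock page: a total order across locks.
inline void example_universal_lock_id_ordering() {
  UniversalLockId id_a = to_universal_lock_id(0, 10, 0x20);  // node 0, page 10
  UniversalLockId id_b = to_universal_lock_id(0, 10, 0x40);  // same page, larger offset
  UniversalLockId id_c = to_universal_lock_id(1, 0, 0x20);   // higher NUMA node
  ASSERT_ND(id_a < id_b);  // same page: ordered by in-page offset
  ASSERT_ND(id_b < id_c);  // NUMA node dominates the comparison
}
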
// just shorthands.
inline UniversalLockId xct_id_to_universal_lock_id(
  const memory::GlobalVolatilePageResolver& resolver,
  RwLockableXctId* lock) {
  return to_universal_lock_id(resolver, reinterpret_cast<uintptr_t>(lock));
}
inline UniversalLockId rw_lock_to_universal_lock_id(
  const memory::GlobalVolatilePageResolver& resolver,
  McsRwLock* lock) {
  return to_universal_lock_id(resolver, reinterpret_cast<uintptr_t>(lock));
}

/**
 * @brief Always use this method rather than doing the conversion yourself.
 */
RwLockableXctId* from_universal_lock_id(
  const memory::GlobalVolatilePageResolver& resolver,
  const UniversalLockId universal_lock_id);

// sizeof(XctId) must be 64 bits.
STATIC_SIZE_CHECK(sizeof(XctId), sizeof(uint64_t))
STATIC_SIZE_CHECK(sizeof(McsWwLock), 8)
STATIC_SIZE_CHECK(sizeof(LockableXctId), 16)

}  // namespace xct
}  // namespace foedus
#endif  // FOEDUS_XCT_XCT_ID_HPP_