retrospective_lock_list.hpp
/*
 * Copyright (c) 2014-2015, Hewlett-Packard Development Company, LP.
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details. You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * HP designates this particular file as subject to the "Classpath" exception
 * as provided by HP in the LICENSE.txt file that accompanied this code.
 */
#ifndef FOEDUS_XCT_RETROSPECTIVE_LOCK_LIST_HPP_
#define FOEDUS_XCT_RETROSPECTIVE_LOCK_LIST_HPP_

#include <stdint.h>

#include <algorithm>
#include <iosfwd>

#include "foedus/assert_nd.hpp"
#include "foedus/compiler.hpp"
#include "foedus/error_code.hpp"
#include "foedus/memory/page_resolver.hpp"  // GlobalVolatilePageResolver is held by value below
#include "foedus/thread/fwd.hpp"
#include "foedus/xct/fwd.hpp"
#include "foedus/xct/xct_id.hpp"

namespace foedus {
namespace xct {

/**
 * An entry in CLL and RLL, representing a lock that is taken or will be taken.
 */
struct LockEntry {
  /** Used to order locks in canonical order. */
  UniversalLockId universal_lock_id_;

  /** Virtual address of the lock. */
  RwLockableXctId* lock_;

  /** Which lock mode we should take according to RLL. */
  LockMode preferred_mode_;

  /** Which lock mode we have taken during the current run (of course initially kNoLock). */
  LockMode taken_mode_;

  /** The MCS block index we pushed to the lock queue. 0 means the lock is not taken. */
  McsBlockIndex mcs_block_;

  // want to have these.. but maintaining them is nasty after sort. let's revisit later
  // ReadXctAccess* read_set_;
  // WriteXctAccess* write_set_;

  void set(
    UniversalLockId id,
    RwLockableXctId* lock,
    LockMode preferred_mode,
    LockMode taken_mode) {
    universal_lock_id_ = id;
    lock_ = lock;
    preferred_mode_ = preferred_mode;
    taken_mode_ = taken_mode;
    mcs_block_ = 0;
  }

  bool is_locked() const { return taken_mode_ != kNoLock; }
  bool is_enough() const { return taken_mode_ >= preferred_mode_; }

  bool operator<(const LockEntry& rhs) const {
    return universal_lock_id_ < rhs.universal_lock_id_;
  }

  /** Debugging aid. */
  friend std::ostream& operator<<(std::ostream& o, const LockEntry& v);

  /** for std::binary_search() etc without creating the object */
  struct LessThan {
    bool operator()(UniversalLockId lhs, const LockEntry& rhs) const {
      return lhs < rhs.universal_lock_id_;
    }
    bool operator()(const LockEntry& lhs, UniversalLockId rhs) const {
      return lhs.universal_lock_id_ < rhs;
    }
  };
};
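
// Illustrative sketch (not from the original header): LessThan lets the search helpers
// at the bottom of this file compare raw UniversalLockId keys against LockEntry elements
// without materializing a temporary LockEntry. Assuming "entries" points at the dummy
// slot 0 of a sorted lock list with "last_active" valid entries:
//
//   LockEntry* begin = entries + 1U;                  // slot 0 is the dummy entry
//   LockEntry* end   = entries + 1U + last_active;
//   LockEntry* it    = std::lower_bound(begin, end, some_id, LockEntry::LessThan());
//   bool found       = (it != end && it->universal_lock_id_ == some_id);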

/**
 * Retrospective lock list (RLL): a sorted list of locks that the transaction should take
 * in its next run, filled out from the aborted run.
 */
class RetrospectiveLockList {
 public:
  RetrospectiveLockList();
  ~RetrospectiveLockList();

  void init(
    LockEntry* array,
    uint32_t capacity,
    const memory::GlobalVolatilePageResolver& resolver);

  void uninit();
  void clear_entries();

  /** Analogous to std::binary_search() for the given lock. */
  LockListPosition binary_search(UniversalLockId lock) const;

  /** Analogous to std::lower_bound() for the given lock. */
  LockListPosition lower_bound(UniversalLockId lock) const;

  /** Fill out this retrospective lock list for the next run of the given transaction. */
  void construct(thread::Thread* context, uint32_t read_lock_threshold);

  const LockEntry* get_array() const { return array_; }
  LockEntry* get_entry(LockListPosition pos) {
    ASSERT_ND(is_valid_entry(pos));
    return array_ + pos;
  }
  const LockEntry* get_entry(LockListPosition pos) const {
    ASSERT_ND(is_valid_entry(pos));
    return array_ + pos;
  }
  uint32_t get_capacity() const { return capacity_; }
  LockListPosition get_last_active_entry() const { return last_active_entry_; }
  bool is_valid_entry(LockListPosition pos) const {
    return pos != kLockListPositionInvalid && pos <= last_active_entry_;
  }
  bool is_empty() const { return last_active_entry_ == kLockListPositionInvalid; }

  friend std::ostream& operator<<(std::ostream& o, const RetrospectiveLockList& v);
  void assert_sorted() const ALWAYS_INLINE;
  void assert_sorted_impl() const;

  LockEntry* begin() { return array_ + 1U; }
  LockEntry* end() { return array_ + 1U + last_active_entry_; }
  const LockEntry* cbegin() const { return array_ + 1U; }
  const LockEntry* cend() const { return array_ + 1U + last_active_entry_; }
  const memory::GlobalVolatilePageResolver& get_volatile_page_resolver() const {
    return volatile_page_resolver_;
  }

 private:
  /** Index 0 is a dummy entry (kLockListPositionInvalid); valid entries start at 1. */
  LockEntry* array_;
  memory::GlobalVolatilePageResolver volatile_page_resolver_;
  /** Size of array_, in entries. */
  uint32_t capacity_;
  /** Position of the last used entry; kLockListPositionInvalid when the list is empty. */
  LockListPosition last_active_entry_;

  LockListPosition issue_new_position() {
    ++last_active_entry_;
    ASSERT_ND(last_active_entry_ < capacity_);
    return last_active_entry_;
  }
};
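
// Illustrative sketch (hypothetical caller, not from the original header): when a run
// aborts because of lock conflicts, the transaction can fill out an RLL and feed it to
// the CLL of its next run:
//
//   rll->construct(context, read_lock_threshold);    // after the aborted run
//   ...                                              // next run of the same transaction
//   cll->prepopulate_for_retrospective_lock_list(*rll);
//
// so that the retry takes the conflicting locks upfront, in canonical order.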

/**
 * Current lock list (CLL): a sorted list of all locks, either read-lock or write-lock,
 * taken in the current run.
 */
class CurrentLockList {
 public:
  CurrentLockList();
  ~CurrentLockList();

  void init(
    LockEntry* array,
    uint32_t capacity,
    const memory::GlobalVolatilePageResolver& resolver);
  void uninit();
  void clear_entries();

  /** Analogous to std::binary_search() for the given lock. */
  LockListPosition binary_search(UniversalLockId lock) const;

  /**
   * Adds an entry to this list, re-sorting part of the list if necessary to
   * keep the sortedness.
   */
  LockListPosition get_or_add_entry(
    UniversalLockId lock_id,
    RwLockableXctId* lock,
    LockMode preferred_mode);

  /** Analogous to std::lower_bound() for the given lock. */
  LockListPosition lower_bound(UniversalLockId lock) const;

  const LockEntry* get_array() const { return array_; }
  LockEntry* get_array() { return array_; }
  LockEntry* get_entry(LockListPosition pos) {
    ASSERT_ND(is_valid_entry(pos));
    return array_ + pos;
  }
  const LockEntry* get_entry(LockListPosition pos) const {
    ASSERT_ND(is_valid_entry(pos));
    return array_ + pos;
  }
  uint32_t get_capacity() const { return capacity_; }
  LockListPosition get_last_active_entry() const { return last_active_entry_; }
  bool is_valid_entry(LockListPosition pos) const {
    return pos != kLockListPositionInvalid && pos <= last_active_entry_;
  }
  bool is_empty() const { return last_active_entry_ == kLockListPositionInvalid; }

  friend std::ostream& operator<<(std::ostream& o, const CurrentLockList& v);
  void assert_sorted() const ALWAYS_INLINE;
  void assert_sorted_impl() const;

  LockEntry* begin() { return array_ + 1U; }
  LockEntry* end() { return array_ + 1U + last_active_entry_; }
  const LockEntry* cbegin() const { return array_ + 1U; }
  const LockEntry* cend() const { return array_ + 1U + last_active_entry_; }
  const memory::GlobalVolatilePageResolver& get_volatile_page_resolver() const {
    return volatile_page_resolver_;
  }

  LockListPosition get_last_locked_entry() const { return last_locked_entry_; }
  UniversalLockId get_max_locked_id() const {
    if (last_locked_entry_ == kLockListPositionInvalid) {
      return kNullUniversalLockId;
    } else {
      return get_entry(last_locked_entry_)->universal_lock_id_;
    }
  }
  /** Calculate last_locked_entry_ by really checking the whole list. */
  LockListPosition calculate_last_locked_entry() const {
    return calculate_last_locked_entry_from(last_active_entry_);
  }
  /** Only searches among entries at or before "from". */
  LockListPosition calculate_last_locked_entry_from(LockListPosition from) const {
    for (LockListPosition pos = from; pos > kLockListPositionInvalid; --pos) {
      if (array_[pos].is_locked()) {
        return pos;
      }
    }
    return kLockListPositionInvalid;
  }
  void assert_last_locked_entry() const {
#ifndef NDEBUG
    LockListPosition correct = calculate_last_locked_entry();
    ASSERT_ND(correct == last_locked_entry_);
#endif  // NDEBUG
  }

  /** Create entries for all write-sets in one-shot. */
  void batch_insert_write_placeholders(const WriteXctAccess* write_set, uint32_t write_set_size);

  /** Another batch-insert method used at the beginning of a transaction. */
  void prepopulate_for_retrospective_lock_list(const RetrospectiveLockList& rll);

  // Methods below take or release locks, so they receive MCS_RW_IMPL, a template param.

  /**
   * Takes the lock at the given position: unconditionally when in canonical mode,
   * otherwise as a try that returns kErrorCodeXctLockAbort on failure.
   */
  template<typename MCS_RW_IMPL>
  ErrorCode try_or_acquire_single_lock(LockListPosition pos, MCS_RW_IMPL* mcs_rw_impl);

  /** Acquire multiple locks up to the given position in canonical order. */
  template<typename MCS_RW_IMPL>
  ErrorCode try_or_acquire_multiple_locks(LockListPosition upto_pos, MCS_RW_IMPL* mcs_rw_impl);

  template<typename MCS_RW_IMPL>
  void try_async_single_lock(LockListPosition pos, MCS_RW_IMPL* mcs_rw_impl);
  template<typename MCS_RW_IMPL>
  bool retry_async_single_lock(LockListPosition pos, MCS_RW_IMPL* mcs_rw_impl);
  template<typename MCS_RW_IMPL>
  void cancel_async_single_lock(LockListPosition pos, MCS_RW_IMPL* mcs_rw_impl);

  template<typename MCS_RW_IMPL>
  void try_async_multiple_locks(LockListPosition upto_pos, MCS_RW_IMPL* mcs_rw_impl);

  template<typename MCS_RW_IMPL>
  void release_all_locks(MCS_RW_IMPL* mcs_rw_impl) {
    release_all_after(kNullUniversalLockId, mcs_rw_impl);
  }
  /** Release all locks in CLL whose lock IDs are canonically ordered after the parameter. */
  template<typename MCS_RW_IMPL>
  void release_all_after(UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl);
  /** Same as release_all_after(address - 1). */
  template<typename MCS_RW_IMPL>
  void release_all_at_and_after(UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl);
  /** This gives up locks in CLL that are not yet taken, rather than releasing taken ones. */
  template<typename MCS_RW_IMPL>
  void giveup_all_after(UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl);
  template<typename MCS_RW_IMPL>
  void giveup_all_at_and_after(UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl);

 private:
  /** Index 0 is a dummy entry (kLockListPositionInvalid); valid entries start at 1. */
  LockEntry* array_;
  memory::GlobalVolatilePageResolver volatile_page_resolver_;
  /** Size of array_, in entries. */
  uint32_t capacity_;
  /** Position of the last used entry; kLockListPositionInvalid when the list is empty. */
  LockListPosition last_active_entry_;
  /** Position of the last entry whose lock is currently taken. */
  LockListPosition last_locked_entry_;

  LockListPosition issue_new_position() {
    ++last_active_entry_;
    ASSERT_ND(last_active_entry_ < capacity_);
    return last_active_entry_;
  }

  void release_all_after_debuglog(
    uint32_t released_read_locks,
    uint32_t released_write_locks,
    uint32_t already_released_locks,
    uint32_t canceled_async_read_locks,
    uint32_t canceled_async_write_locks) const;

  void giveup_all_after_debuglog(
    uint32_t givenup_read_locks,
    uint32_t givenup_write_locks,
    uint32_t givenup_upgrades,
    uint32_t already_enough_locks,
    uint32_t canceled_async_read_locks,
    uint32_t canceled_async_write_locks) const;
};
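
// Illustrative sketch (hypothetical caller, not from the original header): during
// transaction execution each record access registers its lock and then takes it,
// given some MCS reader-writer lock implementation "impl":
//
//   LockListPosition pos = cll->get_or_add_entry(lock_id, tid, kReadLock);
//   ErrorCode code = cll->try_or_acquire_single_lock(pos, &impl);
//   if (code == kErrorCodeXctLockAbort) { /* abort; the RLL guides the retry */ }
//
// On commit or abort, release_all_locks(&impl) returns every taken lock.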

inline void RetrospectiveLockList::assert_sorted() const {
  // In release mode, this code must be completely erased by the compiler.
#ifndef NDEBUG
  assert_sorted_impl();
#endif  // NDEBUG
}

inline void CurrentLockList::assert_sorted() const {
#ifndef NDEBUG
  assert_sorted_impl();
#endif  // NDEBUG
}

template<typename MCS_RW_IMPL>
inline ErrorCode CurrentLockList::try_or_acquire_single_lock(
  LockListPosition pos,
  MCS_RW_IMPL* mcs_rw_impl) {
  LockEntry* lock_entry = get_entry(pos);
  if (lock_entry->is_enough()) {
    return kErrorCodeOk;
  }
  ASSERT_ND(lock_entry->taken_mode_ != kWriteLock);

  McsRwLock* lock_addr = lock_entry->lock_->get_key_lock();
  if (lock_entry->taken_mode_ != kNoLock) {
    ASSERT_ND(lock_entry->preferred_mode_ == kWriteLock);
    ASSERT_ND(lock_entry->taken_mode_ == kReadLock);
    ASSERT_ND(lock_entry->mcs_block_);
    // This is a reader->writer upgrade.
    // We simply release the read-lock first, then take the write-lock.
    // In traditional 2PL, such an unlock-then-lock violates serializability,
    // but we guarantee serializability by read-verification anyway.
    // We can release any lock anytime.. great flexibility!
    mcs_rw_impl->release_rw_reader(lock_addr, lock_entry->mcs_block_);
    lock_entry->taken_mode_ = kNoLock;
    last_locked_entry_ = calculate_last_locked_entry_from(pos - 1U);
  } else {
    // This method is for unconditional acquire and try, not async/retry.
    // If we have a queue node already, something was misused.
    ASSERT_ND(lock_entry->mcs_block_ == 0);
  }

  // Now we need to take the lock. Are we in canonical mode?
  if (last_locked_entry_ == kLockListPositionInvalid || last_locked_entry_ < pos) {
    // Yay, we are in canonical mode. We can unconditionally get the lock.
    ASSERT_ND(lock_entry->taken_mode_ == kNoLock);  // not a lock upgrade, either
    if (lock_entry->preferred_mode_ == kWriteLock) {
      lock_entry->mcs_block_ = mcs_rw_impl->acquire_unconditional_rw_writer(lock_addr);
    } else {
      ASSERT_ND(lock_entry->preferred_mode_ == kReadLock);
      lock_entry->mcs_block_ = mcs_rw_impl->acquire_unconditional_rw_reader(lock_addr);
    }
    lock_entry->taken_mode_ = lock_entry->preferred_mode_;
    last_locked_entry_ = pos;
  } else {
    // Hmm, we violated canonical mode. There is a risk of deadlock.
    // Let's just try to acquire the lock and immediately give up if it fails.
    // The RLL will take care of the next run.
    // TODO(Hideaki) release some of the locks we have taken to restore canonical mode.
    // We haven't implemented this optimization yet.
    ASSERT_ND(lock_entry->mcs_block_ == 0);
    ASSERT_ND(lock_entry->taken_mode_ == kNoLock);
    if (lock_entry->preferred_mode_ == kWriteLock) {
      lock_entry->mcs_block_ = mcs_rw_impl->acquire_try_rw_writer(lock_addr);
    } else {
      ASSERT_ND(lock_entry->preferred_mode_ == kReadLock);
      lock_entry->mcs_block_ = mcs_rw_impl->acquire_try_rw_reader(lock_addr);
    }
    if (lock_entry->mcs_block_ == 0) {
      return kErrorCodeXctLockAbort;
    }
    lock_entry->taken_mode_ = lock_entry->preferred_mode_;
  }
  ASSERT_ND(lock_entry->mcs_block_);
  ASSERT_ND(lock_entry->is_locked());
  return kErrorCodeOk;
}

template<typename MCS_RW_IMPL>
inline ErrorCode CurrentLockList::try_or_acquire_multiple_locks(
  LockListPosition upto_pos,
  MCS_RW_IMPL* mcs_rw_impl) {
  ASSERT_ND(upto_pos != kLockListPositionInvalid);
  ASSERT_ND(upto_pos <= last_active_entry_);
  // Especially in this case, we probably should release locks after upto_pos first.
  for (LockListPosition pos = 1U; pos <= upto_pos; ++pos) {
    CHECK_ERROR_CODE(try_or_acquire_single_lock(pos, mcs_rw_impl));
  }
  return kErrorCodeOk;
}
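
// Illustrative sketch (hypothetical caller, not from the original header): a typical
// precommit path registers all write-set locks at once and then takes them in canonical
// (sorted) order, aborting when a non-canonical try fails:
//
//   cll->batch_insert_write_placeholders(write_set, write_set_size);
//   ErrorCode code = cll->try_or_acquire_multiple_locks(cll->get_last_active_entry(), &impl);
//   if (code != kErrorCodeOk) { /* abort; the RLL guides the next run */ }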

template<typename MCS_RW_IMPL>
inline void CurrentLockList::try_async_single_lock(
  LockListPosition pos,
  MCS_RW_IMPL* mcs_rw_impl) {
  LockEntry* lock_entry = get_entry(pos);
  if (lock_entry->is_enough()) {
    return;
  }
  ASSERT_ND(lock_entry->taken_mode_ != kWriteLock);

  McsRwLock* lock_addr = lock_entry->lock_->get_key_lock();
  if (lock_entry->taken_mode_ != kNoLock) {
    ASSERT_ND(lock_entry->preferred_mode_ == kWriteLock);
    ASSERT_ND(lock_entry->taken_mode_ == kReadLock);
    ASSERT_ND(lock_entry->mcs_block_);
    // This is a reader->writer upgrade.
    // We simply release the read-lock first, then take the write-lock.
    // In traditional 2PL, such an unlock-then-lock violates serializability,
    // but we guarantee serializability by read-verification anyway.
    // We can release any lock anytime.. great flexibility!
    mcs_rw_impl->release_rw_reader(lock_addr, lock_entry->mcs_block_);
    lock_entry->taken_mode_ = kNoLock;
    last_locked_entry_ = calculate_last_locked_entry_from(pos - 1U);
  } else {
    // This function is for pushing the queue node in the extended rwlock.
    // Doomed if we already have a queue node.
    ASSERT_ND(lock_entry->mcs_block_ == 0);
  }

  // We don't really care about canonical order here; just send out the request.
  AcquireAsyncRet async_ret;
  if (lock_entry->preferred_mode_ == kWriteLock) {
    async_ret = mcs_rw_impl->acquire_async_rw_writer(lock_addr);
  } else {
    ASSERT_ND(lock_entry->preferred_mode_ == kReadLock);
    async_ret = mcs_rw_impl->acquire_async_rw_reader(lock_addr);
  }
  ASSERT_ND(async_ret.block_index_);
  lock_entry->mcs_block_ = async_ret.block_index_;
  if (async_ret.acquired_) {
    lock_entry->taken_mode_ = lock_entry->preferred_mode_;
    ASSERT_ND(lock_entry->is_enough());
  }
  ASSERT_ND(lock_entry->mcs_block_);
}

template<typename MCS_RW_IMPL>
inline bool CurrentLockList::retry_async_single_lock(
  LockListPosition pos,
  MCS_RW_IMPL* mcs_rw_impl) {
  LockEntry* lock_entry = get_entry(pos);
  // Must not be taken yet, and must have pushed a qnode to the lock queue.
  ASSERT_ND(!lock_entry->is_enough());
  ASSERT_ND(lock_entry->taken_mode_ == kNoLock);
  ASSERT_ND(lock_entry->mcs_block_);
  ASSERT_ND(!lock_entry->is_locked());

  McsRwLock* lock_addr = lock_entry->lock_->get_key_lock();
  bool acquired = false;
  if (lock_entry->preferred_mode_ == kWriteLock) {
    acquired = mcs_rw_impl->retry_async_rw_writer(lock_addr, lock_entry->mcs_block_);
  } else {
    ASSERT_ND(lock_entry->preferred_mode_ == kReadLock);
    acquired = mcs_rw_impl->retry_async_rw_reader(lock_addr, lock_entry->mcs_block_);
  }
  if (acquired) {
    lock_entry->taken_mode_ = lock_entry->preferred_mode_;
    ASSERT_ND(lock_entry->is_locked());
    last_locked_entry_ = std::max(last_locked_entry_, pos);
  }
  return acquired;
}

template<typename MCS_RW_IMPL>
inline void CurrentLockList::cancel_async_single_lock(
  LockListPosition pos,
  MCS_RW_IMPL* mcs_rw_impl) {
  LockEntry* lock_entry = get_entry(pos);
  ASSERT_ND(!lock_entry->is_enough());
  ASSERT_ND(lock_entry->taken_mode_ == kNoLock);
  ASSERT_ND(lock_entry->mcs_block_);
  McsRwLock* lock_addr = lock_entry->lock_->get_key_lock();
  if (lock_entry->preferred_mode_ == kReadLock) {
    mcs_rw_impl->cancel_async_rw_reader(lock_addr, lock_entry->mcs_block_);
  } else {
    ASSERT_ND(lock_entry->preferred_mode_ == kWriteLock);  // the else branch must be a writer request
    mcs_rw_impl->cancel_async_rw_writer(lock_addr, lock_entry->mcs_block_);
  }
  lock_entry->mcs_block_ = 0;
}

template<typename MCS_RW_IMPL>
inline void CurrentLockList::try_async_multiple_locks(
  LockListPosition upto_pos,
  MCS_RW_IMPL* mcs_rw_impl) {
  ASSERT_ND(upto_pos != kLockListPositionInvalid);
  ASSERT_ND(upto_pos <= last_active_entry_);
  for (LockListPosition pos = 1U; pos <= upto_pos; ++pos) {
    try_async_single_lock(pos, mcs_rw_impl);
  }
}
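
// Illustrative sketch (hypothetical caller, not from the original header): the async
// variants split acquisition into enqueue / poll / cancel, so the caller can do useful
// work instead of blocking while it waits:
//
//   cll->try_async_single_lock(pos, &impl);            // push our qnode to the lock queue
//   while (!cll->retry_async_single_lock(pos, &impl)) {
//     if (should_give_up()) {                          // hypothetical caller policy
//       cll->cancel_async_single_lock(pos, &impl);     // withdraw the pending request
//       break;
//     }
//   }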

template<typename MCS_RW_IMPL>
inline void CurrentLockList::release_all_after(UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl) {
  // Only this and the logic below are implemented here because they need to know about the CLL.
  // This is not quite about the lock algorithm itself; it's something on a higher level.
  assert_sorted();
  uint32_t released_read_locks = 0;
  uint32_t released_write_locks = 0;
  uint32_t already_released_locks = 0;
  uint32_t canceled_async_read_locks = 0;
  uint32_t canceled_async_write_locks = 0;

  LockListPosition new_last_locked_entry = kLockListPositionInvalid;
  for (LockEntry* entry = begin(); entry != end(); ++entry) {
    if (entry->universal_lock_id_ <= address) {
      if (entry->is_locked()) {
        new_last_locked_entry = entry - array_;
      }
      continue;
    }
    if (entry->is_locked()) {
      if (entry->taken_mode_ == kReadLock) {
        mcs_rw_impl->release_rw_reader(entry->lock_->get_key_lock(), entry->mcs_block_);
        ++released_read_locks;
      } else {
        ASSERT_ND(entry->taken_mode_ == kWriteLock);
        mcs_rw_impl->release_rw_writer(entry->lock_->get_key_lock(), entry->mcs_block_);
        ++released_write_locks;
      }
      entry->mcs_block_ = 0;
      entry->taken_mode_ = kNoLock;
    } else if (entry->mcs_block_) {
      // Not locked yet, but mcs_block_ is set: we tried it in async mode and are still
      // waiting, or at least haven't confirmed that we acquired it.
      // Cancel these retriable locks to which we already pushed our qnode.
      ASSERT_ND(entry->taken_mode_ == kNoLock);
      if (entry->preferred_mode_ == kReadLock) {
        mcs_rw_impl->cancel_async_rw_reader(entry->lock_->get_key_lock(), entry->mcs_block_);
        ++canceled_async_read_locks;
      } else {
        ASSERT_ND(entry->preferred_mode_ == kWriteLock);
        mcs_rw_impl->cancel_async_rw_writer(entry->lock_->get_key_lock(), entry->mcs_block_);
        ++canceled_async_write_locks;
      }
      entry->mcs_block_ = 0;
    } else {
      ASSERT_ND(entry->taken_mode_ == kNoLock);
      ++already_released_locks;
    }
  }

  last_locked_entry_ = new_last_locked_entry;

#ifndef NDEBUG
  release_all_after_debuglog(
    released_read_locks,
    released_write_locks,
    already_released_locks,
    canceled_async_read_locks,
    canceled_async_write_locks);
#endif  // NDEBUG
}
template<typename MCS_RW_IMPL>
inline void CurrentLockList::release_all_at_and_after(
  UniversalLockId address,
  MCS_RW_IMPL* mcs_rw_impl) {
  if (address == kNullUniversalLockId) {
    release_all_after<MCS_RW_IMPL>(kNullUniversalLockId, mcs_rw_impl);
  } else {
    release_all_after<MCS_RW_IMPL>(address - 1U, mcs_rw_impl);
  }
}
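
// Note: because UniversalLockId is an integral identifier and kNullUniversalLockId
// compares below every valid lock ID, "at and after address" is the same set as
// "strictly after address - 1U"; release_all_at_and_after() above and
// giveup_all_at_and_after() below therefore simply delegate with address - 1U.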

template<typename MCS_RW_IMPL>
inline void CurrentLockList::giveup_all_after(UniversalLockId address, MCS_RW_IMPL* mcs_rw_impl) {
  assert_sorted();
  uint32_t givenup_read_locks = 0;
  uint32_t givenup_write_locks = 0;
  uint32_t givenup_upgrades = 0;
  uint32_t already_enough_locks = 0;
  uint32_t canceled_async_read_locks = 0;
  uint32_t canceled_async_write_locks = 0;

  for (LockEntry* entry = begin(); entry != end(); ++entry) {
    if (entry->universal_lock_id_ <= address) {
      continue;
    }
    if (entry->preferred_mode_ == kNoLock) {
      continue;
    }
    if (entry->is_enough()) {
      ++already_enough_locks;
      continue;
    }

    if (entry->is_locked()) {
      ASSERT_ND(entry->taken_mode_ == kReadLock);
      ASSERT_ND(entry->preferred_mode_ == kWriteLock);
      ++givenup_upgrades;
      entry->preferred_mode_ = entry->taken_mode_;
    } else if (entry->mcs_block_) {
      if (entry->preferred_mode_ == kReadLock) {
        mcs_rw_impl->cancel_async_rw_reader(entry->lock_->get_key_lock(), entry->mcs_block_);
        ++canceled_async_read_locks;
      } else {
        ASSERT_ND(entry->preferred_mode_ == kWriteLock);
        mcs_rw_impl->cancel_async_rw_writer(entry->lock_->get_key_lock(), entry->mcs_block_);
        ++canceled_async_write_locks;
      }
      entry->mcs_block_ = 0;
      entry->preferred_mode_ = kNoLock;
    } else {
      ASSERT_ND(entry->taken_mode_ == kNoLock);
      if (entry->preferred_mode_ == kReadLock) {
        ++givenup_read_locks;
      } else {
        ASSERT_ND(entry->preferred_mode_ == kWriteLock);
        ++givenup_write_locks;
      }
      entry->preferred_mode_ = kNoLock;
    }
  }

#ifndef NDEBUG
  giveup_all_after_debuglog(
    givenup_read_locks,
    givenup_write_locks,
    givenup_upgrades,
    already_enough_locks,
    canceled_async_read_locks,
    canceled_async_write_locks);
#endif  // NDEBUG
}
template<typename MCS_RW_IMPL>
inline void CurrentLockList::giveup_all_at_and_after(
  UniversalLockId address,
  MCS_RW_IMPL* mcs_rw_impl) {
  if (address == kNullUniversalLockId) {
    giveup_all_after<MCS_RW_IMPL>(kNullUniversalLockId, mcs_rw_impl);
  } else {
    giveup_all_after<MCS_RW_IMPL>(address - 1U, mcs_rw_impl);
  }
}

/**
 * General lower_bound/binary_search logic for any kind of LockList/LockEntry.
 * Returns the position of the first entry whose lock ID is not less than the given lock.
 */
template<typename LOCK_LIST, typename LOCK_ENTRY>
LockListPosition lock_lower_bound(
  const LOCK_LIST& list,
  UniversalLockId lock) {
  LockListPosition last_active_entry = list.get_last_active_entry();
  if (last_active_entry == kLockListPositionInvalid) {
    return kLockListPositionInvalid + 1U;
  }
  // Check the easy cases first. This is a wasted cost if they don't apply, but still cheap.
  const LOCK_ENTRY* array = list.get_array();
  // For example, [dummy, 3, 5, 7] (last_active_entry=3).
  // id=7: 3, larger: 4, smaller: need to check more
  if (array[last_active_entry].universal_lock_id_ == lock) {
    return last_active_entry;
  } else if (array[last_active_entry].universal_lock_id_ < lock) {
    return last_active_entry + 1U;
  }

  LockListPosition pos
    = std::lower_bound(
        array + 1U,
        array + last_active_entry + 1U,
        lock,
        typename LOCK_ENTRY::LessThan())
      - array;
  // In the above example, id=6: 3, id=4,5: 2, smaller: 1.
  ASSERT_ND(pos <= last_active_entry);  // otherwise we went into the branch above
  ASSERT_ND(array[pos].universal_lock_id_ >= lock);
  ASSERT_ND(pos == 1U || array[pos - 1U].universal_lock_id_ < lock);
  return pos;
}

template<typename LOCK_LIST, typename LOCK_ENTRY>
LockListPosition lock_binary_search(
  const LOCK_LIST& list,
  UniversalLockId lock) {
  LockListPosition last_active_entry = list.get_last_active_entry();
  LockListPosition pos = lock_lower_bound<LOCK_LIST, LOCK_ENTRY>(list, lock);
  if (pos != kLockListPositionInvalid && pos <= last_active_entry) {
    const LOCK_ENTRY* array = list.get_array();
    if (array[pos].universal_lock_id_ == lock) {
      return pos;
    }
  }
  return kLockListPositionInvalid;
}
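
// Worked example (illustrative): for a list whose lock IDs are [dummy, 3, 5, 7]
// (last_active_entry = 3), lock_lower_bound() returns 1 for id 2, 2 for ids 4 and 5,
// 3 for ids 6 and 7, and 4 for anything larger, while lock_binary_search() returns the
// matching position only for 3, 5, or 7 and kLockListPositionInvalid otherwise.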

}  // namespace xct
}  // namespace foedus
#endif  // FOEDUS_XCT_RETROSPECTIVE_LOCK_LIST_HPP_