20 #include <glog/logging.h>
36 ASSERT_ND(reinterpret_cast<uintptr_t>(address) % 8 == 0);
40 template <
typename COND>
52 template <
typename ADAPTOR>
58 std::atomic<bool>* me_waiting = adaptor_.me_waiting();
62 ASSERT_ND(adaptor_.get_cur_block() < 0xFFFFU);
66 McsWwBlock* my_block = adaptor_.get_ww_my_block(block_index);
68 me_waiting->store(
true, std::memory_order_release);
72 auto* address = &(mcs_lock->
tail_);
80 if (
UNLIKELY(address->is_guest_relaxed())) {
81 spin_until([address]{
return !address->is_guest_acquire(); });
88 pred.
word_ = assorted::raw_atomic_exchange<uint64_t>(&address->word_, group_tail.
word_);
95 DVLOG(2) <<
"Okay, got a lock uncontended. me=" << id;
96 me_waiting->store(
false, std::memory_order_release);
102 group_tail.
word_ = assorted::raw_atomic_exchange<uint64_t>(&address->word_,
kMcsGuestId);
117 DVLOG(0) <<
"mm, contended, we have to wait.. me=" <<
id <<
" pred=" << predecessor_id;
120 McsWwBlock* pred_block = adaptor_.get_ww_other_block(predecessor_id, predecessor_block);
121 ASSERT_ND(!pred_block->has_successor_atomic());
123 pred_block->set_successor_release(
id, block_index);
127 spin_until([me_waiting]{
return !me_waiting->load(std::memory_order_acquire); });
128 DVLOG(1) <<
"Okay, now I hold the lock. me=" <<
id <<
", ex-pred=" << predecessor_id;
136 template <
typename ADAPTOR>
141 ASSERT_ND(!adaptor_.me_waiting()->load());
142 ASSERT_ND(adaptor_.get_cur_block() < 0xFFFFU);
146 McsWwBlock* my_block = adaptor_.get_ww_my_block(block_index);
150 auto* address = &(mcs_lock->
tail_);
157 bool swapped = assorted::raw_atomic_compare_exchange_weak<uint64_t>(
165 DVLOG(2) <<
"Okay, got a lock uncontended. me=" << id;
169 ASSERT_ND(!adaptor_.me_waiting()->load());
177 ASSERT_ND(!adaptor_.me_waiting()->load());
186 adaptor_.cancel_new_block(block_index);
190 template <
typename ADAPTOR>
197 ASSERT_ND(!adaptor_.me_waiting()->load());
200 ASSERT_ND(adaptor_.get_cur_block() < 0xFFFFU);
203 ASSERT_ND(block_index > 0 && block_index <= 0xFFFFU);
204 McsWwBlock* my_block = adaptor_.get_ww_my_block(block_index);
211 template <
typename ADAPTOR>
218 ASSERT_ND(!adaptor_.me_waiting()->load());
221 ASSERT_ND(adaptor_.get_cur_block() >= block_index);
224 auto* address = &(mcs_lock->
tail_);
225 McsWwBlock* block = adaptor_.get_ww_my_block(block_index);
231 = assorted::raw_atomic_compare_exchange_strong<uint64_t>(
240 ASSERT_ND(address->copy_atomic() != myself);
241 DVLOG(2) <<
"Okay, release a lock uncontended. me=" << id;
246 DVLOG(0) <<
"Interesting contention on MCS release. I thought it's null, but someone has just "
247 " jumped in. me=" <<
id <<
", mcs_lock=" << *mcs_lock;
257 DVLOG(1) <<
"Okay, I have a successor. me=" <<
id <<
", succ=" << successor_id;
259 ASSERT_ND(address->copy_atomic() != myself);
261 ASSERT_ND(adaptor_.other_waiting(successor_id)->load());
264 ASSERT_ND(address->copy_atomic() != myself);
265 adaptor_.other_waiting(successor_id)->store(
false, std::memory_order_release);
266 ASSERT_ND(address->copy_atomic() != myself);
283 return assorted::raw_atomic_compare_exchange_weak<uint64_t>(
288 DVLOG(1) <<
"Okay, now I hold the lock. me=guest";
299 bool swapped = assorted::raw_atomic_compare_exchange_weak<uint64_t>(
306 DVLOG(2) <<
"Okay, got a guest lock uncontended.";
334 return assorted::raw_atomic_compare_exchange_weak<uint64_t>(int_address, &old.
word_, 0);
336 DVLOG(1) <<
"Okay, guest released the lock.";
347 template <
typename ADAPTOR>
357 adaptor_.cancel_new_block(block_index);
368 auto* my_block = adaptor_.get_rw_my_block(block_index);
375 adaptor_.cancel_new_block(block_index);
381 ASSERT_ND(adaptor_.get_cur_block() < 0xFFFFU);
383 const McsBlockIndex block_index = adaptor_.issue_new_block();
386 auto* my_block = adaptor_.get_rw_my_block(block_index);
389 my_block->init_reader();
390 ASSERT_ND(my_block->is_blocked() && my_block->is_reader());
392 ASSERT_ND(my_block->successor_block_index_ == 0);
396 uint32_t* tail_address = &(mcs_rw_lock->
tail_);
397 uint32_t pred_tail_int = assorted::raw_atomic_exchange<uint32_t>(tail_address, tail_desired);
399 if (pred_tail_int == 0) {
404 auto* pred_block = adaptor_.dereference_rw_tail_block(pred_tail_int);
405 uint16_t* pred_state_address = &pred_block->self_.data_;
406 uint16_t pred_state_expected = pred_block->make_blocked_with_no_successor_state();
407 uint16_t pred_state_desired = pred_block->make_blocked_with_reader_successor_state();
408 if (!pred_block->is_reader() || assorted::raw_atomic_compare_exchange_strong<uint16_t>(
410 &pred_state_expected,
411 pred_state_desired)) {
414 pred_block->set_successor_next_only(
id, block_index);
415 spin_until([my_block]{
return my_block->is_granted(); });
420 pred_block->set_successor_next_only(
id, block_index);
424 finalize_acquire_reader_simple(mcs_rw_lock, my_block);
434 ASSERT_ND(adaptor_.get_cur_block() >= block_index);
438 uint32_t* tail_address = &mcs_rw_lock->
tail_;
440 if (my_block->successor_is_ready() ||
441 !assorted::raw_atomic_compare_exchange_strong<uint32_t>(tail_address, &expected, 0)) {
446 spin_until([my_block]{
return my_block->successor_is_ready(); });
447 if (my_block->has_writer_successor()) {
448 assorted::raw_atomic_exchange<thread::ThreadId>(
450 my_block->successor_thread_id_);
457 = assorted::atomic_load_acquire<thread::ThreadId>(&mcs_rw_lock->
next_writer_);
460 assorted::raw_atomic_compare_exchange_strong<thread::ThreadId>(
467 McsBlockIndex next_cur_block = adaptor_.get_other_cur_block(next_writer);
468 McsRwSimpleBlock *writer_block = adaptor_.get_rw_other_block(next_writer, next_cur_block);
479 const McsBlockIndex block_index = adaptor_.issue_new_block();
480 ASSERT_ND(adaptor_.get_cur_block() < 0xFFFFU);
483 auto* my_block = adaptor_.get_rw_my_block(block_index);
485 my_block->init_writer();
486 ASSERT_ND(my_block->is_blocked() && !my_block->is_reader());
488 ASSERT_ND(my_block->successor_block_index_ == 0);
492 uint32_t* tail_address = &(mcs_rw_lock->
tail_);
493 uint32_t pred_tail_int = assorted::raw_atomic_exchange<uint32_t>(tail_address, tail_desired);
494 ASSERT_ND(pred_tail_int != tail_desired);
496 if (pred_tail_int == 0) {
498 assorted::raw_atomic_exchange<thread::ThreadId>(&mcs_rw_lock->
next_writer_, id);
500 old_next_writer = assorted::raw_atomic_exchange<thread::ThreadId>(
503 if (old_next_writer ==
id) {
509 auto* pred_block = adaptor_.dereference_rw_tail_block(pred_tail_int);
510 pred_block->set_successor_class_writer();
511 pred_block->set_successor_next_only(
id, block_index);
513 spin_until([my_block]{
return my_block->is_granted(); });
522 ASSERT_ND(adaptor_.get_cur_block() >= block_index);
523 auto* my_block = adaptor_.get_rw_my_block(block_index);
525 uint32_t* tail_address = &mcs_rw_lock->
tail_;
526 if (my_block->successor_is_ready() ||
527 !assorted::raw_atomic_compare_exchange_strong<uint32_t>(tail_address, &expected, 0)) {
528 if (
UNLIKELY(!my_block->successor_is_ready())) {
529 spin_until([my_block]{
return my_block->successor_is_ready(); });
531 ASSERT_ND(my_block->successor_is_ready());
532 auto* successor_block = adaptor_.get_rw_other_block(
533 my_block->successor_thread_id_,
534 my_block->successor_block_index_);
535 ASSERT_ND(successor_block->is_blocked());
536 if (successor_block->is_reader()) {
539 successor_block->unblock();
548 return {success, block_index};
553 return {success, block_index};
611 auto* my_block = adaptor_.get_rw_my_block(block_index);
612 my_block->init_reader();
615 uint64_t expected = *
reinterpret_cast<uint64_t*
>(&tmp);
619 uint64_t desired = *
reinterpret_cast<uint64_t*
>(&tmp2);
620 if (assorted::raw_atomic_compare_exchange_weak<uint64_t>(
621 reinterpret_cast<uint64_t*>(lock), &expected, desired)) {
623 finalize_acquire_reader_simple(lock, my_block);
630 auto* my_block = adaptor_.get_rw_my_block(block_index);
631 my_block->init_writer();
634 uint64_t expected = *
reinterpret_cast<uint64_t*
>(&tmp);
637 uint64_t desired = *
reinterpret_cast<uint64_t*
>(&tmp2);
639 return assorted::raw_atomic_compare_exchange_weak<uint64_t>(
640 reinterpret_cast<uint64_t*
>(lock), &expected, desired);
657 McsRwSimpleBlock* successor_block = adaptor_.get_rw_other_block(
661 successor_block->unblock();
675 template <
typename ADAPTOR>
685 auto* my_block = adaptor_.get_rw_my_block(block_index);
686 ASSERT_ND(my_block->next_flag_is_granted());
687 ASSERT_ND(my_block->pred_flag_is_granted());
697 auto* my_block = adaptor_.get_rw_my_block(block_index);
698 ASSERT_ND(my_block->next_flag_is_granted());
699 ASSERT_ND(my_block->pred_flag_is_granted());
708 auto* my_block = init_block(&block_index,
true);
711 uint64_t expected = *
reinterpret_cast<uint64_t*
>(&tmp);
714 uint64_t desired = *
reinterpret_cast<uint64_t*
>(&tmp2);
715 my_block->set_flags_granted();
716 if (assorted::raw_atomic_compare_exchange_weak<uint64_t>(
717 reinterpret_cast<uint64_t*>(lock), &expected, desired)) {
721 adaptor_.cancel_new_block(block_index);
763 auto ret = acquire_reader_lock(lock, &block_index, 10);
797 release_reader_lock(lock, block_index);
800 release_writer_lock(lock, block_index);
809 auto* my_block = adaptor_.get_rw_my_block(block_index);
811 ASSERT_ND(my_block->pred_flag_is_granted());
812 ASSERT_ND(my_block->next_flag_is_granted());
815 ASSERT_ND(!my_block->next_flag_is_granted());
827 auto* my_block = adaptor_.get_rw_my_block(block_index);
829 ASSERT_ND(my_block->pred_flag_is_granted());
830 ASSERT_ND(my_block->next_flag_is_granted());
833 ASSERT_ND(!my_block->next_flag_is_granted());
839 auto* block = adaptor_.get_rw_my_block(block_index);
840 if (block->pred_flag_is_granted()) {
842 if (!block->next_flag_is_granted()) {
843 auto ret = finish_acquire_reader_lock(lock, block,
847 ASSERT_ND(block->next_flag_is_granted());
850 ASSERT_ND(!block->next_flag_is_granted());
854 auto* block = adaptor_.get_rw_my_block(block_index);
855 if (block->pred_flag_is_granted()) {
857 if (!block->next_flag_is_granted()) {
858 block->set_next_flag_granted();
859 adaptor_.remove_rw_async_mapping(lock);
861 ASSERT_ND(block->next_flag_is_granted());
864 ASSERT_ND(!block->next_flag_is_granted());
870 if (cancel_reader_lock(lock, my_tail_int) ==
kErrorCodeOk) {
872 release_reader_lock(lock, block_index);
875 release_reader_lock(lock, block_index);
880 if (cancel_writer_lock(lock, my_tail_int) ==
kErrorCodeOk) {
881 release_writer_lock(lock, block_index);
890 if (*out_block_index) {
892 block_index = *out_block_index;
894 block_index = *out_block_index = adaptor_.issue_new_block();
898 ASSERT_ND(adaptor_.get_cur_block() < 0xFFFFU);
899 auto* my_block = adaptor_.get_rw_my_block(block_index);
901 my_block->init_writer();
903 my_block->init_reader();
910 McsRwExtendedBlock* pred_block,
912 McsRwExtendedBlock* my_block) {
914 ASSERT_ND(pred_block->get_next_id() == 0);
915 my_block->set_pred_id(pred);
916 pred_block->set_next_id(me);
920 auto* my_block = init_block(out_block_index,
false);
921 ASSERT_ND(my_block->pred_flag_is_waiting());
922 ASSERT_ND(my_block->next_flag_is_waiting());
923 ASSERT_ND(!my_block->next_flag_is_busy());
927 auto pred = lock->xchg_tail(my_tail_int);
929 lock->increment_nreaders();
931 my_block->set_pred_flag_granted();
932 return finish_acquire_reader_lock(lock, my_block, my_tail_int);
937 auto* pred_block = adaptor_.dereference_rw_tail_block(pred);
938 if (pred_block->is_reader()) {
939 return acquire_reader_lock_check_reader_pred(lock, my_block, my_tail_int, pred, timeout);
941 return acquire_reader_lock_check_writer_pred(lock, my_block, my_tail_int, pred, timeout);
945 McsRwLock* lock, McsRwExtendedBlock* my_block, uint32_t my_tail_int) {
946 my_block->set_next_flag_busy_granted();
947 ASSERT_ND(my_block->next_flag_is_granted());
948 ASSERT_ND(my_block->next_flag_is_busy());
953 if (lock->get_tail_int() == my_tail_int) {
954 my_block->unset_next_flag_busy();
958 spin_until([my_block]{
return my_block->get_next_id() != 0; });
959 uint64_t next = my_block->get_next();
960 uint32_t next_id = next >> 32;
963 ASSERT_ND(my_block->next_flag_is_granted());
964 ASSERT_ND(my_block->next_flag_is_busy());
966 my_block->unset_next_flag_busy();
970 auto* succ_block = adaptor_.dereference_rw_tail_block(next_id);
971 if (succ_block->is_writer()) {
972 my_block->unset_next_flag_busy();
984 my_block->unset_next_flag_busy();
988 if (my_block->next_flag_is_leaving_granted() && !my_block->next_flag_has_successor()) {
992 ASSERT_ND(succ_block->pred_flag_is_waiting());
996 spin_until([succ_block, my_tail_int]{
return succ_block->get_pred_id() == my_tail_int; });
998 lock->increment_nreaders();
999 succ_block->set_pred_flag_granted();
1004 if (my_block->next_flag_has_reader_successor()) {
1006 spin_until([succ_block, my_tail_int]{
return succ_block->get_pred_id() == my_tail_int; });
1008 ASSERT_ND(succ_block->pred_flag_is_waiting());
1009 lock->increment_nreaders();
1010 succ_block->set_pred_flag_granted();
1017 my_block->unset_next_flag_busy();
1022 ErrorCode acquire_reader_lock_check_reader_pred(
1024 McsRwExtendedBlock* my_block,
1025 uint32_t my_tail_int,
1028 auto* pred_block = adaptor_.dereference_rw_tail_block(pred);
1030 ASSERT_ND(my_block->get_pred_id() == 0);
1034 return !pred_block->get_next_id() && !pred_block->next_flag_has_successor(); });
1035 uint32_t expected = pred_block->make_next_flag_waiting_with_no_successor();
1036 uint32_t val = pred_block->cas_val_next_flag_weak(
1037 expected, pred_block->make_next_flag_waiting_with_reader_successor());
1038 if (val == expected) {
1039 link_pred(pred, pred_block, my_tail_int, my_block);
1040 if (my_block->timeout_granted(timeout)) {
1041 return finish_acquire_reader_lock(lock, my_block, my_tail_int);
1046 return cancel_reader_lock(lock, my_tail_int);
1051 link_pred(pred, pred_block, my_tail_int, my_block);
1053 spin_until([my_block, pred]{
return my_block->get_pred_id() != pred; });
1055 pred = my_block->xchg_pred_id(0);
1057 spin_until([my_block]{
return my_block->pred_flag_is_granted(); });
1058 return finish_acquire_reader_lock(lock, my_block, my_tail_int);
1060 ASSERT_ND(!my_block->pred_flag_is_granted());
1063 pred_block = adaptor_.dereference_rw_tail_block(pred);
1064 if (pred_block->is_writer()) {
1065 return acquire_reader_lock_check_writer_pred(lock, my_block, my_tail_int, pred, timeout);
1080 lock->increment_nreaders();
1081 my_block->set_pred_flag_granted();
1082 return finish_acquire_reader_lock(lock, my_block, my_tail_int);
1087 ErrorCode cancel_reader_lock(McsRwLock* lock, uint32_t my_tail_int) {
1088 auto* my_block = adaptor_.dereference_rw_tail_block(my_tail_int);
1089 ASSERT_ND(!my_block->next_flag_is_granted());
1090 auto pred = my_block->xchg_pred_id(0);
1092 spin_until([my_block]{
return my_block->pred_flag_is_granted(); });
1093 return finish_acquire_reader_lock(lock, my_block, my_tail_int);
1097 ASSERT_ND(!my_block->next_flag_is_granted());
1098 my_block->set_next_flag_leaving();
1103 auto* pred_block = adaptor_.dereference_rw_tail_block(pred);
1104 if (pred_block->is_reader()) {
1105 return cancel_reader_lock_with_reader_pred(lock, my_block, my_tail_int, pred);
1107 ASSERT_ND(my_block->get_pred_id() == 0);
1108 return cancel_reader_lock_with_writer_pred(lock, my_block, my_tail_int, pred);
1111 ErrorCode cancel_reader_lock_with_writer_pred(
1112 McsRwLock* lock, McsRwExtendedBlock* my_block, uint32_t my_tail_int, uint32_t pred) {
1114 ASSERT_ND(!my_block->next_flag_is_granted());
1115 ASSERT_ND(my_block->next_flag_is_leaving());
1117 ASSERT_ND(pred >> 16 != adaptor_.get_my_id());
1118 auto* pred_block = adaptor_.dereference_rw_tail_block(pred);
1120 ASSERT_ND(my_block->get_pred_id() == 0);
1123 return pred_block->get_next_id() == my_tail_int &&
1124 pred_block->next_flag_has_reader_successor(); });
1125 ASSERT_ND(pred_block->next_flag_has_reader_successor());
1127 ASSERT_ND(my_block->get_pred_id() == 0);
1129 uint64_t eflags = pred_block->read_next_flags();
1130 if ((eflags & McsRwExtendedBlock::kSuccFlagMask) ==
1134 ASSERT_ND(my_block->get_pred_id() == 0);
1135 my_block->set_pred_id(pred);
1136 pred = my_block->xchg_pred_id(0);
1138 spin_until([my_block]{
return my_block->pred_flag_is_granted(); });
1139 return finish_acquire_reader_lock(lock, my_block, my_tail_int);
1142 ASSERT_ND(!my_block->next_flag_is_granted());
1143 my_block->set_next_flag_leaving();
1146 auto* pred_block = adaptor_.dereference_rw_tail_block(pred);
1147 ASSERT_ND(my_block->get_pred_id() == 0);
1148 if (pred_block->is_reader()) {
1149 return cancel_reader_lock_with_reader_pred(lock, my_block, my_tail_int, pred);
1154 ASSERT_ND(pred_block->next_flag_is_granted());
1155 ASSERT_ND(pred_block->next_flag_is_busy());
1156 my_block->set_pred_id(pred);
1157 spin_until([my_block]{
return my_block->pred_flag_is_granted(); });
1158 return finish_acquire_reader_lock(lock, my_block, my_tail_int);
1161 if (pred_block->cas_next_weak(eflags | (static_cast<uint64_t>(my_tail_int) << 32),
1168 if (my_block->get_next_id() == 0 && lock->cas_tail_weak(my_tail_int, pred)) {
1169 pred_block->set_next_flag_no_successor();
1170 pred_block->set_next_id(0);
1171 ASSERT_ND(!my_block->next_flag_has_successor());
1175 cancel_reader_lock_relink(pred_block, my_block, my_tail_int, pred);
1179 ErrorCode cancel_reader_lock_with_reader_pred(
1180 McsRwLock* lock, McsRwExtendedBlock* my_block, uint32_t my_tail_int, uint32_t pred) {
1182 ASSERT_ND(!my_block->next_flag_is_granted());
1183 ASSERT_ND(my_block->next_flag_is_leaving());
1187 ASSERT_ND(pred >> 16 != adaptor_.get_my_id());
1188 auto* pred_block = adaptor_.dereference_rw_tail_block(pred);
1191 return pred_block->next_flag_has_reader_successor() &&
1192 (pred_block->get_next_id() == my_tail_int ||
1195 uint64_t expected = pred_block->make_next_flag_waiting_with_reader_successor() |
1196 (
static_cast<uint64_t
>(my_tail_int) << 32);
1198 uint64_t desired = pred_block->make_next_flag_waiting_with_reader_successor() |
1200 auto val = pred_block->cas_val_next_weak(expected, desired);
1202 if (val != expected) {
1213 ASSERT_ND(pred_block->next_flag_has_reader_successor());
1214 my_block->set_pred_id(pred);
1216 return finish_acquire_reader_lock(lock, my_block, my_tail_int);
1222 my_block->set_pred_id(pred);
1223 spin_until([my_block, pred]{
return my_block->get_pred_id() != pred; });
1225 pred = my_block->xchg_pred_id(0);
1227 spin_until([my_block]{
return my_block->pred_flag_is_granted(); });
1228 return finish_acquire_reader_lock(lock, my_block, my_tail_int);
1230 pred_block = adaptor_.dereference_rw_tail_block(pred);
1231 ASSERT_ND(!my_block->pred_flag_is_granted());
1233 if (pred_block->is_writer()) {
1234 return cancel_reader_lock_with_writer_pred(lock, my_block, my_tail_int, pred);
1241 ASSERT_ND(my_block->next_flag_is_leaving());
1242 if (!my_block->next_flag_has_successor() && lock->cas_tail_weak(my_tail_int, pred)) {
1245 ASSERT_ND(my_block->get_next_id() == 0);
1246 ASSERT_ND(my_block->next_flag_is_leaving());
1247 ASSERT_ND(!my_block->next_flag_has_successor());
1249 pred_block->set_next_flag_no_successor();
1250 pred_block->set_next_id(0);
1254 cancel_reader_lock_relink(pred_block, my_block, my_tail_int, pred);
1259 void cancel_reader_lock_relink(
1260 McsRwExtendedBlock* pred_block,
1261 McsRwExtendedBlock* my_block,
1262 uint32_t my_tail_int,
1264 spin_until([my_block]{
return my_block->get_next_id() != 0; });
1266 ASSERT_ND(my_block->next_flag_is_leaving());
1267 uint32_t next_id = my_block->get_next_id();
1270 auto* succ_block = adaptor_.dereference_rw_tail_block(next_id);
1273 uint64_t successor = 0;
1274 if (my_block->next_flag_has_reader_successor()) {
1276 (static_cast<uint64_t>(next_id) << 32);
1277 }
else if (my_block->next_flag_has_writer_successor()) {
1279 (static_cast<uint64_t>(next_id) << 32);
1281 ASSERT_ND(pred_block->next_flag_has_reader_successor());
1285 uint64_t expected = 0, new_next = 0;
1286 expected = pred_block->get_next();
1288 if (expected & static_cast<uint64_t>(McsRwExtendedBlock::kSuccFlagBusy)) {
1292 return pred_block->cas_next_weak(expected, new_next);
1297 return succ_block->cas_pred_id_strong(my_tail_int, pred);
1301 ErrorCode acquire_reader_lock_check_writer_pred(
1303 McsRwExtendedBlock* my_block,
1304 uint32_t my_tail_int,
1307 auto* pred_block = adaptor_.dereference_rw_tail_block(pred);
1311 return !pred_block->get_next_id() && !pred_block->next_flag_has_successor(); });
1313 ASSERT_ND(my_block->get_pred_id() == 0);
1314 pred_block->set_next_flag_reader_successor();
1315 pred_block->set_next_id(my_tail_int);
1320 if (my_block->timeout_granted(timeout)) {
1321 return finish_acquire_reader_lock(lock, my_block, my_tail_int);
1326 return cancel_reader_lock(lock, my_tail_int);
1329 void release_reader_lock(McsRwLock* lock,
McsBlockIndex block_index) {
1330 auto id = adaptor_.get_my_id();
1332 auto* my_block = adaptor_.get_rw_other_block(
id, block_index);
1339 ASSERT_ND(my_block->next_flag_is_granted());
1340 my_block->set_next_flag_busy();
1344 uint32_t next_id = my_block->get_next_id();
1345 while (next_id == 0) {
1346 if (lock->cas_tail_weak(my_tail_int, 0)) {
1347 finish_release_reader_lock(lock);
1349 my_block->mark_released();
1353 next_id = my_block->get_next_id();
1359 ASSERT_ND(my_block->next_flag_has_successor());
1360 if (my_block->next_flag_has_writer_successor()) {
1361 auto* succ_block = adaptor_.dereference_rw_tail_block(next_id);
1362 ASSERT_ND(!succ_block->pred_flag_is_granted());
1364 ASSERT_ND(my_block->next_flag_has_writer_successor());
1367 auto next_writer_id = next_id >> 16;
1368 lock->set_next_writer(next_writer_id);
1369 ASSERT_ND(adaptor_.get_rw_other_async_block(next_writer_id, lock));
1372 return succ_block->cas_pred_id_strong(my_tail_int, 0);
1376 finish_release_reader_lock(lock);
1378 my_block->mark_released();
1382 void finish_release_reader_lock(McsRwLock* lock) {
1383 if (lock->decrement_nreaders() > 1) {
1386 auto next_writer_id = lock->get_next_writer();
1387 ASSERT_ND(next_writer_id != adaptor_.get_my_id());
1389 lock->nreaders() == 0 &&
1391 auto* wb = adaptor_.get_rw_other_async_block(next_writer_id, lock);
1397 wb->set_pred_flag_granted();
1402 McsRwLock* lock,
McsBlockIndex* out_block_index, int32_t timeout) {
1403 auto* my_block = init_block(out_block_index,
true);
1405 auto id = adaptor_.get_my_id();
1408 adaptor_.add_rw_async_mapping(lock, *out_block_index);
1409 auto pred = lock->xchg_tail(my_tail_int);
1412 lock->set_next_writer(
id);
1413 if (lock->nreaders() == 0) {
1415 my_block->set_flags_granted();
1418 ASSERT_ND(my_block->next_flag_is_granted());
1419 adaptor_.remove_rw_async_mapping(lock);
1424 auto* pred_block = adaptor_.dereference_rw_tail_block(pred);
1426 return !pred_block->next_flag_has_successor() && !pred_block->get_next_id(); });
1429 pred_block->set_next_flag_writer_successor();
1430 pred_block->set_next_id(my_tail_int);
1437 if (my_block->timeout_granted(timeout)) {
1438 my_block->set_next_flag_granted();
1439 adaptor_.remove_rw_async_mapping(lock);
1442 ASSERT_ND(my_block->next_flag_is_granted());
1448 return cancel_writer_lock(lock, my_tail_int);
1451 void release_writer_lock(McsRwLock* lock,
McsBlockIndex block_index) {
1452 auto id = adaptor_.get_my_id();
1454 auto* my_block = adaptor_.get_rw_my_block(block_index);
1456 ASSERT_ND(my_block->next_flag_is_granted());
1459 ASSERT_ND(my_block->pred_flag_is_granted());
1460 ASSERT_ND(my_block->next_flag_is_granted());
1461 my_block->set_next_flag_busy();
1464 ASSERT_ND(my_block->pred_flag_is_granted());
1465 ASSERT_ND(my_block->next_flag_is_granted());
1466 ASSERT_ND(my_block->next_flag_is_busy());
1469 uint32_t next_id = my_block->get_next_id();
1470 while (next_id == 0) {
1472 if (lock->cas_tail_weak(my_tail_int, 0)) {
1475 next_id = my_block->get_next_id();
1478 ASSERT_ND(my_block->next_flag_has_successor());
1482 auto* succ_block = adaptor_.dereference_rw_tail_block(next_id);
1484 ASSERT_ND(!succ_block->pred_flag_is_granted());
1488 ASSERT_ND(my_block->get_next_id() == next_id);
1490 if (succ_block->is_reader()) {
1491 lock->increment_nreaders();
1493 succ_block->set_pred_flag_granted();
1496 ErrorCode cancel_writer_lock(McsRwLock* lock, uint32_t my_tail_int) {
1498 auto* my_block = adaptor_.dereference_rw_tail_block(my_tail_int);
1499 auto pred = my_block->xchg_pred_id(0);
1505 spin_until([my_block]{
return my_block->pred_flag_is_granted(); });
1506 my_block->set_next_flag_granted();
1507 adaptor_.remove_rw_async_mapping(lock);
1513 my_block->set_next_flag_leaving();
1514 ASSERT_ND(!my_block->next_flag_is_granted());
1522 return cancel_writer_lock_no_pred(lock, my_block, my_tail_int);
1525 auto* pred_block = adaptor_.dereference_rw_tail_block(pred);
1530 return pred_block->get_next_id() == my_tail_int &&
1531 pred_block->next_flag_has_writer_successor(); });
1533 uint64_t eflags = pred_block->read_next_flags();
1535 ASSERT_ND(my_block->get_pred_id() == 0);
1540 my_block->set_pred_id(pred);
1542 }
else if (eflags & static_cast<uint64_t>(McsRwExtendedBlock::kSuccFlagBusy)) {
1545 if (pred_block->is_writer()) {
1546 ASSERT_ND(pred_block->get_next_id() == my_tail_int);
1547 my_block->set_pred_id(pred);
1548 spin_until([my_block]{
return my_block->pred_flag_is_granted(); });
1550 my_block->set_next_flag_granted();
1551 adaptor_.remove_rw_async_mapping(lock);
1556 my_block->set_pred_id(pred);
1557 pred = my_block->xchg_pred_id(0);
1559 return cancel_writer_lock_no_pred(lock, my_block, my_tail_int);
1561 spin_until([my_block]{
return my_block->pred_flag_is_granted(); });
1562 my_block->set_next_flag_granted();
1563 adaptor_.remove_rw_async_mapping(lock);
1567 pred_block = adaptor_.dereference_rw_tail_block(pred);
1570 ASSERT_ND(pred_block->get_next_id() == my_tail_int);
1571 uint64_t desired = eflags |
1573 uint64_t expected = eflags | (
static_cast<uint64_t
>(my_tail_int) << 32);
1576 auto val = pred_block->cas_val_next_weak(expected, desired);
1577 if (val == expected) {
1584 if (!my_block->get_next_id() && lock->cas_tail_weak(my_tail_int, pred)) {
1585 pred_block->set_next_flag_no_successor();
1586 pred_block->set_next_id(0);
1587 adaptor_.remove_rw_async_mapping(lock);
1590 spin_until([my_block]{
return my_block->get_next_id() != 0; });
1592 ASSERT_ND(my_block->next_flag_is_leaving());
1593 uint32_t new_next_id = my_block->get_next_id();
1596 auto* succ_block = adaptor_.dereference_rw_tail_block(new_next_id);
1598 uint64_t successor = 0;
1599 if (my_block->next_flag_has_reader_successor()) {
1601 (static_cast<uint64_t>(new_next_id) << 32);
1602 }
else if (my_block->next_flag_has_writer_successor()) {
1604 (static_cast<uint64_t>(new_next_id) << 32);
1606 ASSERT_ND(pred_block->next_flag_has_writer_successor());
1611 uint64_t expected = 0, new_next = 0;
1612 expected = pred_block->get_next();
1614 bool wakeup =
false;
1616 if (pred_block->is_reader() && succ_block->is_reader()) {
1630 if (expected & McsRwExtendedBlock::kSuccFlagBusy) {
1633 if (!pred_block->cas_next_weak(expected, new_next)) {
1642 ASSERT_ND(succ_block->pred_flag_is_waiting());
1643 lock->increment_nreaders();
1644 succ_block->set_pred_flag_granted();
1650 return succ_block->cas_pred_id_strong(my_tail_int, pred);
1653 adaptor_.remove_rw_async_mapping(lock);
1658 McsRwLock* lock, McsRwExtendedBlock* my_block, uint32_t my_tail_int) {
1661 !my_block->pred_flag_is_waiting(); });
1662 if (my_block->pred_flag_is_granted() ||
1665 spin_until([my_block]{
return my_block->pred_flag_is_granted(); });
1666 my_block->set_next_flag_granted();
1667 adaptor_.remove_rw_async_mapping(lock);
1672 if (my_block->get_next_id() == 0 && lock->cas_tail_weak(my_tail_int, 0)) {
1673 adaptor_.remove_rw_async_mapping(lock);
1677 spin_until([my_block]{
return my_block->get_next_id() != 0; });
1678 auto next_id = my_block->get_next_id();
1682 auto* succ_block = adaptor_.dereference_rw_tail_block(next_id);
1683 ASSERT_ND(succ_block->pred_flag_is_waiting());
1684 if (succ_block->is_writer()) {
1685 ASSERT_ND(my_block->next_flag_has_writer_successor());
1688 lock->set_next_writer(next_id >> 16);
1690 return succ_block->cas_pred_id_strong(my_tail_int, 0);
1692 if (lock->nreaders() == 0) {
1698 succ_block->set_pred_flag_granted();
1703 ASSERT_ND(my_block->next_flag_has_reader_successor());
1707 lock->increment_nreaders();
1708 succ_block->set_pred_flag_granted();
1710 adaptor_.remove_rw_async_mapping(lock);
1722 template class McsWwImpl< McsMockAdaptor<McsRwSimpleBlock> >;
1723 template class McsWwImpl< thread::ThreadPimplMcsAdaptor<McsRwSimpleBlock> >;
1724 template class McsWwImpl< McsMockAdaptor<McsRwExtendedBlock> >;
1725 template class McsWwImpl< thread::ThreadPimplMcsAdaptor<McsRwExtendedBlock> >;
1727 template class McsImpl< McsMockAdaptor<McsRwSimpleBlock> , McsRwSimpleBlock>;
1728 template class McsImpl< McsMockAdaptor<McsRwExtendedBlock> , McsRwExtendedBlock>;
1729 template class McsImpl< thread::ThreadPimplMcsAdaptor<McsRwSimpleBlock> , McsRwSimpleBlock>;
1730 template class McsImpl< thread::ThreadPimplMcsAdaptor<McsRwExtendedBlock> , McsRwExtendedBlock>;
void reset_release() __attribute__((always_inline))
void reset_guest_id_release()
void cancel_async_rw_writer(McsRwLock *lock, McsBlockIndex block_index)
void release_rw_reader(McsRwLock *mcs_rw_lock, McsBlockIndex block_index)
bool retry_async_rw_reader(McsRwLock *lock, McsBlockIndex block_index)
McsBlockIndex initial(McsWwLock *lock)
[WW] This doesn't use any atomic operation.
static void ownerless_release(McsWwLock *lock)
void cancel_async_rw_writer(McsRwLock *, McsBlockIndex)
static uint32_t to_tail_int(thread::ThreadId tail_waiter, McsBlockIndex tail_waiter_block)
Exclusive-only (WW) MCS lock classes.
0x0AA3 : "XCTION : Lock acquire requested." .
Root package of FOEDUS (Fast Optimistic Engine for Data Unification Services).
thread::ThreadId get_tail_waiter() const __attribute__((always_inline))
This is a "relaxed" check.
uint32_t get_successor_thread_id_relaxed() const __attribute__((always_inline))
Carefully use this! In some places you must call copy_once() then call this on the copy...
uint64_t spin_until(COND spin_until_cond)
Spin locally until the given condition returns true.
static const uint32_t kSuccFlagBusy
bool successor_is_ready()
static const int32_t kTimeoutZero
static bool does_support_try_rw_reader()
Reader-writer (RW) MCS lock classes.
void release(McsWwLock *lock, McsBlockIndex block_index)
[WW] Unlock an MCS lock acquired by this thread.
bool is_blocked() __attribute__((always_inline))
void increment_nreaders()
bool is_guest_relaxed() const __attribute__((always_inline))
void release_rw_reader(McsRwLock *lock, McsBlockIndex block_index)
bool retry_async_rw_writer(McsRwLock *lock, McsBlockIndex block_index)
uint32_t get_thread_id_relaxed() const __attribute__((always_inline))
Carefully use this! In some places you must call get_word_once() then call this on the copy...
void cancel_async_rw_reader(McsRwLock *, McsBlockIndex)
void unblock() __attribute__((always_inline))
AcquireAsyncRet acquire_async_rw_reader(McsRwLock *lock)
Async acquire methods, passing timeout 0 will avoid cancelling upon timeout in the internal routines...
static const uint32_t kSuccFlagSuccessorReader
static void ownerless_initial(McsWwLock *lock)
bool retry_async_rw_reader(McsRwLock *lock, McsBlockIndex block_index)
[RW] Returns whether the lock request is now granted.
void clear_successor_release() __attribute__((always_inline))
thread::ThreadId successor_thread_id_
thread::ThreadId next_writer_
bool is_valid_relaxed() const __attribute__((always_inline))
McsBlockIndex acquire_unconditional_rw_reader(McsRwLock *lock)
thread::ThreadId get_next_writer()
Return value of acquire_async_rw.
Pre-allocated MCS block for WW-locks.
Definitions of IDs in this package and a few related constant values.
bool has_successor_acquire() const __attribute__((always_inline))
An exclusive-only (WW) MCS lock data structure.
Implements an MCS-locking Algorithm.
AcquireAsyncRet acquire_async_rw_writer(McsRwLock *lock)
AcquireAsyncRet acquire_async_rw_writer(McsRwLock *lock)
Pre-allocated MCS block for extended version of RW-locks.
static const uint32_t kPredIdAcquired
static const uint32_t kSuccIdSuccessorLeaving
static bool ownerless_acquire_try(McsWwLock *lock)
[WW-Guest] Try to take an exclusive guest lock on the given MCSg lock.
McsBlockIndex acquire_unconditional(McsWwLock *lock)
[WW] Unconditionally takes exclusive-only MCS lock on the given lock.
static const uint32_t kSuccFlagSuccessorWriter
static const uint32_t kSuccFlagMask
bool is_reader() __attribute__((always_inline))
const uint64_t kMcsGuestId
A special value meaning the lock is held by a non-regular guest that doesn't have a context...
void spin_until(COND spin_until_cond)
bool retry_async_rw_reader(McsRwLock *lock, McsBlockIndex block_index)
void release_rw_writer(McsRwLock *lock, McsBlockIndex block_index)
McsBlockIndex acquire_unconditional_rw_reader(McsRwLock *mcs_rw_lock)
static const int32_t kTimeoutNever
McsBlockIndex acquire_try(McsWwLock *lock)
[WW] Try to take an exclusive lock.
static const uint32_t kSuccFlagLeavingGranted
static const uint32_t kSuccFlagDirectGranted
bool is_locked() const
This is a "relaxed" check.
McsBlockIndex acquire_try_rw_writer(McsRwLock *lock)
Instant-try versions, won't leave node in the queue if failed.
void cancel_async_rw_reader(McsRwLock *lock, McsBlockIndex block_index)
McsBlockIndex acquire_try_rw_writer(McsRwLock *lock)
uint32_t McsBlockIndex
Index in thread-local MCS block.
static const uint32_t kSuccFlagLeaving
uint16_t ThreadId
Typedef for a global ID of Thread (core), which is unique across NUMA nodes.
static bool does_support_try_rw_reader()
Atomic fence methods and load/store with fences that work for both C++11/non-C++11 code...
McsBlockIndex acquire_unconditional_rw_writer(McsRwLock *mcs_rw_lock)
void assert_mcs_aligned(const void *address)
static const thread::ThreadId kNextWriterNone
bool retry_async_rw_writer(McsRwLock *lock, McsBlockIndex block_index)
McsBlockIndex acquire_unconditional_rw_writer(McsRwLock *lock)
static const uint32_t kSuccIdNoSuccessor
void yield_if_valgrind()
Use this in your while as a stop-gap before switching to spin_until().
McsBlockIndex get_block_relaxed() const __attribute__((always_inline))
Carefully use this! In some places you must call get_word_once() then call this on the copy...
#define UNLIKELY(x)
Hints that x is highly likely false.
#define ASSERT_ND(x)
A warning-free wrapper macro of assert() that has no performance effect in release mode even when 'x'...
bool retry_async_rw_writer(McsRwLock *lock, McsBlockIndex block_index)
uint64_t word_
The high 32-bits is thread_id, the low 32-bit is block-index.
McsBlockIndex acquire_try_rw_reader(McsRwLock *lock)
McsBlockIndex successor_block_index_
static void ownerless_acquire_unconditional(McsWwLock *lock)
[WW-Guest] Unconditionally takes exclusive-only guest lock on the given MCSg lock.
bool has_reader_successor()
AcquireAsyncRet acquire_async_rw_reader(McsRwLock *lock)
An MCS reader-writer lock data structure.
ErrorCode
Enum of error codes defined in error_code.xmacro.
0x0AA2 : "XCTION : Lock acquire cancelled." .
void release_rw_writer(McsRwLock *mcs_rw_lock, McsBlockIndex block_index)
McsBlockIndex acquire_try_rw_reader(McsRwLock *lock)
static const uint32_t kSuccFlagSuccessorClassMask
uint16_t decrement_nreaders()