Skip to content

Commit

Permalink
Merge pull request #133 from wangtzh/develop
Browse files Browse the repository at this point in the history
Recalculate last_locked_entry by scanning all entries in RLL during lock upgrade. This fixes Issue #132.
  • Loading branch information
hkimura authored Sep 29, 2016
2 parents 6be08b3 + beac063 commit adad8f1
Show file tree
Hide file tree
Showing 3 changed files with 13 additions and 8 deletions.
4 changes: 2 additions & 2 deletions experiments-core/src/foedus/tpcc/tpcc_driver.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -96,8 +96,8 @@ DEFINE_int64(duration_micro, 10000000, "Duration of benchmark in microseconds.")
// We don't vary many things in this TPCC experiment, so we just represent the configurations
// by just one parameter.
DEFINE_int32(hcc_policy, 1, "Specifies configurations about HCC/MOCC."
" default: 0 (MOCC, RLL-on, threshold 10)"
" 1 (OCC, RLL-off, threshold 256)"
" 0 (MOCC, RLL-on, threshold 10)"
" default: 1 (OCC, RLL-off, threshold 256)"
" 2 (PCC, RLL-off, threshold 0)"
);

Expand Down
10 changes: 7 additions & 3 deletions foedus-core/include/foedus/xct/retrospective_lock_list.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -389,7 +389,7 @@ class CurrentLockList {
return get_entry(last_locked_entry_)->universal_lock_id_;
}
}
/** Calculate last_locked_entry_ by really checking the whole list. Usually for sanity checks */
/**
 * Calculate last_locked_entry_ by really checking the whole list.
 * Delegates to calculate_last_locked_entry_from() starting at
 * last_active_entry_, i.e. it rescans every active entry rather than
 * trusting the cached last_locked_entry_. Per this commit (Issue #132),
 * this is no longer just a sanity check: it is invoked after a
 * reader-lock release during lock upgrade, where the cached value may
 * be stale. Presumably O(active entries) — confirm in the _from() helper.
 */
LockListPosition calculate_last_locked_entry() const {
return calculate_last_locked_entry_from(last_active_entry_);
}
Expand Down Expand Up @@ -587,8 +587,12 @@ inline ErrorCode CurrentLockList::try_or_acquire_single_lock(
// We can release any lock anytime.. great flexibility!
mcs_rw_impl->release_rw_reader(lock_addr, lock_entry->mcs_block_);
lock_entry->taken_mode_ = kNoLock;
last_locked_entry_ = calculate_last_locked_entry_from(pos - 1U);
assert_last_locked_entry();
lock_entry->mcs_block_ = 0;
// Calculate last_locked_entry_ by scanning the whole list - during upgrade
// a reader-lock might get released and re-acquired in writer mode, violating
// canonical mode. In these cases last_locked_entry_ should not change unless
// the lock being upgraded is indeed the last entry.
last_locked_entry_ = calculate_last_locked_entry();
} else {
// This method is for unconditional acquire and try, not async/retry.
// If we have a queue node already, something was misused.
Expand Down
7 changes: 4 additions & 3 deletions foedus-core/src/foedus/xct/retrospective_lock_list.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -260,9 +260,6 @@ void RetrospectiveLockList::construct(thread::Thread* context, uint32_t read_loc
ASSERT_ND(capacity_ >= read_set_size + write_set_size);

last_active_entry_ = kLockListPositionInvalid;
if (read_set_size == 0 && write_set_size == 0) {
return;
}

for (uint32_t i = 0; i < read_set_size; ++i) {
RwLockableXctId* lock = read_set[i].owner_id_address_;
Expand Down Expand Up @@ -294,6 +291,10 @@ void RetrospectiveLockList::construct(thread::Thread* context, uint32_t read_loc
kNoLock);
}

if (last_active_entry_ == kLockListPositionInvalid) {
return;
}

// Now, the entries are not sorted and we might have duplicates.
// Sort them, and merge entries for the same record.
// std::set? no joke. we can't afford heap allocation here.
Expand Down

0 comments on commit adad8f1

Please sign in to comment.