diff --git a/src/yb/client/backup-txn-test.cc b/src/yb/client/backup-txn-test.cc
index c78fd479e6e6..6fdac177c353 100644
--- a/src/yb/client/backup-txn-test.cc
+++ b/src/yb/client/backup-txn-test.cc
@@ -205,6 +205,41 @@ TEST_F(BackupTxnTest, PointInTimeRestore) {
   ASSERT_NO_FATALS(VerifyData(/* num_transactions=*/ 1, WriteOpType::INSERT));
 }
 
+// This test writes a lot of updates to the same key.
+// Then it takes a snapshot and restores it to a time before the writes.
+// So we test how the filtering iterator works in the case where a lot of records should be skipped.
+TEST_F(BackupTxnTest, PointInTimeBigSkipRestore) {
+  constexpr int kNumWrites = RegularBuildVsSanitizers(100000, 100);
+  constexpr int kKey = 123;
+
+  std::vector<std::future<Status>> futures;
+  auto session = CreateSession();
+  ASSERT_OK(WriteRow(session, kKey, 0));
+  auto hybrid_time = cluster_->mini_tablet_server(0)->server()->Clock()->Now();
+  for (size_t r = 1; r <= kNumWrites; ++r) {
+    ASSERT_OK(WriteRow(session, kKey, r, WriteOpType::INSERT, client::Flush::kFalse));
+    futures.push_back(session->FlushFuture());
+  }
+
+  int good = 0;
+  for (auto& future : futures) {
+    if (future.get().ok()) {
+      ++good;
+    }
+  }
+
+  LOG(INFO) << "Total good: " << good;
+
+  auto snapshot_id = ASSERT_RESULT(CreateSnapshot());
+  ASSERT_OK(VerifySnapshot(snapshot_id, SysSnapshotEntryPB::COMPLETE));
+
+  ASSERT_OK(RestoreSnapshot(snapshot_id, hybrid_time));
+
+  auto value = ASSERT_RESULT(SelectRow(session, kKey));
+  ASSERT_EQ(value, 0);
+}
+
 TEST_F(BackupTxnTest, Persistence) {
   LOG(INFO) << "Write data";
 
diff --git a/src/yb/rocksdb/table/filtering_iterator.h b/src/yb/rocksdb/table/filtering_iterator.h
index 4d7a7b207560..5e7669e53aa7 100644
--- a/src/yb/rocksdb/table/filtering_iterator.h
+++ b/src/yb/rocksdb/table/filtering_iterator.h
@@ -103,9 +103,9 @@ class FilteringIterator : public InternalIterator {
         break;
       }
       if (!backward) {
-        Next();
+        iterator_->Next();
       } else {
-        Prev();
+        iterator_->Prev();
       }
     }
   }
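
Note (reviewer context, not part of the patch): my reading of the filtering_iterator.h hunk is that the skip loop previously advanced by calling the wrapper's own Next()/Prev(), which re-enters the filtering logic and adds a stack frame for every record that fails the filter; the new PointInTimeBigSkipRestore test produces on the order of 100k such overwritten versions, which can exhaust the stack. Calling iterator_->Next()/Prev() on the wrapped iterator keeps the skip inside a plain loop. Below is a minimal self-contained sketch of that wrapper pattern; VectorIterator and SimpleFilteringIterator are hypothetical illustration classes, not the RocksDB/YugabyteDB ones.

#include <cstddef>
#include <functional>
#include <iostream>
#include <utility>
#include <vector>

// Hypothetical stand-in for the wrapped (underlying) iterator.
class VectorIterator {
 public:
  explicit VectorIterator(const std::vector<int>* data) : data_(data) {}
  void SeekToFirst() { pos_ = 0; }
  void Next() { ++pos_; }
  bool Valid() const { return pos_ < data_->size(); }
  int value() const { return (*data_)[pos_]; }

 private:
  const std::vector<int>* data_;
  size_t pos_ = 0;
};

// Hypothetical filtering wrapper mirroring the fixed pattern: the skip loop
// advances the *wrapped* iterator directly, so skipping N consecutive
// rejected entries costs one loop, not N levels of recursion through the
// wrapper's own Next().
class SimpleFilteringIterator {
 public:
  SimpleFilteringIterator(VectorIterator* iterator, std::function<bool(int)> keep)
      : iterator_(iterator), keep_(std::move(keep)) {}

  void SeekToFirst() {
    iterator_->SeekToFirst();
    ApplyFilter();
  }

  void Next() {
    iterator_->Next();
    ApplyFilter();
  }

  bool Valid() const { return iterator_->Valid(); }
  int value() const { return iterator_->value(); }

 private:
  void ApplyFilter() {
    // The buggy variant would call this->Next() here, re-entering
    // ApplyFilter and recursing once per rejected entry.
    while (iterator_->Valid() && !keep_(iterator_->value())) {
      iterator_->Next();
    }
  }

  VectorIterator* iterator_;
  std::function<bool(int)> keep_;
};

int main() {
  std::vector<int> data(100000, 1);  // many entries the filter rejects
  data.push_back(42);                // the single entry we want to reach
  VectorIterator raw(&data);
  SimpleFilteringIterator it(&raw, [](int v) { return v == 42; });
  for (it.SeekToFirst(); it.Valid(); it.Next()) {
    std::cout << it.value() << std::endl;  // prints 42 with constant stack depth
  }
  return 0;
}

The essential point the sketch illustrates is that ApplyFilter advances the underlying iterator directly, so any number of consecutive rejected entries is skipped with constant stack depth.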