*: some misc modify for testify (tikv#5299)
ref tikv#4813

Signed-off-by: lhy1024 <admin@liudos.us>
lhy1024 authored and CabinfeverB committed Jul 14, 2022
1 parent 1bc32ec commit 8b680a8
Showing 20 changed files with 172 additions and 194 deletions.
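This commit is part of the testify migration tracked in tikv#4813. Below is a hypothetical, minimal test (not part of the change set) collecting the assertion idioms it standardizes: expected-first argument order for Equal, Len/Empty for container sizes, and Error for error values.

```go
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestAssertionIdioms is a hypothetical test (not part of the commit) collecting
// the before/after assertion patterns this change applies across PD's tests.
func TestAssertionIdioms(t *testing.T) {
	re := require.New(t)

	steps := []string{"add-learner", "promote-learner", "remove-peer"}
	var waiting []string
	err := errors.New("assert failed")

	// Expected value first, actual second:
	//   before: re.Equal(len(steps), 3)   after: re.Equal(3, len(steps))
	re.Equal(3, len(steps))

	// Dedicated length/emptiness assertions instead of comparing lengths:
	//   before: re.Equal(3, len(steps))   after: re.Len(steps, 3)
	//   before: re.Len(waiting, 0)        after: re.Empty(waiting)
	re.Len(steps, 3)
	re.Empty(waiting)

	// Error values asserted with Error, not NotNil:
	//   before: re.NotNil(err)            after: re.Error(err)
	re.Error(err)
}
```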
2 changes: 1 addition & 1 deletion pkg/assertutil/assertutil_test.go
@@ -31,5 +31,5 @@ func TestNilFail(t *testing.T) {
}
re.Nil(checker.IsNil)
checker.AssertNil(nil)
- re.NotNil(failErr)
+ re.Error(failErr)
}
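For orientation (my illustration, names assumed): `failErr` is the error captured by the checker's fail handler, and `require.Error` states the intent, a non-nil error, more directly than `NotNil`, pairing naturally with the `NoError` assertions used elsewhere in these tests.

```go
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// Hypothetical reconstruction of the pattern in assertutil_test.go; the real
// Checker type lives in pkg/assertutil and is not reproduced here.
func TestFailHandlerRecordsError(t *testing.T) {
	re := require.New(t)

	var failErr error
	failHandler := func(msg string) { failErr = errors.New(msg) } // assumed shape

	failHandler("called assert nil")

	// before: re.NotNil(failErr) — any non-nil value passes
	// after:  re.Error(failErr)  — asserts specifically that a non-nil error was recorded
	re.Error(failErr)
}
```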
28 changes: 3 additions & 25 deletions pkg/testutil/operator_check.go
@@ -22,7 +22,7 @@ import (
// CheckTransferLeader checks if the operator is to transfer leader between the specified source and target stores.
func CheckTransferLeader(re *require.Assertions, op *operator.Operator, kind operator.OpKind, sourceID, targetID uint64) {
re.NotNil(op)
- re.Equal(op.Len(), 1)
+ re.Equal(1, op.Len())
re.Equal(operator.TransferLeader{FromStore: sourceID, ToStore: targetID}, op.Step(0))
kind |= operator.OpLeader
re.Equal(kind, op.Kind()&kind)
@@ -31,7 +31,7 @@ func CheckTransferLeader(re *require.Assertions, op *operator.Operator, kind ope
// CheckTransferLeaderFrom checks if the operator is to transfer leader out of the specified store.
func CheckTransferLeaderFrom(re *require.Assertions, op *operator.Operator, kind operator.OpKind, sourceID uint64) {
re.NotNil(op)
- re.Equal(op.Len(), 1)
+ re.Equal(1, op.Len())
re.Equal(sourceID, op.Step(0).(operator.TransferLeader).FromStore)
kind |= operator.OpLeader
re.Equal(kind, op.Kind()&kind)
@@ -40,7 +40,7 @@ func CheckTransferLeaderFrom(re *require.Assertions, op *operator.Operator, kind
// CheckMultiTargetTransferLeader checks if the operator is to transfer leader from the specified source to one of the target stores.
func CheckMultiTargetTransferLeader(re *require.Assertions, op *operator.Operator, kind operator.OpKind, sourceID uint64, targetIDs []uint64) {
re.NotNil(op)
- re.Equal(op.Len(), 1)
+ re.Equal(1, op.Len())
expectedOps := make([]interface{}, 0, len(targetIDs))
for _, targetID := range targetIDs {
expectedOps = append(expectedOps, operator.TransferLeader{FromStore: sourceID, ToStore: targetID, ToStores: targetIDs})
@@ -138,28 +138,6 @@ func CheckRemovePeer(re *require.Assertions, op *operator.Operator, storeID uint
}
}

- // CheckTransferLeaderWithTestify checks if the operator is to transfer leader between the specified source and target stores.
- func CheckTransferLeaderWithTestify(re *require.Assertions, op *operator.Operator, kind operator.OpKind, sourceID, targetID uint64) {
- re.NotNil(op)
- re.Equal(1, op.Len())
- re.Equal(operator.TransferLeader{FromStore: sourceID, ToStore: targetID}, op.Step(0))
- kind |= operator.OpLeader
- re.Equal(kind, op.Kind()&kind)
- }
-
- // CheckTransferPeerWithTestify checks if the operator is to transfer peer between the specified source and target stores.
- func CheckTransferPeerWithTestify(re *require.Assertions, op *operator.Operator, kind operator.OpKind, sourceID, targetID uint64) {
- re.NotNil(op)
-
- steps, _ := trimTransferLeaders(op)
- re.Len(steps, 3)
- re.Equal(targetID, steps[0].(operator.AddLearner).ToStore)
- re.IsType(operator.PromoteLearner{}, steps[1])
- re.Equal(sourceID, steps[2].(operator.RemovePeer).FromStore)
- kind |= operator.OpRegion
- re.Equal(kind, op.Kind()&kind)
- }
-
// CheckSteps checks if the operator matches the given steps.
func CheckSteps(re *require.Assertions, op *operator.Operator, steps []operator.OpStep) {
re.NotZero(op.Kind() & operator.OpMerge)
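The two *WithTestify helpers removed above duplicated CheckTransferLeader and CheckTransferPeer, which already take a *require.Assertions; call sites in coordinator_test.go (further down in this commit) simply switch back to the original names. A generic sketch of the helper style that remains (assumed names, not PD code):

```go
// A generic sketch of the operator_check.go helper style: assertion helpers
// receive the caller's *require.Assertions instead of a *testing.T, so the same
// helper works from plain tests and testify suites alike.
package example

import "github.com/stretchr/testify/require"

// checkSingleStep mirrors the shape of CheckTransferLeader above; the step type
// is simplified to a string here.
func checkSingleStep(re *require.Assertions, steps []string, want string) {
	re.NotNil(steps)
	re.Len(steps, 1)
	re.Equal(want, steps[0])
}
```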
8 changes: 4 additions & 4 deletions server/api/checker_test.go
@@ -57,7 +57,7 @@ func (suite *checkerTestSuite) TearDownSuite() {
func (suite *checkerTestSuite) TestAPI() {
suite.testErrCases()

- cases := []struct {
+ testCases := []struct {
name string
}{
{name: "learner"},
@@ -67,9 +67,9 @@ func (suite *checkerTestSuite) TestAPI() {
{name: "merge"},
{name: "joint-state"},
}
- for _, ca := range cases {
- suite.testGetStatus(ca.name)
- suite.testPauseOrResume(ca.name)
+ for _, testCase := range testCases {
+ suite.testGetStatus(testCase.name)
+ suite.testPauseOrResume(testCase.name)
}
}

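checker_test.go above and scheduler_test.go below are written as testify suites, and the cases → testCases renames in this commit land inside their suite methods. A rough, self-contained sketch of that structure (all names here are assumptions, not PD code):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

// exampleTestSuite sketches the suite style used by checkerTestSuite and
// scheduleTestSuite; suite.Suite embeds assertions bound to the suite's T.
type exampleTestSuite struct {
	suite.Suite
	testCases []struct{ name string }
}

func (s *exampleTestSuite) SetupSuite() {
	s.testCases = []struct{ name string }{{name: "learner"}, {name: "merge"}}
}

func (s *exampleTestSuite) TestNames() {
	for _, testCase := range s.testCases {
		s.NotEmpty(testCase.name)
	}
}

func TestExampleTestSuite(t *testing.T) {
	suite.Run(t, new(exampleTestSuite))
}
```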
42 changes: 21 additions & 21 deletions server/api/scheduler_test.go
@@ -120,7 +120,7 @@ func (suite *scheduleTestSuite) TestAPI() {
opt string
value interface{}
}
- cases := []struct {
+ testCases := []struct {
name string
createdName string
args []arg
@@ -321,25 +321,25 @@ func (suite *scheduleTestSuite) TestAPI() {
},
},
}
- for _, ca := range cases {
+ for _, testCase := range testCases {
input := make(map[string]interface{})
input["name"] = ca.name
for _, a := range ca.args {
input["name"] = testCase.name
for _, a := range testCase.args {
input[a.opt] = a.value
}
body, err := json.Marshal(input)
suite.NoError(err)
- suite.testPauseOrResume(ca.name, ca.createdName, body)
+ suite.testPauseOrResume(testCase.name, testCase.createdName, body)
}

// test pause and resume all schedulers.

// add schedulers.
- cases = cases[:3]
- for _, ca := range cases {
+ testCases = testCases[:3]
+ for _, testCase := range testCases {
input := make(map[string]interface{})
input["name"] = ca.name
for _, a := range ca.args {
input["name"] = testCase.name
for _, a := range testCase.args {
input[a.opt] = a.value
}
body, err := json.Marshal(input)
@@ -355,10 +355,10 @@ func (suite *scheduleTestSuite) TestAPI() {
err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/all", pauseArgs, tu.StatusOK(re))
suite.NoError(err)
handler := suite.svr.GetHandler()
- for _, ca := range cases {
- createdName := ca.createdName
+ for _, testCase := range testCases {
+ createdName := testCase.createdName
if createdName == "" {
- createdName = ca.name
+ createdName = testCase.name
}
isPaused, err := handler.IsSchedulerPaused(createdName)
suite.NoError(err)
@@ -370,10 +370,10 @@ func (suite *scheduleTestSuite) TestAPI() {
err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/all", pauseArgs, tu.StatusOK(re))
suite.NoError(err)
time.Sleep(time.Second)
- for _, ca := range cases {
- createdName := ca.createdName
+ for _, testCase := range testCases {
+ createdName := testCase.createdName
if createdName == "" {
- createdName = ca.name
+ createdName = testCase.name
}
isPaused, err := handler.IsSchedulerPaused(createdName)
suite.NoError(err)
@@ -391,21 +391,21 @@ func (suite *scheduleTestSuite) TestAPI() {
suite.NoError(err)
err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/all", pauseArgs, tu.StatusOK(re))
suite.NoError(err)
- for _, ca := range cases {
- createdName := ca.createdName
+ for _, testCase := range testCases {
+ createdName := testCase.createdName
if createdName == "" {
- createdName = ca.name
+ createdName = testCase.name
}
isPaused, err := handler.IsSchedulerPaused(createdName)
suite.NoError(err)
suite.False(isPaused)
}

// delete schedulers.
- for _, ca := range cases {
- createdName := ca.createdName
+ for _, testCase := range testCases {
+ createdName := testCase.createdName
if createdName == "" {
- createdName = ca.name
+ createdName = testCase.name
}
suite.deleteScheduler(createdName)
}
10 changes: 5 additions & 5 deletions server/cluster/cluster_test.go
@@ -137,7 +137,7 @@ func TestStoreHeartbeat(t *testing.T) {
re.NoError(cluster.HandleStoreHeartbeat(coldHeartBeat))
time.Sleep(20 * time.Millisecond)
storeStats = cluster.hotStat.RegionStats(statistics.Read, 1)
- re.Len(storeStats[1], 0)
+ re.Empty(storeStats[1])
// After hot heartbeat, we can find region 1 peer again
re.NoError(cluster.HandleStoreHeartbeat(hotHeartBeat))
time.Sleep(20 * time.Millisecond)
@@ -150,14 +150,14 @@ func TestStoreHeartbeat(t *testing.T) {
re.NoError(cluster.HandleStoreHeartbeat(coldHeartBeat))
time.Sleep(20 * time.Millisecond)
storeStats = cluster.hotStat.RegionStats(statistics.Read, 0)
- re.Len(storeStats[1], 0)
+ re.Empty(storeStats[1])
re.Nil(cluster.HandleStoreHeartbeat(hotHeartBeat))
time.Sleep(20 * time.Millisecond)
storeStats = cluster.hotStat.RegionStats(statistics.Read, 1)
re.Len(storeStats[1], 1)
re.Equal(uint64(1), storeStats[1][0].RegionID)
storeStats = cluster.hotStat.RegionStats(statistics.Read, 3)
- re.Len(storeStats[1], 0)
+ re.Empty(storeStats[1])
// after 2 hot heartbeats, wo can find region 1 peer again
re.NoError(cluster.HandleStoreHeartbeat(hotHeartBeat))
re.NoError(cluster.HandleStoreHeartbeat(hotHeartBeat))
@@ -614,7 +614,7 @@ func TestRegionHeartbeatHotStat(t *testing.T) {
time.Sleep(1 * time.Second)
stats = cluster.hotStat.RegionStats(statistics.Write, 0)
re.Len(stats[1], 1)
- re.Len(stats[2], 0)
+ re.Empty(stats[2])
re.Len(stats[3], 1)
re.Len(stats[4], 1)
}
@@ -675,7 +675,7 @@ func TestBucketHeartbeat(t *testing.T) {
newRegion2 := regions[1].Clone(core.WithIncConfVer(), core.SetBuckets(nil))
re.NoError(cluster.processRegionHeartbeat(newRegion2))
re.Nil(cluster.GetRegion(uint64(1)).GetBuckets())
- re.Len(cluster.GetRegion(uint64(1)).GetBuckets().GetKeys(), 0)
+ re.Empty(cluster.GetRegion(uint64(1)).GetBuckets().GetKeys())
}

func TestRegionHeartbeat(t *testing.T) {
26 changes: 13 additions & 13 deletions server/cluster/coordinator_test.go
@@ -200,10 +200,10 @@ func TestDispatch(t *testing.T) {

// Wait for schedule and turn off balance.
waitOperator(re, co, 1)
- testutil.CheckTransferPeerWithTestify(re, co.opController.GetOperator(1), operator.OpKind(0), 4, 1)
+ testutil.CheckTransferPeer(re, co.opController.GetOperator(1), operator.OpKind(0), 4, 1)
re.NoError(co.removeScheduler(schedulers.BalanceRegionName))
waitOperator(re, co, 2)
- testutil.CheckTransferLeaderWithTestify(re, co.opController.GetOperator(2), operator.OpKind(0), 4, 2)
+ testutil.CheckTransferLeader(re, co.opController.GetOperator(2), operator.OpKind(0), 4, 2)
re.NoError(co.removeScheduler(schedulers.BalanceLeaderName))

stream := mockhbstream.NewHeartbeatStream()
@@ -502,7 +502,7 @@ func TestCheckCache(t *testing.T) {
co.patrolRegions()
oc := co.opController
re.Len(oc.GetOperators(), 1)
- re.Len(co.checkers.GetWaitingRegions(), 0)
+ re.Empty(co.checkers.GetWaitingRegions())

// case 2: operator cannot be created due to store limit restriction
oc.RemoveOperator(oc.GetOperator(1))
@@ -517,7 +517,7 @@ func TestCheckCache(t *testing.T) {
co.wg.Add(1)
co.patrolRegions()
re.Len(oc.GetOperators(), 1)
- re.Len(co.checkers.GetWaitingRegions(), 0)
+ re.Empty(co.checkers.GetWaitingRegions())

co.wg.Wait()
re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/break-patrol"))
@@ -540,7 +540,7 @@ func TestPeerState(t *testing.T) {

// Wait for schedule.
waitOperator(re, co, 1)
- testutil.CheckTransferPeerWithTestify(re, co.opController.GetOperator(1), operator.OpKind(0), 4, 1)
+ testutil.CheckTransferPeer(re, co.opController.GetOperator(1), operator.OpKind(0), 4, 1)

region := tc.GetRegion(1).Clone()

@@ -671,7 +671,7 @@ func TestAddScheduler(t *testing.T) {
re.NoError(co.removeScheduler(schedulers.BalanceRegionName))
re.NoError(co.removeScheduler(schedulers.HotRegionName))
re.NoError(co.removeScheduler(schedulers.SplitBucketName))
- re.Len(co.schedulers, 0)
+ re.Empty(co.schedulers)

stream := mockhbstream.NewHeartbeatStream()

@@ -861,8 +861,8 @@ func TestRemoveScheduler(t *testing.T) {
// all removed
sches, _, err = storage.LoadAllScheduleConfig()
re.NoError(err)
- re.Len(sches, 0)
- re.Len(co.schedulers, 0)
+ re.Empty(sches)
+ re.Empty(co.schedulers)
re.NoError(co.cluster.opt.Persist(co.cluster.storage))
co.stop()
co.wg.Wait()
@@ -874,7 +874,7 @@ func TestRemoveScheduler(t *testing.T) {
tc.RaftCluster.opt = newOpt
co = newCoordinator(ctx, tc.RaftCluster, hbStreams)
co.run()
- re.Len(co.schedulers, 0)
+ re.Empty(co.schedulers)
// the option remains default scheduler
re.Len(co.cluster.opt.GetSchedulers(), 4)
co.stop()
@@ -1053,7 +1053,7 @@ func TestStoreOverloaded(t *testing.T) {
if time.Since(start) > time.Second {
break
}
- re.Len(ops, 0)
+ re.Empty(ops)
}

// reset all stores' limit
@@ -1103,7 +1103,7 @@ func TestStoreOverloadedWithReplace(t *testing.T) {
op3 := newTestOperator(1, tc.GetRegion(2).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 1, PeerID: 3})
re.False(oc.AddOperator(op3))
ops, _ := lb.Schedule(tc, false /* dryRun */)
- re.Len(ops, 0)
+ re.Empty(ops)
// sleep 2 seconds to make sure that token is filled up
time.Sleep(2 * time.Second)
ops, _ = lb.Schedule(tc, false /* dryRun */)
@@ -1186,7 +1186,7 @@ func TestController(t *testing.T) {

for i := schedulers.MinScheduleInterval; sc.GetInterval() != schedulers.MaxScheduleInterval; i = sc.GetNextInterval(i) {
re.Equal(i, sc.GetInterval())
- re.Len(sc.Schedule(), 0)
+ re.Empty(sc.Schedule())
}
// limit = 2
lb.limit = 2
@@ -1269,7 +1269,7 @@ func TestInterval(t *testing.T) {
for _, n := range idleSeconds {
sc.nextInterval = schedulers.MinScheduleInterval
for totalSleep := time.Duration(0); totalSleep <= time.Second*time.Duration(n); totalSleep += sc.GetInterval() {
- re.Len(sc.Schedule(), 0)
+ re.Empty(sc.Schedule())
}
re.Less(sc.GetInterval(), time.Second*time.Duration(n/2))
}
14 changes: 7 additions & 7 deletions server/cluster/unsafe_recovery_controller_test.go
@@ -195,7 +195,7 @@ func TestFinished(t *testing.T) {
recoveryController.HandleStoreHeartbeat(req, resp)
re.NotNil(resp.RecoveryPlan)
re.NotNil(resp.RecoveryPlan.ForceLeader)
- re.Equal(1, len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders))
+ re.Len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders, 1)
re.NotNil(resp.RecoveryPlan.ForceLeader.FailedStores)
applyRecoveryPlan(re, storeID, reports, resp)
}
@@ -207,7 +207,7 @@ func TestFinished(t *testing.T) {
resp := &pdpb.StoreHeartbeatResponse{}
recoveryController.HandleStoreHeartbeat(req, resp)
re.NotNil(resp.RecoveryPlan)
- re.Equal(1, len(resp.RecoveryPlan.Demotes))
+ re.Len(resp.RecoveryPlan.Demotes, 1)
applyRecoveryPlan(re, storeID, reports, resp)
}
re.Equal(demoteFailedVoter, recoveryController.GetStage())
@@ -274,7 +274,7 @@ func TestFailed(t *testing.T) {
recoveryController.HandleStoreHeartbeat(req, resp)
re.NotNil(resp.RecoveryPlan)
re.NotNil(resp.RecoveryPlan.ForceLeader)
- re.Equal(1, len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders))
+ re.Len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders, 1)
re.NotNil(resp.RecoveryPlan.ForceLeader.FailedStores)
applyRecoveryPlan(re, storeID, reports, resp)
}
@@ -286,7 +286,7 @@ func TestFailed(t *testing.T) {
resp := &pdpb.StoreHeartbeatResponse{}
recoveryController.HandleStoreHeartbeat(req, resp)
re.NotNil(resp.RecoveryPlan)
- re.Equal(1, len(resp.RecoveryPlan.Demotes))
+ re.Len(resp.RecoveryPlan.Demotes, 1)
applyRecoveryPlan(re, storeID, reports, resp)
}
re.Equal(demoteFailedVoter, recoveryController.GetStage())
@@ -433,7 +433,7 @@ func TestAffectedTableID(t *testing.T) {

advanceUntilFinished(re, recoveryController, reports)

- re.Equal(1, len(recoveryController.affectedTableIDs))
+ re.Len(recoveryController.affectedTableIDs, 1)
_, exists := recoveryController.affectedTableIDs[6]
re.True(exists)
}
@@ -494,7 +494,7 @@ func TestForceLeaderForCommitMerge(t *testing.T) {
// force leader on regions of commit merge first
re.NotNil(resp.RecoveryPlan)
re.NotNil(resp.RecoveryPlan.ForceLeader)
- re.Equal(1, len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders))
+ re.Len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders, 1)
re.Equal(uint64(1002), resp.RecoveryPlan.ForceLeader.EnterForceLeaders[0])
re.NotNil(resp.RecoveryPlan.ForceLeader.FailedStores)
applyRecoveryPlan(re, 1, reports, resp)
@@ -505,7 +505,7 @@ func TestForceLeaderForCommitMerge(t *testing.T) {
// force leader on the rest regions
re.NotNil(resp.RecoveryPlan)
re.NotNil(resp.RecoveryPlan.ForceLeader)
- re.Equal(1, len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders))
+ re.Len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders, 1)
re.Equal(uint64(1001), resp.RecoveryPlan.ForceLeader.EnterForceLeaders[0])
re.NotNil(resp.RecoveryPlan.ForceLeader.FailedStores)
applyRecoveryPlan(re, 1, reports, resp)
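The recovery-controller changes apply the same rewrite throughout: re.Len(xs, 1) in place of re.Equal(1, len(xs)). A small hypothetical illustration; as far as I recall, testify's Len also includes the container in its failure message, which Equal over two bare integers does not.

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Hypothetical illustration of the Len rewrite in unsafe_recovery_controller_test.go.
func TestEnterForceLeadersCount(t *testing.T) {
	re := require.New(t)

	// Stands in for resp.RecoveryPlan.ForceLeader.EnterForceLeaders.
	enterForceLeaders := []uint64{1002}

	// before: re.Equal(1, len(enterForceLeaders))
	// after:  re.Len(enterForceLeaders, 1)
	re.Len(enterForceLeaders, 1)
	re.Equal(uint64(1002), enterForceLeaders[0])
}
```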