From 8b680a89029a11a829f5a055acb3d35a473ddcee Mon Sep 17 00:00:00 2001 From: lhy1024 Date: Wed, 13 Jul 2022 14:35:06 +0800 Subject: [PATCH] *: some misc modify for testify (#5299) ref tikv/pd#4813 Signed-off-by: lhy1024 --- pkg/assertutil/assertutil_test.go | 2 +- pkg/testutil/operator_check.go | 28 +------ server/api/checker_test.go | 8 +- server/api/scheduler_test.go | 42 +++++----- server/cluster/cluster_test.go | 10 +-- server/cluster/coordinator_test.go | 26 +++--- .../unsafe_recovery_controller_test.go | 14 ++-- server/core/region_test.go | 82 +++++++++---------- .../checker/priority_inspector_test.go | 2 +- .../schedule/checker/replica_checker_test.go | 28 +++---- server/schedule/checker/rule_checker_test.go | 8 +- server/schedule/filter/filters_test.go | 10 +-- server/schedule/operator/step_test.go | 36 ++++---- server/schedule/placement/fit_test.go | 10 +-- server/schedulers/balance_test.go | 30 +++---- server/schedulers/hot_region_test.go | 2 +- server/statistics/hot_peer_cache_test.go | 12 +-- tests/pdctl/config/config_test.go | 12 +-- tests/server/cluster/cluster_test.go | 2 +- tests/server/member/member_test.go | 2 +- 20 files changed, 172 insertions(+), 194 deletions(-) diff --git a/pkg/assertutil/assertutil_test.go b/pkg/assertutil/assertutil_test.go index dcdbbd4252db..84bd21cef059 100644 --- a/pkg/assertutil/assertutil_test.go +++ b/pkg/assertutil/assertutil_test.go @@ -31,5 +31,5 @@ func TestNilFail(t *testing.T) { } re.Nil(checker.IsNil) checker.AssertNil(nil) - re.NotNil(failErr) + re.Error(failErr) } diff --git a/pkg/testutil/operator_check.go b/pkg/testutil/operator_check.go index 88d02a11a849..178cb9b575d4 100644 --- a/pkg/testutil/operator_check.go +++ b/pkg/testutil/operator_check.go @@ -22,7 +22,7 @@ import ( // CheckTransferLeader checks if the operator is to transfer leader between the specified source and target stores. func CheckTransferLeader(re *require.Assertions, op *operator.Operator, kind operator.OpKind, sourceID, targetID uint64) { re.NotNil(op) - re.Equal(op.Len(), 1) + re.Equal(1, op.Len()) re.Equal(operator.TransferLeader{FromStore: sourceID, ToStore: targetID}, op.Step(0)) kind |= operator.OpLeader re.Equal(kind, op.Kind()&kind) @@ -31,7 +31,7 @@ func CheckTransferLeader(re *require.Assertions, op *operator.Operator, kind ope // CheckTransferLeaderFrom checks if the operator is to transfer leader out of the specified store. func CheckTransferLeaderFrom(re *require.Assertions, op *operator.Operator, kind operator.OpKind, sourceID uint64) { re.NotNil(op) - re.Equal(op.Len(), 1) + re.Equal(1, op.Len()) re.Equal(sourceID, op.Step(0).(operator.TransferLeader).FromStore) kind |= operator.OpLeader re.Equal(kind, op.Kind()&kind) @@ -40,7 +40,7 @@ func CheckTransferLeaderFrom(re *require.Assertions, op *operator.Operator, kind // CheckMultiTargetTransferLeader checks if the operator is to transfer leader from the specified source to one of the target stores. 
func CheckMultiTargetTransferLeader(re *require.Assertions, op *operator.Operator, kind operator.OpKind, sourceID uint64, targetIDs []uint64) { re.NotNil(op) - re.Equal(op.Len(), 1) + re.Equal(1, op.Len()) expectedOps := make([]interface{}, 0, len(targetIDs)) for _, targetID := range targetIDs { expectedOps = append(expectedOps, operator.TransferLeader{FromStore: sourceID, ToStore: targetID, ToStores: targetIDs}) @@ -138,28 +138,6 @@ func CheckRemovePeer(re *require.Assertions, op *operator.Operator, storeID uint } } -// CheckTransferLeaderWithTestify checks if the operator is to transfer leader between the specified source and target stores. -func CheckTransferLeaderWithTestify(re *require.Assertions, op *operator.Operator, kind operator.OpKind, sourceID, targetID uint64) { - re.NotNil(op) - re.Equal(1, op.Len()) - re.Equal(operator.TransferLeader{FromStore: sourceID, ToStore: targetID}, op.Step(0)) - kind |= operator.OpLeader - re.Equal(kind, op.Kind()&kind) -} - -// CheckTransferPeerWithTestify checks if the operator is to transfer peer between the specified source and target stores. -func CheckTransferPeerWithTestify(re *require.Assertions, op *operator.Operator, kind operator.OpKind, sourceID, targetID uint64) { - re.NotNil(op) - - steps, _ := trimTransferLeaders(op) - re.Len(steps, 3) - re.Equal(targetID, steps[0].(operator.AddLearner).ToStore) - re.IsType(operator.PromoteLearner{}, steps[1]) - re.Equal(sourceID, steps[2].(operator.RemovePeer).FromStore) - kind |= operator.OpRegion - re.Equal(kind, op.Kind()&kind) -} - // CheckSteps checks if the operator matches the given steps. func CheckSteps(re *require.Assertions, op *operator.Operator, steps []operator.OpStep) { re.NotZero(op.Kind() & operator.OpMerge) diff --git a/server/api/checker_test.go b/server/api/checker_test.go index d40a61f93a8a..f2d0d37ec5b1 100644 --- a/server/api/checker_test.go +++ b/server/api/checker_test.go @@ -57,7 +57,7 @@ func (suite *checkerTestSuite) TearDownSuite() { func (suite *checkerTestSuite) TestAPI() { suite.testErrCases() - cases := []struct { + testCases := []struct { name string }{ {name: "learner"}, @@ -67,9 +67,9 @@ func (suite *checkerTestSuite) TestAPI() { {name: "merge"}, {name: "joint-state"}, } - for _, ca := range cases { - suite.testGetStatus(ca.name) - suite.testPauseOrResume(ca.name) + for _, testCase := range testCases { + suite.testGetStatus(testCase.name) + suite.testPauseOrResume(testCase.name) } } diff --git a/server/api/scheduler_test.go b/server/api/scheduler_test.go index 3618bad98e58..54308c726dda 100644 --- a/server/api/scheduler_test.go +++ b/server/api/scheduler_test.go @@ -120,7 +120,7 @@ func (suite *scheduleTestSuite) TestAPI() { opt string value interface{} } - cases := []struct { + testCases := []struct { name string createdName string args []arg @@ -321,25 +321,25 @@ func (suite *scheduleTestSuite) TestAPI() { }, }, } - for _, ca := range cases { + for _, testCase := range testCases { input := make(map[string]interface{}) - input["name"] = ca.name - for _, a := range ca.args { + input["name"] = testCase.name + for _, a := range testCase.args { input[a.opt] = a.value } body, err := json.Marshal(input) suite.NoError(err) - suite.testPauseOrResume(ca.name, ca.createdName, body) + suite.testPauseOrResume(testCase.name, testCase.createdName, body) } // test pause and resume all schedulers. // add schedulers. 
- cases = cases[:3] - for _, ca := range cases { + testCases = testCases[:3] + for _, testCase := range testCases { input := make(map[string]interface{}) - input["name"] = ca.name - for _, a := range ca.args { + input["name"] = testCase.name + for _, a := range testCase.args { input[a.opt] = a.value } body, err := json.Marshal(input) @@ -355,10 +355,10 @@ func (suite *scheduleTestSuite) TestAPI() { err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/all", pauseArgs, tu.StatusOK(re)) suite.NoError(err) handler := suite.svr.GetHandler() - for _, ca := range cases { - createdName := ca.createdName + for _, testCase := range testCases { + createdName := testCase.createdName if createdName == "" { - createdName = ca.name + createdName = testCase.name } isPaused, err := handler.IsSchedulerPaused(createdName) suite.NoError(err) @@ -370,10 +370,10 @@ func (suite *scheduleTestSuite) TestAPI() { err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/all", pauseArgs, tu.StatusOK(re)) suite.NoError(err) time.Sleep(time.Second) - for _, ca := range cases { - createdName := ca.createdName + for _, testCase := range testCases { + createdName := testCase.createdName if createdName == "" { - createdName = ca.name + createdName = testCase.name } isPaused, err := handler.IsSchedulerPaused(createdName) suite.NoError(err) @@ -391,10 +391,10 @@ func (suite *scheduleTestSuite) TestAPI() { suite.NoError(err) err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/all", pauseArgs, tu.StatusOK(re)) suite.NoError(err) - for _, ca := range cases { - createdName := ca.createdName + for _, testCase := range testCases { + createdName := testCase.createdName if createdName == "" { - createdName = ca.name + createdName = testCase.name } isPaused, err := handler.IsSchedulerPaused(createdName) suite.NoError(err) @@ -402,10 +402,10 @@ func (suite *scheduleTestSuite) TestAPI() { } // delete schedulers. 
- for _, ca := range cases { - createdName := ca.createdName + for _, testCase := range testCases { + createdName := testCase.createdName if createdName == "" { - createdName = ca.name + createdName = testCase.name } suite.deleteScheduler(createdName) } diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index 1d2120744e14..a114fcbde1dc 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -137,7 +137,7 @@ func TestStoreHeartbeat(t *testing.T) { re.NoError(cluster.HandleStoreHeartbeat(coldHeartBeat)) time.Sleep(20 * time.Millisecond) storeStats = cluster.hotStat.RegionStats(statistics.Read, 1) - re.Len(storeStats[1], 0) + re.Empty(storeStats[1]) // After hot heartbeat, we can find region 1 peer again re.NoError(cluster.HandleStoreHeartbeat(hotHeartBeat)) time.Sleep(20 * time.Millisecond) @@ -150,14 +150,14 @@ func TestStoreHeartbeat(t *testing.T) { re.NoError(cluster.HandleStoreHeartbeat(coldHeartBeat)) time.Sleep(20 * time.Millisecond) storeStats = cluster.hotStat.RegionStats(statistics.Read, 0) - re.Len(storeStats[1], 0) + re.Empty(storeStats[1]) re.Nil(cluster.HandleStoreHeartbeat(hotHeartBeat)) time.Sleep(20 * time.Millisecond) storeStats = cluster.hotStat.RegionStats(statistics.Read, 1) re.Len(storeStats[1], 1) re.Equal(uint64(1), storeStats[1][0].RegionID) storeStats = cluster.hotStat.RegionStats(statistics.Read, 3) - re.Len(storeStats[1], 0) + re.Empty(storeStats[1]) // after 2 hot heartbeats, wo can find region 1 peer again re.NoError(cluster.HandleStoreHeartbeat(hotHeartBeat)) re.NoError(cluster.HandleStoreHeartbeat(hotHeartBeat)) @@ -614,7 +614,7 @@ func TestRegionHeartbeatHotStat(t *testing.T) { time.Sleep(1 * time.Second) stats = cluster.hotStat.RegionStats(statistics.Write, 0) re.Len(stats[1], 1) - re.Len(stats[2], 0) + re.Empty(stats[2]) re.Len(stats[3], 1) re.Len(stats[4], 1) } @@ -675,7 +675,7 @@ func TestBucketHeartbeat(t *testing.T) { newRegion2 := regions[1].Clone(core.WithIncConfVer(), core.SetBuckets(nil)) re.NoError(cluster.processRegionHeartbeat(newRegion2)) re.Nil(cluster.GetRegion(uint64(1)).GetBuckets()) - re.Len(cluster.GetRegion(uint64(1)).GetBuckets().GetKeys(), 0) + re.Empty(cluster.GetRegion(uint64(1)).GetBuckets().GetKeys()) } func TestRegionHeartbeat(t *testing.T) { diff --git a/server/cluster/coordinator_test.go b/server/cluster/coordinator_test.go index ae5036343a56..0cca1420ce01 100644 --- a/server/cluster/coordinator_test.go +++ b/server/cluster/coordinator_test.go @@ -200,10 +200,10 @@ func TestDispatch(t *testing.T) { // Wait for schedule and turn off balance. 
waitOperator(re, co, 1) - testutil.CheckTransferPeerWithTestify(re, co.opController.GetOperator(1), operator.OpKind(0), 4, 1) + testutil.CheckTransferPeer(re, co.opController.GetOperator(1), operator.OpKind(0), 4, 1) re.NoError(co.removeScheduler(schedulers.BalanceRegionName)) waitOperator(re, co, 2) - testutil.CheckTransferLeaderWithTestify(re, co.opController.GetOperator(2), operator.OpKind(0), 4, 2) + testutil.CheckTransferLeader(re, co.opController.GetOperator(2), operator.OpKind(0), 4, 2) re.NoError(co.removeScheduler(schedulers.BalanceLeaderName)) stream := mockhbstream.NewHeartbeatStream() @@ -502,7 +502,7 @@ func TestCheckCache(t *testing.T) { co.patrolRegions() oc := co.opController re.Len(oc.GetOperators(), 1) - re.Len(co.checkers.GetWaitingRegions(), 0) + re.Empty(co.checkers.GetWaitingRegions()) // case 2: operator cannot be created due to store limit restriction oc.RemoveOperator(oc.GetOperator(1)) @@ -517,7 +517,7 @@ func TestCheckCache(t *testing.T) { co.wg.Add(1) co.patrolRegions() re.Len(oc.GetOperators(), 1) - re.Len(co.checkers.GetWaitingRegions(), 0) + re.Empty(co.checkers.GetWaitingRegions()) co.wg.Wait() re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/break-patrol")) @@ -540,7 +540,7 @@ func TestPeerState(t *testing.T) { // Wait for schedule. waitOperator(re, co, 1) - testutil.CheckTransferPeerWithTestify(re, co.opController.GetOperator(1), operator.OpKind(0), 4, 1) + testutil.CheckTransferPeer(re, co.opController.GetOperator(1), operator.OpKind(0), 4, 1) region := tc.GetRegion(1).Clone() @@ -671,7 +671,7 @@ func TestAddScheduler(t *testing.T) { re.NoError(co.removeScheduler(schedulers.BalanceRegionName)) re.NoError(co.removeScheduler(schedulers.HotRegionName)) re.NoError(co.removeScheduler(schedulers.SplitBucketName)) - re.Len(co.schedulers, 0) + re.Empty(co.schedulers) stream := mockhbstream.NewHeartbeatStream() @@ -861,8 +861,8 @@ func TestRemoveScheduler(t *testing.T) { // all removed sches, _, err = storage.LoadAllScheduleConfig() re.NoError(err) - re.Len(sches, 0) - re.Len(co.schedulers, 0) + re.Empty(sches) + re.Empty(co.schedulers) re.NoError(co.cluster.opt.Persist(co.cluster.storage)) co.stop() co.wg.Wait() @@ -874,7 +874,7 @@ func TestRemoveScheduler(t *testing.T) { tc.RaftCluster.opt = newOpt co = newCoordinator(ctx, tc.RaftCluster, hbStreams) co.run() - re.Len(co.schedulers, 0) + re.Empty(co.schedulers) // the option remains default scheduler re.Len(co.cluster.opt.GetSchedulers(), 4) co.stop() @@ -1053,7 +1053,7 @@ func TestStoreOverloaded(t *testing.T) { if time.Since(start) > time.Second { break } - re.Len(ops, 0) + re.Empty(ops) } // reset all stores' limit @@ -1103,7 +1103,7 @@ func TestStoreOverloadedWithReplace(t *testing.T) { op3 := newTestOperator(1, tc.GetRegion(2).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 1, PeerID: 3}) re.False(oc.AddOperator(op3)) ops, _ := lb.Schedule(tc, false /* dryRun */) - re.Len(ops, 0) + re.Empty(ops) // sleep 2 seconds to make sure that token is filled up time.Sleep(2 * time.Second) ops, _ = lb.Schedule(tc, false /* dryRun */) @@ -1186,7 +1186,7 @@ func TestController(t *testing.T) { for i := schedulers.MinScheduleInterval; sc.GetInterval() != schedulers.MaxScheduleInterval; i = sc.GetNextInterval(i) { re.Equal(i, sc.GetInterval()) - re.Len(sc.Schedule(), 0) + re.Empty(sc.Schedule()) } // limit = 2 lb.limit = 2 @@ -1269,7 +1269,7 @@ func TestInterval(t *testing.T) { for _, n := range idleSeconds { sc.nextInterval = schedulers.MinScheduleInterval for totalSleep := 
time.Duration(0); totalSleep <= time.Second*time.Duration(n); totalSleep += sc.GetInterval() { - re.Len(sc.Schedule(), 0) + re.Empty(sc.Schedule()) } re.Less(sc.GetInterval(), time.Second*time.Duration(n/2)) } diff --git a/server/cluster/unsafe_recovery_controller_test.go b/server/cluster/unsafe_recovery_controller_test.go index 2b3717dabd66..ebb2c917b636 100644 --- a/server/cluster/unsafe_recovery_controller_test.go +++ b/server/cluster/unsafe_recovery_controller_test.go @@ -195,7 +195,7 @@ func TestFinished(t *testing.T) { recoveryController.HandleStoreHeartbeat(req, resp) re.NotNil(resp.RecoveryPlan) re.NotNil(resp.RecoveryPlan.ForceLeader) - re.Equal(1, len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders)) + re.Len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders, 1) re.NotNil(resp.RecoveryPlan.ForceLeader.FailedStores) applyRecoveryPlan(re, storeID, reports, resp) } @@ -207,7 +207,7 @@ func TestFinished(t *testing.T) { resp := &pdpb.StoreHeartbeatResponse{} recoveryController.HandleStoreHeartbeat(req, resp) re.NotNil(resp.RecoveryPlan) - re.Equal(1, len(resp.RecoveryPlan.Demotes)) + re.Len(resp.RecoveryPlan.Demotes, 1) applyRecoveryPlan(re, storeID, reports, resp) } re.Equal(demoteFailedVoter, recoveryController.GetStage()) @@ -274,7 +274,7 @@ func TestFailed(t *testing.T) { recoveryController.HandleStoreHeartbeat(req, resp) re.NotNil(resp.RecoveryPlan) re.NotNil(resp.RecoveryPlan.ForceLeader) - re.Equal(1, len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders)) + re.Len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders, 1) re.NotNil(resp.RecoveryPlan.ForceLeader.FailedStores) applyRecoveryPlan(re, storeID, reports, resp) } @@ -286,7 +286,7 @@ func TestFailed(t *testing.T) { resp := &pdpb.StoreHeartbeatResponse{} recoveryController.HandleStoreHeartbeat(req, resp) re.NotNil(resp.RecoveryPlan) - re.Equal(1, len(resp.RecoveryPlan.Demotes)) + re.Len(resp.RecoveryPlan.Demotes, 1) applyRecoveryPlan(re, storeID, reports, resp) } re.Equal(demoteFailedVoter, recoveryController.GetStage()) @@ -433,7 +433,7 @@ func TestAffectedTableID(t *testing.T) { advanceUntilFinished(re, recoveryController, reports) - re.Equal(1, len(recoveryController.affectedTableIDs)) + re.Len(recoveryController.affectedTableIDs, 1) _, exists := recoveryController.affectedTableIDs[6] re.True(exists) } @@ -494,7 +494,7 @@ func TestForceLeaderForCommitMerge(t *testing.T) { // force leader on regions of commit merge first re.NotNil(resp.RecoveryPlan) re.NotNil(resp.RecoveryPlan.ForceLeader) - re.Equal(1, len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders)) + re.Len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders, 1) re.Equal(uint64(1002), resp.RecoveryPlan.ForceLeader.EnterForceLeaders[0]) re.NotNil(resp.RecoveryPlan.ForceLeader.FailedStores) applyRecoveryPlan(re, 1, reports, resp) @@ -505,7 +505,7 @@ func TestForceLeaderForCommitMerge(t *testing.T) { // force leader on the rest regions re.NotNil(resp.RecoveryPlan) re.NotNil(resp.RecoveryPlan.ForceLeader) - re.Equal(1, len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders)) + re.Len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders, 1) re.Equal(uint64(1001), resp.RecoveryPlan.ForceLeader.EnterForceLeaders[0]) re.NotNil(resp.RecoveryPlan.ForceLeader.FailedStores) applyRecoveryPlan(re, 1, reports, resp) diff --git a/server/core/region_test.go b/server/core/region_test.go index a86a8490bd80..451cb1e93043 100644 --- a/server/core/region_test.go +++ b/server/core/region_test.go @@ -146,43 +146,43 @@ func TestSortedEqual(t *testing.T) { return peers } // test NewRegionInfo - for _, test 
:= range testCases { - regionA := NewRegionInfo(&metapb.Region{Id: 100, Peers: pickPeers(test.idsA)}, nil) - regionB := NewRegionInfo(&metapb.Region{Id: 100, Peers: pickPeers(test.idsB)}, nil) - re.Equal(test.isEqual, SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters())) - re.Equal(test.isEqual, SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters())) + for _, testCase := range testCases { + regionA := NewRegionInfo(&metapb.Region{Id: 100, Peers: pickPeers(testCase.idsA)}, nil) + regionB := NewRegionInfo(&metapb.Region{Id: 100, Peers: pickPeers(testCase.idsB)}, nil) + re.Equal(testCase.isEqual, SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters())) + re.Equal(testCase.isEqual, SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters())) } // test RegionFromHeartbeat - for _, test := range testCases { + for _, testCase := range testCases { regionA := RegionFromHeartbeat(&pdpb.RegionHeartbeatRequest{ - Region: &metapb.Region{Id: 100, Peers: pickPeers(test.idsA)}, - DownPeers: pickPeerStats(test.idsA), - PendingPeers: pickPeers(test.idsA), + Region: &metapb.Region{Id: 100, Peers: pickPeers(testCase.idsA)}, + DownPeers: pickPeerStats(testCase.idsA), + PendingPeers: pickPeers(testCase.idsA), }) regionB := RegionFromHeartbeat(&pdpb.RegionHeartbeatRequest{ - Region: &metapb.Region{Id: 100, Peers: pickPeers(test.idsB)}, - DownPeers: pickPeerStats(test.idsB), - PendingPeers: pickPeers(test.idsB), + Region: &metapb.Region{Id: 100, Peers: pickPeers(testCase.idsB)}, + DownPeers: pickPeerStats(testCase.idsB), + PendingPeers: pickPeers(testCase.idsB), }) - re.Equal(test.isEqual, SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters())) - re.Equal(test.isEqual, SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters())) - re.Equal(test.isEqual, SortedPeersEqual(regionA.GetPendingPeers(), regionB.GetPendingPeers())) - re.Equal(test.isEqual, SortedPeersStatsEqual(regionA.GetDownPeers(), regionB.GetDownPeers())) + re.Equal(testCase.isEqual, SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters())) + re.Equal(testCase.isEqual, SortedPeersEqual(regionA.GetVoters(), regionB.GetVoters())) + re.Equal(testCase.isEqual, SortedPeersEqual(regionA.GetPendingPeers(), regionB.GetPendingPeers())) + re.Equal(testCase.isEqual, SortedPeersStatsEqual(regionA.GetDownPeers(), regionB.GetDownPeers())) } // test Clone region := NewRegionInfo(meta, meta.Peers[0]) - for _, test := range testCases { - downPeersA := pickPeerStats(test.idsA) - downPeersB := pickPeerStats(test.idsB) - pendingPeersA := pickPeers(test.idsA) - pendingPeersB := pickPeers(test.idsB) + for _, testCase := range testCases { + downPeersA := pickPeerStats(testCase.idsA) + downPeersB := pickPeerStats(testCase.idsB) + pendingPeersA := pickPeers(testCase.idsA) + pendingPeersB := pickPeers(testCase.idsB) regionA := region.Clone(WithDownPeers(downPeersA), WithPendingPeers(pendingPeersA)) regionB := region.Clone(WithDownPeers(downPeersB), WithPendingPeers(pendingPeersB)) - re.Equal(test.isEqual, SortedPeersStatsEqual(regionA.GetDownPeers(), regionB.GetDownPeers())) - re.Equal(test.isEqual, SortedPeersEqual(regionA.GetPendingPeers(), regionB.GetPendingPeers())) + re.Equal(testCase.isEqual, SortedPeersStatsEqual(regionA.GetDownPeers(), regionB.GetDownPeers())) + re.Equal(testCase.isEqual, SortedPeersEqual(regionA.GetPendingPeers(), regionB.GetPendingPeers())) } } @@ -202,16 +202,16 @@ func TestInherit(t *testing.T) { {true, 1, 2, 2}, {true, 2, 0, 2}, } - for _, test := range testCases { + for _, testCase := range testCases { var origin *RegionInfo - 
if test.originExists { + if testCase.originExists { origin = NewRegionInfo(&metapb.Region{Id: 100}, nil) - origin.approximateSize = int64(test.originSize) + origin.approximateSize = int64(testCase.originSize) } r := NewRegionInfo(&metapb.Region{Id: 100}, nil) - r.approximateSize = int64(test.size) + r.approximateSize = int64(testCase.size) r.Inherit(origin, false) - re.Equal(int64(test.expect), r.approximateSize) + re.Equal(int64(testCase.expect), r.approximateSize) } // bucket @@ -254,11 +254,11 @@ func TestRegionRoundingFlow(t *testing.T) { {252623, math.MaxInt64, 0}, {252623, math.MinInt64, 252623}, } - for _, test := range testCases { - r := NewRegionInfo(&metapb.Region{Id: 100}, nil, WithFlowRoundByDigit(test.digit)) - r.readBytes = test.flow - r.writtenBytes = test.flow - re.Equal(test.expect, r.GetRoundBytesRead()) + for _, testCase := range testCases { + r := NewRegionInfo(&metapb.Region{Id: 100}, nil, WithFlowRoundByDigit(testCase.digit)) + r.readBytes = testCase.flow + r.writtenBytes = testCase.flow + re.Equal(testCase.expect, r.GetRoundBytesRead()) } } @@ -280,11 +280,11 @@ func TestRegionWriteRate(t *testing.T) { {0, 0, 500, 0, 0}, {10, 3, 500, 0, 0}, } - for _, test := range testCases { - r := NewRegionInfo(&metapb.Region{Id: 100}, nil, SetWrittenBytes(test.bytes), SetWrittenKeys(test.keys), SetReportInterval(test.interval)) + for _, testCase := range testCases { + r := NewRegionInfo(&metapb.Region{Id: 100}, nil, SetWrittenBytes(testCase.bytes), SetWrittenKeys(testCase.keys), SetReportInterval(testCase.interval)) bytesRate, keysRate := r.GetWriteRate() - re.Equal(test.expectBytesRate, bytesRate) - re.Equal(test.expectKeysRate, keysRate) + re.Equal(testCase.expectBytesRate, bytesRate) + re.Equal(testCase.expectKeysRate, keysRate) } } @@ -357,11 +357,11 @@ func TestNeedSync(t *testing.T) { }, } - for _, test := range testCases { - regionA := region.Clone(test.optionsA...) - regionB := region.Clone(test.optionsB...) + for _, testCase := range testCases { + regionA := region.Clone(testCase.optionsA...) + regionB := region.Clone(testCase.optionsB...) 
_, _, _, needSync := RegionGuide(regionA, regionB) - re.Equal(test.needSync, needSync) + re.Equal(testCase.needSync, needSync) } } diff --git a/server/schedule/checker/priority_inspector_test.go b/server/schedule/checker/priority_inspector_test.go index 9e0b0264a454..b736ecc80970 100644 --- a/server/schedule/checker/priority_inspector_test.go +++ b/server/schedule/checker/priority_inspector_test.go @@ -56,7 +56,7 @@ func checkPriorityRegionTest(re *require.Assertions, pc *PriorityInspector, tc * pc.Inspect(region) re.Equal(1, pc.queue.Len()) // the region will not rerun after it checks - re.Len(pc.GetPriorityRegions(), 0) + re.Empty(pc.GetPriorityRegions()) // case3: inspect region 3, it will has high priority region = tc.GetRegion(3) diff --git a/server/schedule/checker/replica_checker_test.go b/server/schedule/checker/replica_checker_test.go index c7a1f13fb246..7a8fad9ee947 100644 --- a/server/schedule/checker/replica_checker_test.go +++ b/server/schedule/checker/replica_checker_test.go @@ -271,13 +271,13 @@ func (suite *replicaCheckerTestSuite) TestBasic() { } region = region.Clone(core.WithDownPeers(append(region.GetDownPeers(), downPeer))) - testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 2, 1) + testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 2, 1) region = region.Clone(core.WithDownPeers(nil)) suite.Nil(rc.Check(region)) // Peer in store 3 is offline, transfer peer to store 1. tc.SetStoreOffline(3) - testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3, 1) + testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3, 1) } func (suite *replicaCheckerTestSuite) TestLostStore() { @@ -339,14 +339,14 @@ func (suite *replicaCheckerTestSuite) TestOffline() { region = region.Clone(core.WithRemoveStorePeer(4)) // the number of region peers equals the maxReplicas // Transfer peer to store 4. - testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3, 4) + testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3, 4) // Store 5 has a same label score with store 4, but the region score smaller than store 4, we will choose store 5. tc.AddLabelsStore(5, 3, map[string]string{"zone": "z4", "rack": "r1", "host": "h1"}) - testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3, 5) + testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3, 5) // Store 5 has too many snapshots, choose store 4 tc.UpdateSnapshotCount(5, 100) - testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3, 4) + testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3, 4) tc.UpdatePendingPeerCount(4, 100) suite.Nil(rc.Check(region)) } @@ -401,7 +401,7 @@ func (suite *replicaCheckerTestSuite) TestDistinctScore() { region = region.Clone(core.WithAddPeer(peer7)) // Replace peer in store 1 with store 6 because it has a different rack. - testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 1, 6) + testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 1, 6) // Disable locationReplacement feature. tc.SetEnableLocationReplacement(false) suite.Nil(rc.Check(region)) @@ -422,7 +422,7 @@ func (suite *replicaCheckerTestSuite) TestDistinctScore() { // Store 2 and 6 have the same distinct score, but store 2 has larger region score. 
// So replace peer in store 2 with store 10. tc.AddLabelsStore(10, 1, map[string]string{"zone": "z3", "rack": "r1", "host": "h1"}) - testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 2, 10) + testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 2, 10) peer10, _ := tc.AllocPeer(10) region = region.Clone(core.WithAddPeer(peer10)) testutil.CheckRemovePeer(suite.Require(), rc.Check(region), 2) @@ -481,7 +481,7 @@ func (suite *replicaCheckerTestSuite) TestStorageThreshold() { // Move peer to better location. tc.UpdateStorageRatio(4, 0, 1) - testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 1, 4) + testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 1, 4) // If store4 is almost full, do not add peer on it. tc.UpdateStorageRatio(4, 0.9, 0.1) suite.Nil(rc.Check(region)) @@ -519,9 +519,9 @@ func (suite *replicaCheckerTestSuite) TestOpts() { })) tc.SetStoreOffline(2) // RemoveDownReplica has higher priority than replaceOfflineReplica. - testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 1, 4) + testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 1, 4) tc.SetEnableRemoveDownReplica(false) - testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 2, 4) + testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 2, 4) tc.SetEnableReplaceOfflineReplica(false) suite.Nil(rc.Check(region)) } @@ -548,10 +548,10 @@ func (suite *replicaCheckerTestSuite) TestFixDownPeer() { region = region.Clone(core.WithDownPeers([]*pdpb.PeerStats{ {Peer: region.GetStorePeer(4), DownSeconds: 6000}, })) - testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpRegion, 4, 5) + testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpRegion, 4, 5) tc.SetStoreDown(5) - testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpRegion, 4, 2) + testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpRegion, 4, 2) tc.SetIsolationLevel("zone") suite.Nil(rc.Check(region)) @@ -576,10 +576,10 @@ func (suite *replicaCheckerTestSuite) TestFixOfflinePeer() { suite.Nil(rc.Check(region)) tc.SetStoreOffline(4) - testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpRegion, 4, 5) + testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpRegion, 4, 5) tc.SetStoreOffline(5) - testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpRegion, 4, 2) + testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpRegion, 4, 2) tc.SetIsolationLevel("zone") suite.Nil(rc.Check(region)) diff --git a/server/schedule/checker/rule_checker_test.go b/server/schedule/checker/rule_checker_test.go index 9679d8eedd85..4bbaffa9a49b 100644 --- a/server/schedule/checker/rule_checker_test.go +++ b/server/schedule/checker/rule_checker_test.go @@ -539,10 +539,10 @@ func (suite *ruleCheckerTestSuite) TestFixDownPeer() { region = region.Clone(core.WithDownPeers([]*pdpb.PeerStats{ {Peer: region.GetStorePeer(4), DownSeconds: 6000}, })) - testutil.CheckTransferPeerWithTestify(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 5) + testutil.CheckTransferPeer(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 5) suite.cluster.SetStoreDown(5) - testutil.CheckTransferPeerWithTestify(suite.Require(), 
suite.rc.Check(region), operator.OpRegion, 4, 2) + testutil.CheckTransferPeer(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 2) rule.IsolationLevel = "zone" suite.ruleManager.SetRule(rule) @@ -572,10 +572,10 @@ func (suite *ruleCheckerTestSuite) TestFixOfflinePeer() { suite.Nil(suite.rc.Check(region)) suite.cluster.SetStoreOffline(4) - testutil.CheckTransferPeerWithTestify(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 5) + testutil.CheckTransferPeer(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 5) suite.cluster.SetStoreOffline(5) - testutil.CheckTransferPeerWithTestify(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 2) + testutil.CheckTransferPeer(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 2) rule.IsolationLevel = "zone" suite.ruleManager.SetRule(rule) diff --git a/server/schedule/filter/filters_test.go b/server/schedule/filter/filters_test.go index 1250d2c615c2..cfc2df39ca33 100644 --- a/server/schedule/filter/filters_test.go +++ b/server/schedule/filter/filters_test.go @@ -322,11 +322,11 @@ func TestStoreStateFilterReason(t *testing.T) { } check := func(store *core.StoreInfo, testCases []testCase) { - for _, tc := range testCases { - filters[tc.filterIdx].Source(opt, store) - re.Equal(tc.sourceReason, filters[tc.filterIdx].(*StoreStateFilter).Reason) - filters[tc.filterIdx].Source(opt, store) - re.Equal(tc.targetReason, filters[tc.filterIdx].(*StoreStateFilter).Reason) + for _, testCase := range testCases { + filters[testCase.filterIdx].Source(opt, store) + re.Equal(testCase.sourceReason, filters[testCase.filterIdx].(*StoreStateFilter).Reason) + filters[testCase.filterIdx].Source(opt, store) + re.Equal(testCase.targetReason, filters[testCase.filterIdx].(*StoreStateFilter).Reason) } } diff --git a/server/schedule/operator/step_test.go b/server/schedule/operator/step_test.go index aa2f18c72203..a6c145aece35 100644 --- a/server/schedule/operator/step_test.go +++ b/server/schedule/operator/step_test.go @@ -54,7 +54,7 @@ func (suite *operatorStepTestSuite) SetupTest() { func (suite *operatorStepTestSuite) TestTransferLeader() { step := TransferLeader{FromStore: 1, ToStore: 2} - cases := []testCase{ + testCases := []testCase{ { []*metapb.Peer{ {Id: 1, StoreId: 1, Role: metapb.PeerRole_Voter}, @@ -86,10 +86,10 @@ func (suite *operatorStepTestSuite) TestTransferLeader() { suite.NoError, }, } - suite.check(step, "transfer leader from store 1 to store 2", cases) + suite.check(step, "transfer leader from store 1 to store 2", testCases) step = TransferLeader{FromStore: 1, ToStore: 9} // 9 is down - cases = []testCase{ + testCases = []testCase{ { []*metapb.Peer{ {Id: 1, StoreId: 1, Role: metapb.PeerRole_Voter}, @@ -101,12 +101,12 @@ func (suite *operatorStepTestSuite) TestTransferLeader() { suite.Error, }, } - suite.check(step, "transfer leader from store 1 to store 9", cases) + suite.check(step, "transfer leader from store 1 to store 9", testCases) } func (suite *operatorStepTestSuite) TestAddPeer() { step := AddPeer{ToStore: 2, PeerID: 2} - cases := []testCase{ + testCases := []testCase{ { []*metapb.Peer{ {Id: 1, StoreId: 1, Role: metapb.PeerRole_Voter}, @@ -125,10 +125,10 @@ func (suite *operatorStepTestSuite) TestAddPeer() { suite.NoError, }, } - suite.check(step, "add peer 2 on store 2", cases) + suite.check(step, "add peer 2 on store 2", testCases) step = AddPeer{ToStore: 9, PeerID: 9} - cases = []testCase{ + testCases = []testCase{ { []*metapb.Peer{ {Id: 1, StoreId: 1, Role: metapb.PeerRole_Voter}, @@ -138,12 
+138,12 @@ func (suite *operatorStepTestSuite) TestAddPeer() { suite.Error, }, } - suite.check(step, "add peer 9 on store 9", cases) + suite.check(step, "add peer 9 on store 9", testCases) } func (suite *operatorStepTestSuite) TestAddLearner() { step := AddLearner{ToStore: 2, PeerID: 2} - cases := []testCase{ + testCases := []testCase{ { []*metapb.Peer{ {Id: 1, StoreId: 1, Role: metapb.PeerRole_Voter}, @@ -162,10 +162,10 @@ func (suite *operatorStepTestSuite) TestAddLearner() { suite.NoError, }, } - suite.check(step, "add learner peer 2 on store 2", cases) + suite.check(step, "add learner peer 2 on store 2", testCases) step = AddLearner{ToStore: 9, PeerID: 9} - cases = []testCase{ + testCases = []testCase{ { []*metapb.Peer{ {Id: 1, StoreId: 1, Role: metapb.PeerRole_Voter}, @@ -175,7 +175,7 @@ func (suite *operatorStepTestSuite) TestAddLearner() { suite.Error, }, } - suite.check(step, "add learner peer 9 on store 9", cases) + suite.check(step, "add learner peer 9 on store 9", testCases) } func (suite *operatorStepTestSuite) TestChangePeerV2Enter() { @@ -183,7 +183,7 @@ func (suite *operatorStepTestSuite) TestChangePeerV2Enter() { PromoteLearners: []PromoteLearner{{PeerID: 3, ToStore: 3}, {PeerID: 4, ToStore: 4}}, DemoteVoters: []DemoteVoter{{PeerID: 1, ToStore: 1}, {PeerID: 2, ToStore: 2}}, } - cases := []testCase{ + testCases := []testCase{ { // before step []*metapb.Peer{ {Id: 1, StoreId: 1, Role: metapb.PeerRole_Voter}, @@ -290,7 +290,7 @@ func (suite *operatorStepTestSuite) TestChangePeerV2Enter() { desc := "use joint consensus, " + "promote learner peer 3 on store 3 to voter, promote learner peer 4 on store 4 to voter, " + "demote voter peer 1 on store 1 to learner, demote voter peer 2 on store 2 to learner" - suite.check(cpe, desc, cases) + suite.check(cpe, desc, testCases) } func (suite *operatorStepTestSuite) TestChangePeerV2Leave() { @@ -298,7 +298,7 @@ func (suite *operatorStepTestSuite) TestChangePeerV2Leave() { PromoteLearners: []PromoteLearner{{PeerID: 3, ToStore: 3}, {PeerID: 4, ToStore: 4}}, DemoteVoters: []DemoteVoter{{PeerID: 1, ToStore: 1}, {PeerID: 2, ToStore: 2}}, } - cases := []testCase{ + testCases := []testCase{ { // before step []*metapb.Peer{ {Id: 3, StoreId: 3, Role: metapb.PeerRole_IncomingVoter}, @@ -416,12 +416,12 @@ func (suite *operatorStepTestSuite) TestChangePeerV2Leave() { desc := "leave joint state, " + "promote learner peer 3 on store 3 to voter, promote learner peer 4 on store 4 to voter, " + "demote voter peer 1 on store 1 to learner, demote voter peer 2 on store 2 to learner" - suite.check(cpl, desc, cases) + suite.check(cpl, desc, testCases) } -func (suite *operatorStepTestSuite) check(step OpStep, desc string, cases []testCase) { +func (suite *operatorStepTestSuite) check(step OpStep, desc string, testCases []testCase) { suite.Equal(desc, step.String()) - for _, testCase := range cases { + for _, testCase := range testCases { region := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: testCase.Peers}, testCase.Peers[0]) suite.Equal(testCase.ConfVerChanged, step.ConfVerChanged(region)) suite.Equal(testCase.IsFinish, step.IsFinish(region)) diff --git a/server/schedule/placement/fit_test.go b/server/schedule/placement/fit_test.go index 919df84d3096..6325dadb30a8 100644 --- a/server/schedule/placement/fit_test.go +++ b/server/schedule/placement/fit_test.go @@ -113,7 +113,7 @@ func TestFitRegion(t *testing.T) { re := require.New(t) stores := makeStores() - cases := []struct { + testCases := []struct { region string rules []string fitPeers string @@ -138,14 
+138,14 @@ func TestFitRegion(t *testing.T) { {"1111,2211,3111,3112", []string{"1/voter/rack=rack2/", "3/voter//zone"}, "2211/1111,3111,3112"}, } - for _, cc := range cases { - region := makeRegion(cc.region) + for _, testCase := range testCases { + region := makeRegion(testCase.region) var rules []*Rule - for _, r := range cc.rules { + for _, r := range testCase.rules { rules = append(rules, makeRule(r)) } rf := fitRegion(stores.GetStores(), region, rules) - expects := strings.Split(cc.fitPeers, "/") + expects := strings.Split(testCase.fitPeers, "/") for i, f := range rf.RuleFits { re.True(checkPeerMatch(f.Peers, expects[i])) } diff --git a/server/schedulers/balance_test.go b/server/schedulers/balance_test.go index 871625eab03b..9f9eda877fce 100644 --- a/server/schedulers/balance_test.go +++ b/server/schedulers/balance_test.go @@ -51,7 +51,7 @@ func TestShouldBalance(t *testing.T) { defer cancel() const R = 96 - tests := []testBalanceSpeedCase{ + testCases := []testBalanceSpeedCase{ // target size is zero {2, 0, R / 10, true, core.BySize}, {2, 0, R, false, core.BySize}, @@ -109,28 +109,28 @@ func TestShouldBalance(t *testing.T) { // create a region to control average region size. tc.AddLeaderRegion(1, 1, 2) - for _, t := range tests { - tc.AddLeaderStore(1, int(t.sourceCount)) - tc.AddLeaderStore(2, int(t.targetCount)) - region := tc.GetRegion(1).Clone(core.SetApproximateSize(t.regionSize)) + for _, testCase := range testCases { + tc.AddLeaderStore(1, int(testCase.sourceCount)) + tc.AddLeaderStore(2, int(testCase.targetCount)) + region := tc.GetRegion(1).Clone(core.SetApproximateSize(testCase.regionSize)) tc.PutRegion(region) - tc.SetLeaderSchedulePolicy(t.kind.String()) - kind := core.NewScheduleKind(core.LeaderKind, t.kind) + tc.SetLeaderSchedulePolicy(testCase.kind.String()) + kind := core.NewScheduleKind(core.LeaderKind, testCase.kind) plan := newBalancePlan(kind, tc, oc.GetOpInfluence(tc)) plan.source, plan.target, plan.region = tc.GetStore(1), tc.GetStore(2), tc.GetRegion(1) - re.Equal(t.expectedResult, plan.shouldBalance("")) + re.Equal(testCase.expectedResult, plan.shouldBalance("")) } - for _, t := range tests { - if t.kind.String() == core.BySize.String() { - tc.AddRegionStore(1, int(t.sourceCount)) - tc.AddRegionStore(2, int(t.targetCount)) - region := tc.GetRegion(1).Clone(core.SetApproximateSize(t.regionSize)) + for _, testCase := range testCases { + if testCase.kind.String() == core.BySize.String() { + tc.AddRegionStore(1, int(testCase.sourceCount)) + tc.AddRegionStore(2, int(testCase.targetCount)) + region := tc.GetRegion(1).Clone(core.SetApproximateSize(testCase.regionSize)) tc.PutRegion(region) - kind := core.NewScheduleKind(core.RegionKind, t.kind) + kind := core.NewScheduleKind(core.RegionKind, testCase.kind) plan := newBalancePlan(kind, tc, oc.GetOpInfluence(tc)) plan.source, plan.target, plan.region = tc.GetStore(1), tc.GetStore(2), tc.GetRegion(1) - re.Equal(t.expectedResult, plan.shouldBalance("")) + re.Equal(testCase.expectedResult, plan.shouldBalance("")) } } } diff --git a/server/schedulers/hot_region_test.go b/server/schedulers/hot_region_test.go index 9ffe941c92c5..cbef2db194dc 100644 --- a/server/schedulers/hot_region_test.go +++ b/server/schedulers/hot_region_test.go @@ -1340,7 +1340,7 @@ func TestHotCacheUpdateCache(t *testing.T) { stats = tc.RegionStats(statistics.Read, 0) re.Len(stats[4], 2) re.Len(stats[5], 1) - re.Len(stats[6], 0) + re.Empty(stats[6]) } func TestHotCacheKeyThresholds(t *testing.T) { diff --git a/server/statistics/hot_peer_cache_test.go 
b/server/statistics/hot_peer_cache_test.go index c021f05df3f6..dbd65b1d402e 100644 --- a/server/statistics/hot_peer_cache_test.go +++ b/server/statistics/hot_peer_cache_test.go @@ -530,15 +530,15 @@ func TestCoolDownTransferLeader(t *testing.T) { checkAndUpdate(re, cache, region) checkCoolDown(re, cache, region, false) } - cases := []func(){moveLeader, transferLeader, movePeer, addReplica, removeReplica} - for _, runCase := range cases { + testCases := []func(){moveLeader, transferLeader, movePeer, addReplica, removeReplica} + for _, testCase := range testCases { cache = NewHotPeerCache(Read) region = buildRegion(Read, 3, 60) for i := 1; i <= 200; i++ { checkAndUpdate(re, cache, region) } checkCoolDown(re, cache, region, false) - runCase() + testCase() } } @@ -600,7 +600,7 @@ func checkMovingAverage(re *require.Assertions, testCase *testMovingAverageCase) func TestUnstableData(t *testing.T) { re := require.New(t) - cases := []*testMovingAverageCase{ + testCases := []*testMovingAverageCase{ { report: []float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, expect: []float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, @@ -626,8 +626,8 @@ func TestUnstableData(t *testing.T) { expect: []float64{0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, }, } - for i := range cases { - checkMovingAverage(re, cases[i]) + for _, testCase := range testCases { + checkMovingAverage(re, testCase) } } diff --git a/tests/pdctl/config/config_test.go b/tests/pdctl/config/config_test.go index 3c4c5d910bc3..f9cc2b8949e2 100644 --- a/tests/pdctl/config/config_test.go +++ b/tests/pdctl/config/config_test.go @@ -33,13 +33,13 @@ import ( pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) -type testItem struct { +type testCase struct { name string value interface{} read func(scheduleConfig *config.ScheduleConfig) interface{} } -func (t *testItem) judge(re *require.Assertions, scheduleConfigs ...*config.ScheduleConfig) { +func (t *testCase) judge(re *require.Assertions, scheduleConfigs ...*config.ScheduleConfig) { value := t.value for _, scheduleConfig := range scheduleConfigs { re.NotNil(scheduleConfig) @@ -211,7 +211,7 @@ func TestConfig(t *testing.T) { re.Equal(typeutil.NewDuration(0), svr.GetScheduleConfig().MaxStorePreparingTime) // test config read and write - testItems := []testItem{ + testCases := []testCase{ {"leader-schedule-limit", uint64(64), func(scheduleConfig *config.ScheduleConfig) interface{} { return scheduleConfig.LeaderScheduleLimit }}, {"hot-region-schedule-limit", uint64(64), func(scheduleConfig *config.ScheduleConfig) interface{} { @@ -229,9 +229,9 @@ func TestConfig(t *testing.T) { return scheduleConfig.EnableDebugMetrics }}, } - for _, item := range testItems { + for _, testCase := range testCases { // write - args1 = []string{"-u", pdAddr, "config", "set", item.name, reflect.TypeOf(item.value).String()} + args1 = []string{"-u", pdAddr, "config", "set", testCase.name, reflect.TypeOf(testCase.value).String()} _, err = pdctl.ExecuteCommand(cmd, args1...) 
re.NoError(err) // read @@ -241,7 +241,7 @@ func TestConfig(t *testing.T) { cfg = config.Config{} re.NoError(json.Unmarshal(output, &cfg)) // judge - item.judge(re, &cfg.Schedule, svr.GetScheduleConfig()) + testCase.judge(re, &cfg.Schedule, svr.GetScheduleConfig()) } // test error or deprecated config name diff --git a/tests/server/cluster/cluster_test.go b/tests/server/cluster/cluster_test.go index d5f623712aec..b16099ddda05 100644 --- a/tests/server/cluster/cluster_test.go +++ b/tests/server/cluster/cluster_test.go @@ -869,7 +869,7 @@ func TestTiFlashWithPlacementRules(t *testing.T) { re.NoError(err) err = svr.SetReplicationConfig(rep) re.NoError(err) - re.Equal(0, len(svr.GetScheduleConfig().StoreLimit)) + re.Empty(svr.GetScheduleConfig().StoreLimit) } func TestReplicationModeStatus(t *testing.T) { diff --git a/tests/server/member/member_test.go b/tests/server/member/member_test.go index 023f58103ef0..5d2b0bfdd5a8 100644 --- a/tests/server/member/member_test.go +++ b/tests/server/member/member_test.go @@ -116,7 +116,7 @@ func TestMemberDelete(t *testing.T) { key := member.GetServer().GetMember().GetDCLocationPath(member.GetServerID()) resp, err := etcdutil.EtcdKVGet(leader.GetEtcdClient(), key) re.NoError(err) - re.Len(resp.Kvs, 0) + re.Empty(resp.Kvs) } }