diff --git a/.golangci.yml b/.golangci.yml
index 079e25ec2b3..59954cecee3 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -11,15 +11,12 @@ linters:
     - makezero
     - gosec
     - bodyclose
+    # TODO: enable when all existing errors are fixed
+    # - testifylint
   disable:
     - errcheck
 linters-settings:
   gocritic:
-    # Which checks should be enabled; can't be combined with 'disabled-checks';
-    # See https://go-critic.github.io/overview#checks-overview
-    # To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run`
-    # By default list of stable checks is used.
-    enabled-checks:
     # Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty
     disabled-checks:
       - regexpMust
@@ -33,3 +30,15 @@ linters-settings:
       - G402
       - G404
       - G601
+  testifylint:
+    enable:
+      - bool-compare
+      - compares
+      - empty
+      - error-is-as
+      - error-nil
+      - expected-actual
+      - len
+      - require-error
+      - suite-dont-use-pkg
+      - suite-extra-assert-call
diff --git a/Makefile b/Makefile
index 133f99cfac8..bf25730c0d9 100644
--- a/Makefile
+++ b/Makefile
@@ -172,7 +172,7 @@ install-tools:
 
 #### Static checks ####
 
-check: install-tools tidy static generate-errdoc check-test
+check: install-tools tidy static generate-errdoc
 
 static: install-tools
 	@ echo "gofmt ..."
@@ -199,11 +199,7 @@ check-plugin:
 	@echo "checking plugin..."
 	cd ./plugin/scheduler_example && $(MAKE) evictLeaderPlugin.so && rm evictLeaderPlugin.so
 
-check-test:
-	@echo "checking test..."
-	./scripts/check-test.sh
-
-.PHONY: check static tidy generate-errdoc check-plugin check-test
+.PHONY: check static tidy generate-errdoc check-plugin
 
 #### Test utils ####
 
diff --git a/pkg/autoscaling/calculation_test.go b/pkg/autoscaling/calculation_test.go
index de3be68d68c..85f723b562c 100644
--- a/pkg/autoscaling/calculation_test.go
+++ b/pkg/autoscaling/calculation_test.go
@@ -233,7 +233,7 @@ func TestGetTotalCPUUseTime(t *testing.T) {
 	}
 	totalCPUUseTime, _ := getTotalCPUUseTime(querier, TiDB, instances, time.Now(), 0)
 	expected := mockResultValue * float64(len(instances))
-	re.True(math.Abs(expected-totalCPUUseTime) < 1e-6)
+	re.Less(math.Abs(expected-totalCPUUseTime), 1e-6)
 }
 
 func TestGetTotalCPUQuota(t *testing.T) {
diff --git a/pkg/autoscaling/prometheus_test.go b/pkg/autoscaling/prometheus_test.go
index 6c30e3ead4c..2efdc348ead 100644
--- a/pkg/autoscaling/prometheus_test.go
+++ b/pkg/autoscaling/prometheus_test.go
@@ -196,7 +196,7 @@ func TestRetrieveCPUMetrics(t *testing.T) {
 	for i := 0; i < len(addresses)-1; i++ {
 		value, ok := result[addresses[i]]
 		re.True(ok)
-		re.True(math.Abs(value-mockResultValue) < 1e-6)
+		re.Less(math.Abs(value-mockResultValue), 1e-6)
 	}
 
 	_, ok := result[addresses[len(addresses)-1]]
diff --git a/pkg/balancer/balancer_test.go b/pkg/balancer/balancer_test.go
index f95487a4cc7..996b4f1da35 100644
--- a/pkg/balancer/balancer_test.go
+++ b/pkg/balancer/balancer_test.go
@@ -62,7 +62,7 @@ func TestBalancerDuplicate(t *testing.T) {
 		NewRoundRobin[uint32](),
 	}
 	for _, balancer := range balancers {
-		re.Len(balancer.GetAll(), 0)
+		re.Empty(balancer.GetAll())
 		// test duplicate put
 		balancer.Put(1)
 		re.Len(balancer.GetAll(), 1)
@@ -70,9 +70,9 @@
 		re.Len(balancer.GetAll(), 1)
 		// test duplicate delete
 		balancer.Delete(1)
-		re.Len(balancer.GetAll(), 0)
+		re.Empty(balancer.GetAll())
 		balancer.Delete(1)
-		re.Len(balancer.GetAll(), 0)
+		re.Empty(balancer.GetAll())
 	}
 }
 
diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go
index b02e8823398..fe9f84223c1 100644
--- a/pkg/cache/cache_test.go
+++ 
b/pkg/cache/cache_test.go @@ -77,7 +77,7 @@ func TestExpireRegionCache(t *testing.T) { re.Equal(3, cache.Len()) - re.Equal(sortIDs(cache.GetAllID()), []uint64{1, 2, 3}) + re.Equal([]uint64{1, 2, 3}, sortIDs(cache.GetAllID())) // after 20ms, the key 1 will be expired time.Sleep(20 * time.Millisecond) @@ -98,7 +98,7 @@ func TestExpireRegionCache(t *testing.T) { // we can't ensure whether gc is executed, so we check the length of cache in a loop. return cache.Len() == 2 }, testutil.WithWaitFor(50*time.Millisecond), testutil.WithTickInterval(time.Millisecond)) - re.Equal(sortIDs(cache.GetAllID()), []uint64{2, 3}) + re.Equal([]uint64{2, 3}, sortIDs(cache.GetAllID())) cache.Remove(2) @@ -111,7 +111,7 @@ func TestExpireRegionCache(t *testing.T) { re.Equal(3.0, value) re.Equal(1, cache.Len()) - re.Equal(sortIDs(cache.GetAllID()), []uint64{3}) + re.Equal([]uint64{3}, sortIDs(cache.GetAllID())) } func sortIDs(ids []uint64) []uint64 { @@ -131,15 +131,15 @@ func TestLRUCache(t *testing.T) { val, ok := cache.Get(3) re.True(ok) - re.Equal(val, "3") + re.Equal("3", val) val, ok = cache.Get(2) re.True(ok) - re.Equal(val, "2") + re.Equal("2", val) val, ok = cache.Get(1) re.True(ok) - re.Equal(val, "1") + re.Equal("1", val) re.Equal(3, cache.Len()) @@ -153,27 +153,27 @@ func TestLRUCache(t *testing.T) { val, ok = cache.Get(1) re.True(ok) - re.Equal(val, "1") + re.Equal("1", val) val, ok = cache.Get(2) re.True(ok) - re.Equal(val, "2") + re.Equal("2", val) val, ok = cache.Get(4) re.True(ok) - re.Equal(val, "4") + re.Equal("4", val) re.Equal(3, cache.Len()) val, ok = cache.Peek(1) re.True(ok) - re.Equal(val, "1") + re.Equal("1", val) elems := cache.Elems() re.Len(elems, 3) - re.Equal(elems[0].Value, "4") - re.Equal(elems[1].Value, "2") - re.Equal(elems[2].Value, "1") + re.Equal("4", elems[0].Value) + re.Equal("2", elems[1].Value) + re.Equal("1", elems[2].Value) cache.Remove(1) cache.Remove(2) @@ -247,15 +247,15 @@ func TestFifoFromLastSameElems(t *testing.T) { }) } items := fun() - re.Equal(1, len(items)) + re.Len(items, 1) cache.Put(1, &testStruct{value: "3"}) cache.Put(2, &testStruct{value: "3"}) items = fun() - re.Equal(3, len(items)) + re.Len(items, 3) re.Equal("3", items[0].Value.(*testStruct).value) cache.Put(1, &testStruct{value: "2"}) items = fun() - re.Equal(1, len(items)) + re.Len(items, 1) re.Equal("2", items[0].Value.(*testStruct).value) } diff --git a/pkg/core/storelimit/limit_test.go b/pkg/core/storelimit/limit_test.go index 6f57c01eccb..946729f8ce2 100644 --- a/pkg/core/storelimit/limit_test.go +++ b/pkg/core/storelimit/limit_test.go @@ -30,7 +30,7 @@ func TestStoreLimit(t *testing.T) { re := require.New(t) rate := int64(15) limit := NewStoreRateLimit(float64(rate)).(*StoreRateLimit) - re.Equal(limit.Rate(AddPeer), float64(15)) + re.Equal(float64(15), limit.Rate(AddPeer)) re.True(limit.Available(influence*rate, AddPeer, constant.Low)) re.True(limit.Take(influence*rate, AddPeer, constant.Low)) re.False(limit.Take(influence, AddPeer, constant.Low)) diff --git a/pkg/dashboard/adapter/redirector_test.go b/pkg/dashboard/adapter/redirector_test.go index c5d837507fc..5fc9ea5ea99 100644 --- a/pkg/dashboard/adapter/redirector_test.go +++ b/pkg/dashboard/adapter/redirector_test.go @@ -65,37 +65,39 @@ func (suite *redirectorTestSuite) TearDownSuite() { } func (suite *redirectorTestSuite) TestReverseProxy() { + re := suite.Require() redirectorServer := httptest.NewServer(http.HandlerFunc(suite.redirector.ReverseProxy)) defer redirectorServer.Close() suite.redirector.SetAddress(suite.tempServer.URL) // Test 
normal forwarding req, err := http.NewRequest(http.MethodGet, redirectorServer.URL, http.NoBody) - suite.NoError(err) + re.NoError(err) checkHTTPRequest(suite.Require(), suite.noRedirectHTTPClient, req, http.StatusOK, suite.tempText) // Test the requests that are forwarded by others req, err = http.NewRequest(http.MethodGet, redirectorServer.URL, http.NoBody) - suite.NoError(err) + re.NoError(err) req.Header.Set(proxyHeader, "other") checkHTTPRequest(suite.Require(), suite.noRedirectHTTPClient, req, http.StatusOK, suite.tempText) // Test LoopDetected suite.redirector.SetAddress(redirectorServer.URL) req, err = http.NewRequest(http.MethodGet, redirectorServer.URL, http.NoBody) - suite.NoError(err) + re.NoError(err) checkHTTPRequest(suite.Require(), suite.noRedirectHTTPClient, req, http.StatusLoopDetected, "") } func (suite *redirectorTestSuite) TestTemporaryRedirect() { + re := suite.Require() redirectorServer := httptest.NewServer(http.HandlerFunc(suite.redirector.TemporaryRedirect)) defer redirectorServer.Close() suite.redirector.SetAddress(suite.tempServer.URL) // Test TemporaryRedirect req, err := http.NewRequest(http.MethodGet, redirectorServer.URL, http.NoBody) - suite.NoError(err) + re.NoError(err) checkHTTPRequest(suite.Require(), suite.noRedirectHTTPClient, req, http.StatusTemporaryRedirect, "") // Test Response req, err = http.NewRequest(http.MethodGet, redirectorServer.URL, http.NoBody) - suite.NoError(err) + re.NoError(err) checkHTTPRequest(suite.Require(), http.DefaultClient, req, http.StatusOK, suite.tempText) } diff --git a/pkg/election/leadership_test.go b/pkg/election/leadership_test.go index c259476e44e..be1922fe381 100644 --- a/pkg/election/leadership_test.go +++ b/pkg/election/leadership_test.go @@ -175,7 +175,7 @@ func TestExitWatch(t *testing.T) { resp2, err := client.MemberList(context.Background()) re.NoError(err) - re.Equal(3, len(resp2.Members)) + re.Len(resp2.Members, 3) etcd2.Server.HardStop() etcd3.Server.HardStop() diff --git a/pkg/encryption/config_test.go b/pkg/encryption/config_test.go index 30c9c9dded8..6f7e4a41b03 100644 --- a/pkg/encryption/config_test.go +++ b/pkg/encryption/config_test.go @@ -38,19 +38,19 @@ func TestAdjustInvalidDataEncryptionMethod(t *testing.T) { t.Parallel() re := require.New(t) config := &Config{DataEncryptionMethod: "unknown"} - re.NotNil(config.Adjust()) + re.Error(config.Adjust()) } func TestAdjustNegativeRotationDuration(t *testing.T) { t.Parallel() re := require.New(t) config := &Config{DataKeyRotationPeriod: typeutil.NewDuration(time.Duration(int64(-1)))} - re.NotNil(config.Adjust()) + re.Error(config.Adjust()) } func TestAdjustInvalidMasterKeyType(t *testing.T) { t.Parallel() re := require.New(t) config := &Config{MasterKey: MasterKeyConfig{Type: "unknown"}} - re.NotNil(config.Adjust()) + re.Error(config.Adjust()) } diff --git a/pkg/encryption/crypter_test.go b/pkg/encryption/crypter_test.go index 2f952d5b729..12a851d1563 100644 --- a/pkg/encryption/crypter_test.go +++ b/pkg/encryption/crypter_test.go @@ -26,11 +26,11 @@ import ( func TestEncryptionMethodSupported(t *testing.T) { t.Parallel() re := require.New(t) - re.NotNil(CheckEncryptionMethodSupported(encryptionpb.EncryptionMethod_PLAINTEXT)) - re.NotNil(CheckEncryptionMethodSupported(encryptionpb.EncryptionMethod_UNKNOWN)) - re.Nil(CheckEncryptionMethodSupported(encryptionpb.EncryptionMethod_AES128_CTR)) - re.Nil(CheckEncryptionMethodSupported(encryptionpb.EncryptionMethod_AES192_CTR)) - re.Nil(CheckEncryptionMethodSupported(encryptionpb.EncryptionMethod_AES256_CTR)) + 
re.Error(CheckEncryptionMethodSupported(encryptionpb.EncryptionMethod_PLAINTEXT)) + re.Error(CheckEncryptionMethodSupported(encryptionpb.EncryptionMethod_UNKNOWN)) + re.NoError(CheckEncryptionMethodSupported(encryptionpb.EncryptionMethod_AES128_CTR)) + re.NoError(CheckEncryptionMethodSupported(encryptionpb.EncryptionMethod_AES192_CTR)) + re.NoError(CheckEncryptionMethodSupported(encryptionpb.EncryptionMethod_AES256_CTR)) } func TestKeyLength(t *testing.T) { diff --git a/pkg/encryption/key_manager_test.go b/pkg/encryption/key_manager_test.go index 3134e714543..96bdb3c0eb5 100644 --- a/pkg/encryption/key_manager_test.go +++ b/pkg/encryption/key_manager_test.go @@ -313,7 +313,7 @@ func TestLoadKeyEmpty(t *testing.T) { // Simulate keys get deleted. _, err = client.Delete(context.Background(), EncryptionKeysPath) re.NoError(err) - re.NotNil(m.loadKeys()) + re.Error(m.loadKeys()) } func TestWatcher(t *testing.T) { diff --git a/pkg/keyspace/keyspace_test.go b/pkg/keyspace/keyspace_test.go index 27e7de359ee..552adc8d83e 100644 --- a/pkg/keyspace/keyspace_test.go +++ b/pkg/keyspace/keyspace_test.go @@ -75,13 +75,14 @@ func (m *mockConfig) GetCheckRegionSplitInterval() time.Duration { } func (suite *keyspaceTestSuite) SetupTest() { + re := suite.Require() suite.ctx, suite.cancel = context.WithCancel(context.Background()) store := endpoint.NewStorageEndpoint(kv.NewMemoryKV(), nil) allocator := mockid.NewIDAllocator() kgm := NewKeyspaceGroupManager(suite.ctx, store, nil, 0) suite.manager = NewKeyspaceManager(suite.ctx, store, nil, allocator, &mockConfig{}, kgm) - suite.NoError(kgm.Bootstrap(suite.ctx)) - suite.NoError(suite.manager.Bootstrap()) + re.NoError(kgm.Bootstrap(suite.ctx)) + re.NoError(suite.manager.Bootstrap()) } func (suite *keyspaceTestSuite) TearDownTest() { @@ -89,11 +90,13 @@ func (suite *keyspaceTestSuite) TearDownTest() { } func (suite *keyspaceTestSuite) SetupSuite() { - suite.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/skipSplitRegion", "return(true)")) + re := suite.Require() + re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/skipSplitRegion", "return(true)")) } func (suite *keyspaceTestSuite) TearDownSuite() { - suite.NoError(failpoint.Disable("github.com/tikv/pd/pkg/keyspace/skipSplitRegion")) + re := suite.Require() + re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/keyspace/skipSplitRegion")) } func makeCreateKeyspaceRequests(count int) []*CreateKeyspaceRequest { @@ -205,20 +208,20 @@ func (suite *keyspaceTestSuite) TestUpdateKeyspaceState() { // Disabling an ENABLED keyspace is allowed. Should update StateChangedAt. updated, err := manager.UpdateKeyspaceState(createRequest.Name, keyspacepb.KeyspaceState_DISABLED, oldTime) re.NoError(err) - re.Equal(updated.State, keyspacepb.KeyspaceState_DISABLED) - re.Equal(updated.StateChangedAt, oldTime) + re.Equal(keyspacepb.KeyspaceState_DISABLED, updated.State) + re.Equal(oldTime, updated.StateChangedAt) newTime := time.Now().Unix() // Disabling an DISABLED keyspace is allowed. Should NOT update StateChangedAt. updated, err = manager.UpdateKeyspaceState(createRequest.Name, keyspacepb.KeyspaceState_DISABLED, newTime) re.NoError(err) - re.Equal(updated.State, keyspacepb.KeyspaceState_DISABLED) - re.Equal(updated.StateChangedAt, oldTime) + re.Equal(keyspacepb.KeyspaceState_DISABLED, updated.State) + re.Equal(oldTime, updated.StateChangedAt) // Archiving a DISABLED keyspace is allowed. Should update StateChangeAt. 
updated, err = manager.UpdateKeyspaceState(createRequest.Name, keyspacepb.KeyspaceState_ARCHIVED, newTime) re.NoError(err) - re.Equal(updated.State, keyspacepb.KeyspaceState_ARCHIVED) - re.Equal(updated.StateChangedAt, newTime) + re.Equal(keyspacepb.KeyspaceState_ARCHIVED, updated.State) + re.Equal(newTime, updated.StateChangedAt) // Changing state of an ARCHIVED keyspace is not allowed. _, err = manager.UpdateKeyspaceState(createRequest.Name, keyspacepb.KeyspaceState_ENABLED, newTime) re.Error(err) @@ -244,7 +247,7 @@ func (suite *keyspaceTestSuite) TestLoadRangeKeyspace() { // Load all keyspaces including the default keyspace. keyspaces, err := manager.LoadRangeKeyspace(0, 0) re.NoError(err) - re.Equal(total+1, len(keyspaces)) + re.Len(keyspaces, total+1) for i := range keyspaces { re.Equal(uint32(i), keyspaces[i].Id) if i != 0 { @@ -256,7 +259,7 @@ func (suite *keyspaceTestSuite) TestLoadRangeKeyspace() { // Result should be keyspaces with id 0 - 49. keyspaces, err = manager.LoadRangeKeyspace(0, 50) re.NoError(err) - re.Equal(50, len(keyspaces)) + re.Len(keyspaces, 50) for i := range keyspaces { re.Equal(uint32(i), keyspaces[i].Id) if i != 0 { @@ -269,7 +272,7 @@ func (suite *keyspaceTestSuite) TestLoadRangeKeyspace() { loadStart := 33 keyspaces, err = manager.LoadRangeKeyspace(uint32(loadStart), 20) re.NoError(err) - re.Equal(20, len(keyspaces)) + re.Len(keyspaces, 20) for i := range keyspaces { re.Equal(uint32(loadStart+i), keyspaces[i].Id) checkCreateRequest(re, requests[i+loadStart-1], keyspaces[i]) @@ -280,7 +283,7 @@ func (suite *keyspaceTestSuite) TestLoadRangeKeyspace() { loadStart = 90 keyspaces, err = manager.LoadRangeKeyspace(uint32(loadStart), 30) re.NoError(err) - re.Equal(11, len(keyspaces)) + re.Len(keyspaces, 11) for i := range keyspaces { re.Equal(uint32(loadStart+i), keyspaces[i].Id) checkCreateRequest(re, requests[i+loadStart-1], keyspaces[i]) diff --git a/pkg/keyspace/tso_keyspace_group_test.go b/pkg/keyspace/tso_keyspace_group_test.go index 993923d2fd7..2dec780c3c8 100644 --- a/pkg/keyspace/tso_keyspace_group_test.go +++ b/pkg/keyspace/tso_keyspace_group_test.go @@ -43,13 +43,14 @@ func TestKeyspaceGroupTestSuite(t *testing.T) { } func (suite *keyspaceGroupTestSuite) SetupTest() { + re := suite.Require() suite.ctx, suite.cancel = context.WithCancel(context.Background()) store := endpoint.NewStorageEndpoint(kv.NewMemoryKV(), nil) suite.kgm = NewKeyspaceGroupManager(suite.ctx, store, nil, 0) idAllocator := mockid.NewIDAllocator() cluster := mockcluster.NewCluster(suite.ctx, mockconfig.NewTestOptions()) suite.kg = NewKeyspaceManager(suite.ctx, store, cluster, idAllocator, &mockConfig{}, suite.kgm) - suite.NoError(suite.kgm.Bootstrap(suite.ctx)) + re.NoError(suite.kgm.Bootstrap(suite.ctx)) } func (suite *keyspaceGroupTestSuite) TearDownTest() { @@ -191,7 +192,7 @@ func (suite *keyspaceGroupTestSuite) TestUpdateKeyspace() { re.Len(kg2.Keyspaces, 1) kg3, err := suite.kgm.GetKeyspaceGroupByID(3) re.NoError(err) - re.Len(kg3.Keyspaces, 0) + re.Empty(kg3.Keyspaces) _, err = suite.kg.UpdateKeyspaceConfig("test", []*Mutation{ { @@ -211,7 +212,7 @@ func (suite *keyspaceGroupTestSuite) TestUpdateKeyspace() { re.Len(kg2.Keyspaces, 1) kg3, err = suite.kgm.GetKeyspaceGroupByID(3) re.NoError(err) - re.Len(kg3.Keyspaces, 0) + re.Empty(kg3.Keyspaces) _, err = suite.kg.UpdateKeyspaceConfig("test", []*Mutation{ { Op: OpPut, @@ -227,7 +228,7 @@ func (suite *keyspaceGroupTestSuite) TestUpdateKeyspace() { re.NoError(err) kg2, err = suite.kgm.GetKeyspaceGroupByID(2) re.NoError(err) - 
re.Len(kg2.Keyspaces, 0) + re.Empty(kg2.Keyspaces) kg3, err = suite.kgm.GetKeyspaceGroupByID(3) re.NoError(err) re.Len(kg3.Keyspaces, 1) diff --git a/pkg/mcs/resourcemanager/server/config_test.go b/pkg/mcs/resourcemanager/server/config_test.go index dd8dd2d2814..64fd133ea73 100644 --- a/pkg/mcs/resourcemanager/server/config_test.go +++ b/pkg/mcs/resourcemanager/server/config_test.go @@ -42,8 +42,8 @@ read-cpu-ms-cost = 5.0 err = cfg.Adjust(&meta, false) re.NoError(err) - re.Equal(cfg.Controller.DegradedModeWaitDuration.Duration, time.Second*2) - re.Equal(cfg.Controller.LTBMaxWaitDuration.Duration, time.Second*60) + re.Equal(time.Second*2, cfg.Controller.DegradedModeWaitDuration.Duration) + re.Equal(time.Second*60, cfg.Controller.LTBMaxWaitDuration.Duration) re.LessOrEqual(math.Abs(cfg.Controller.RequestUnit.CPUMsCost-5), 1e-7) re.LessOrEqual(math.Abs(cfg.Controller.RequestUnit.WriteCostPerByte-4), 1e-7) re.LessOrEqual(math.Abs(cfg.Controller.RequestUnit.WriteBaseCost-3), 1e-7) diff --git a/pkg/mcs/resourcemanager/server/token_buckets_test.go b/pkg/mcs/resourcemanager/server/token_buckets_test.go index a7d3b9e3bad..4138be5d66e 100644 --- a/pkg/mcs/resourcemanager/server/token_buckets_test.go +++ b/pkg/mcs/resourcemanager/server/token_buckets_test.go @@ -70,27 +70,27 @@ func TestGroupTokenBucketRequest(t *testing.T) { clientUniqueID := uint64(0) tb, trickle := gtb.request(time1, 190000, uint64(time.Second)*10/uint64(time.Millisecond), clientUniqueID) re.LessOrEqual(math.Abs(tb.Tokens-190000), 1e-7) - re.Equal(trickle, int64(0)) + re.Zero(trickle) // need to lend token tb, trickle = gtb.request(time1, 11000, uint64(time.Second)*10/uint64(time.Millisecond), clientUniqueID) re.LessOrEqual(math.Abs(tb.Tokens-11000), 1e-7) - re.Equal(trickle, int64(time.Second)*11000./4000./int64(time.Millisecond)) + re.Equal(int64(time.Second)*11000./4000./int64(time.Millisecond), trickle) tb, trickle = gtb.request(time1, 35000, uint64(time.Second)*10/uint64(time.Millisecond), clientUniqueID) re.LessOrEqual(math.Abs(tb.Tokens-35000), 1e-7) - re.Equal(trickle, int64(time.Second)*10/int64(time.Millisecond)) + re.Equal(int64(time.Second)*10/int64(time.Millisecond), trickle) tb, trickle = gtb.request(time1, 60000, uint64(time.Second)*10/uint64(time.Millisecond), clientUniqueID) re.LessOrEqual(math.Abs(tb.Tokens-22000), 1e-7) - re.Equal(trickle, int64(time.Second)*10/int64(time.Millisecond)) + re.Equal(int64(time.Second)*10/int64(time.Millisecond), trickle) // Get reserved 10000 tokens = fillrate(2000) * 10 * defaultReserveRatio(0.5) // Max loan tokens is 60000. 
tb, trickle = gtb.request(time1, 3000, uint64(time.Second)*10/uint64(time.Millisecond), clientUniqueID) re.LessOrEqual(math.Abs(tb.Tokens-3000), 1e-7) - re.Equal(trickle, int64(time.Second)*10/int64(time.Millisecond)) + re.Equal(int64(time.Second)*10/int64(time.Millisecond), trickle) tb, trickle = gtb.request(time1, 12000, uint64(time.Second)*10/uint64(time.Millisecond), clientUniqueID) re.LessOrEqual(math.Abs(tb.Tokens-10000), 1e-7) - re.Equal(trickle, int64(time.Second)*10/int64(time.Millisecond)) + re.Equal(int64(time.Second)*10/int64(time.Millisecond), trickle) time2 := time1.Add(20 * time.Second) tb, trickle = gtb.request(time2, 20000, uint64(time.Second)*10/uint64(time.Millisecond), clientUniqueID) re.LessOrEqual(math.Abs(tb.Tokens-20000), 1e-7) - re.Equal(trickle, int64(time.Second)*10/int64(time.Millisecond)) + re.Equal(int64(time.Second)*10/int64(time.Millisecond), trickle) } diff --git a/pkg/ratelimit/controller_test.go b/pkg/ratelimit/controller_test.go index a830217cb9f..59cc0c16445 100644 --- a/pkg/ratelimit/controller_test.go +++ b/pkg/ratelimit/controller_test.go @@ -88,7 +88,7 @@ func TestControllerWithConcurrencyLimiter(t *testing.T) { opt: UpdateConcurrencyLimiter(10), checkOptionStatus: func(label string, o Option) { status := limiter.Update(label, o) - re.True(status&ConcurrencyChanged != 0) + re.NotZero(status & ConcurrencyChanged) }, totalRequest: 15, fail: 5, @@ -105,7 +105,7 @@ func TestControllerWithConcurrencyLimiter(t *testing.T) { opt: UpdateConcurrencyLimiter(10), checkOptionStatus: func(label string, o Option) { status := limiter.Update(label, o) - re.True(status&ConcurrencyNoChange != 0) + re.NotZero(status & ConcurrencyNoChange) }, checkStatusFunc: func(label string) {}, }, @@ -113,7 +113,7 @@ func TestControllerWithConcurrencyLimiter(t *testing.T) { opt: UpdateConcurrencyLimiter(5), checkOptionStatus: func(label string, o Option) { status := limiter.Update(label, o) - re.True(status&ConcurrencyChanged != 0) + re.NotZero(status & ConcurrencyChanged) }, totalRequest: 15, fail: 10, @@ -130,7 +130,7 @@ func TestControllerWithConcurrencyLimiter(t *testing.T) { opt: UpdateConcurrencyLimiter(0), checkOptionStatus: func(label string, o Option) { status := limiter.Update(label, o) - re.True(status&ConcurrencyDeleted != 0) + re.NotZero(status & ConcurrencyDeleted) }, totalRequest: 15, fail: 0, @@ -152,7 +152,7 @@ func TestControllerWithConcurrencyLimiter(t *testing.T) { opt: UpdateConcurrencyLimiter(15), checkOptionStatus: func(label string, o Option) { status := limiter.Update(label, o) - re.True(status&ConcurrencyChanged != 0) + re.NotZero(status & ConcurrencyChanged) }, totalRequest: 10, fail: 0, @@ -169,7 +169,7 @@ func TestControllerWithConcurrencyLimiter(t *testing.T) { opt: UpdateConcurrencyLimiter(10), checkOptionStatus: func(label string, o Option) { status := limiter.Update(label, o) - re.True(status&ConcurrencyChanged != 0) + re.NotZero(status & ConcurrencyChanged) }, totalRequest: 10, fail: 10, @@ -202,7 +202,7 @@ func TestBlockList(t *testing.T) { re.True(limiter.IsInAllowList(label)) status := UpdateQPSLimiter(float64(rate.Every(time.Second)), 1)(label, limiter) - re.True(status&InAllowList != 0) + re.NotZero(status & InAllowList) for i := 0; i < 10; i++ { _, err := limiter.Allow(label) re.NoError(err) @@ -221,7 +221,7 @@ func TestControllerWithQPSLimiter(t *testing.T) { opt: UpdateQPSLimiter(float64(rate.Every(time.Second)), 1), checkOptionStatus: func(label string, o Option) { status := limiter.Update(label, o) - re.True(status&QPSChanged != 0) + 
re.NotZero(status & QPSChanged) }, totalRequest: 3, fail: 2, @@ -237,7 +237,7 @@ func TestControllerWithQPSLimiter(t *testing.T) { opt: UpdateQPSLimiter(float64(rate.Every(time.Second)), 1), checkOptionStatus: func(label string, o Option) { status := limiter.Update(label, o) - re.True(status&QPSNoChange != 0) + re.NotZero(status & QPSNoChange) }, checkStatusFunc: func(label string) {}, }, @@ -245,7 +245,7 @@ func TestControllerWithQPSLimiter(t *testing.T) { opt: UpdateQPSLimiter(5, 5), checkOptionStatus: func(label string, o Option) { status := limiter.Update(label, o) - re.True(status&QPSChanged != 0) + re.NotZero(status & QPSChanged) }, totalRequest: 10, fail: 5, @@ -261,7 +261,7 @@ func TestControllerWithQPSLimiter(t *testing.T) { opt: UpdateQPSLimiter(0, 0), checkOptionStatus: func(label string, o Option) { status := limiter.Update(label, o) - re.True(status&QPSDeleted != 0) + re.NotZero(status & QPSDeleted) }, totalRequest: 10, fail: 0, @@ -271,7 +271,7 @@ func TestControllerWithQPSLimiter(t *testing.T) { checkStatusFunc: func(label string) { limit, burst := limiter.GetQPSLimiterStatus(label) re.Equal(rate.Limit(0), limit) - re.Equal(0, burst) + re.Zero(burst) }, }, }, @@ -283,7 +283,7 @@ func TestControllerWithQPSLimiter(t *testing.T) { opt: UpdateQPSLimiter(50, 5), checkOptionStatus: func(label string, o Option) { status := limiter.Update(label, o) - re.True(status&QPSChanged != 0) + re.NotZero(status & QPSChanged) }, totalRequest: 10, fail: 5, @@ -299,7 +299,7 @@ func TestControllerWithQPSLimiter(t *testing.T) { opt: UpdateQPSLimiter(0, 0), checkOptionStatus: func(label string, o Option) { status := limiter.Update(label, o) - re.True(status&QPSDeleted != 0) + re.NotZero(status & QPSDeleted) }, totalRequest: 10, fail: 0, @@ -309,7 +309,7 @@ func TestControllerWithQPSLimiter(t *testing.T) { checkStatusFunc: func(label string) { limit, burst := limiter.GetQPSLimiterStatus(label) re.Equal(rate.Limit(0), limit) - re.Equal(0, burst) + re.Zero(burst) }, }, }, @@ -334,7 +334,7 @@ func TestControllerWithTwoLimiters(t *testing.T) { }), checkOptionStatus: func(label string, o Option) { status := limiter.Update(label, o) - re.True(status&QPSChanged != 0) + re.NotZero(status & QPSChanged) }, totalRequest: 200, fail: 100, @@ -354,7 +354,7 @@ func TestControllerWithTwoLimiters(t *testing.T) { opt: UpdateQPSLimiter(float64(rate.Every(time.Second)), 1), checkOptionStatus: func(label string, o Option) { status := limiter.Update(label, o) - re.True(status&QPSChanged != 0) + re.NotZero(status & QPSChanged) }, totalRequest: 200, fail: 199, @@ -376,7 +376,7 @@ func TestControllerWithTwoLimiters(t *testing.T) { opt: UpdateQPSLimiter(50, 5), checkOptionStatus: func(label string, o Option) { status := limiter.Update(label, o) - re.True(status&QPSChanged != 0) + re.NotZero(status & QPSChanged) }, totalRequest: 10, fail: 5, @@ -392,7 +392,7 @@ func TestControllerWithTwoLimiters(t *testing.T) { opt: UpdateQPSLimiter(0, 0), checkOptionStatus: func(label string, o Option) { status := limiter.Update(label, o) - re.True(status&QPSDeleted != 0) + re.NotZero(status & QPSDeleted) }, totalRequest: 10, fail: 0, diff --git a/pkg/ratelimit/limiter_test.go b/pkg/ratelimit/limiter_test.go index 8834495f3e9..88da865879b 100644 --- a/pkg/ratelimit/limiter_test.go +++ b/pkg/ratelimit/limiter_test.go @@ -45,7 +45,7 @@ func TestWithConcurrencyLimiter(t *testing.T) { limiter := newLimiter() status := limiter.updateConcurrencyConfig(10) - re.True(status&ConcurrencyChanged != 0) + re.NotZero(status & ConcurrencyChanged) var lock 
syncutil.Mutex successCount, failedCount := 0, 0 var wg sync.WaitGroup @@ -68,10 +68,10 @@ func TestWithConcurrencyLimiter(t *testing.T) { re.Equal(uint64(0), current) status = limiter.updateConcurrencyConfig(10) - re.True(status&ConcurrencyNoChange != 0) + re.NotZero(status & ConcurrencyNoChange) status = limiter.updateConcurrencyConfig(5) - re.True(status&ConcurrencyChanged != 0) + re.NotZero(status & ConcurrencyChanged) failedCount = 0 successCount = 0 for i := 0; i < 15; i++ { @@ -86,7 +86,7 @@ func TestWithConcurrencyLimiter(t *testing.T) { } status = limiter.updateConcurrencyConfig(0) - re.True(status&ConcurrencyDeleted != 0) + re.NotZero(status & ConcurrencyDeleted) failedCount = 0 successCount = 0 for i := 0; i < 15; i++ { @@ -107,7 +107,7 @@ func TestWithQPSLimiter(t *testing.T) { re := require.New(t) limiter := newLimiter() status := limiter.updateQPSConfig(float64(rate.Every(time.Second)), 1) - re.True(status&QPSChanged != 0) + re.NotZero(status & QPSChanged) var lock syncutil.Mutex successCount, failedCount := 0, 0 @@ -126,10 +126,10 @@ func TestWithQPSLimiter(t *testing.T) { re.Equal(1, burst) status = limiter.updateQPSConfig(float64(rate.Every(time.Second)), 1) - re.True(status&QPSNoChange != 0) + re.NotZero(status & QPSNoChange) status = limiter.updateQPSConfig(5, 5) - re.True(status&QPSChanged != 0) + re.NotZero(status & QPSChanged) limit, burst = limiter.getQPSLimiterStatus() re.Equal(rate.Limit(5), limit) re.Equal(5, burst) @@ -147,19 +147,19 @@ func TestWithQPSLimiter(t *testing.T) { time.Sleep(time.Second) status = limiter.updateQPSConfig(0, 0) - re.True(status&QPSDeleted != 0) + re.NotZero(status & QPSDeleted) for i := 0; i < 10; i++ { _, err := limiter.allow() re.NoError(err) } qLimit, qCurrent := limiter.getQPSLimiterStatus() re.Equal(rate.Limit(0), qLimit) - re.Equal(0, qCurrent) + re.Zero(qCurrent) successCount = 0 failedCount = 0 status = limiter.updateQPSConfig(float64(rate.Every(3*time.Second)), 100) - re.True(status&QPSChanged != 0) + re.NotZero(status & QPSChanged) wg.Add(200) for i := 0; i < 200; i++ { go countSingleLimiterHandleResult(limiter, &successCount, &failedCount, &lock, &wg, r) @@ -186,8 +186,8 @@ func TestWithTwoLimiters(t *testing.T) { } limiter := newLimiter() status := limiter.updateDimensionConfig(cfg) - re.True(status&QPSChanged != 0) - re.True(status&ConcurrencyChanged != 0) + re.NotZero(status & QPSChanged) + re.NotZero(status & ConcurrencyChanged) var lock syncutil.Mutex successCount, failedCount := 0, 0 @@ -214,7 +214,7 @@ func TestWithTwoLimiters(t *testing.T) { r.release() } status = limiter.updateQPSConfig(float64(rate.Every(10*time.Second)), 1) - re.True(status&QPSChanged != 0) + re.NotZero(status & QPSChanged) wg.Add(100) for i := 0; i < 100; i++ { go countSingleLimiterHandleResult(limiter, &successCount, &failedCount, &lock, &wg, r) @@ -228,8 +228,8 @@ func TestWithTwoLimiters(t *testing.T) { cfg = &DimensionConfig{} status = limiter.updateDimensionConfig(cfg) - re.True(status&ConcurrencyDeleted != 0) - re.True(status&QPSDeleted != 0) + re.NotZero(status & ConcurrencyDeleted) + re.NotZero(status & QPSDeleted) } func countSingleLimiterHandleResult(limiter *limiter, successCount *int, diff --git a/pkg/schedule/checker/rule_checker_test.go b/pkg/schedule/checker/rule_checker_test.go index e77830fac49..72d3e7e5ec4 100644 --- a/pkg/schedule/checker/rule_checker_test.go +++ b/pkg/schedule/checker/rule_checker_test.go @@ -426,6 +426,7 @@ func (suite *ruleCheckerTestSuite) TestFixRoleLeaderIssue3130() { } func (suite *ruleCheckerTestSuite) 
TestFixLeaderRoleWithUnhealthyRegion() { + re := suite.Require() suite.cluster.AddLabelsStore(1, 1, map[string]string{"rule": "follower"}) suite.cluster.AddLabelsStore(2, 1, map[string]string{"rule": "follower"}) suite.cluster.AddLabelsStore(3, 1, map[string]string{"rule": "leader"}) @@ -456,12 +457,12 @@ func (suite *ruleCheckerTestSuite) TestFixLeaderRoleWithUnhealthyRegion() { }, }, }) - suite.NoError(err) + re.NoError(err) // no Leader suite.cluster.AddNoLeaderRegion(1, 1, 2, 3) r := suite.cluster.GetRegion(1) op := suite.rc.Check(r) - suite.Nil(op) + re.Nil(op) } func (suite *ruleCheckerTestSuite) TestFixRuleWitness() { @@ -532,6 +533,7 @@ func (suite *ruleCheckerTestSuite) TestFixRuleWitness3() { } func (suite *ruleCheckerTestSuite) TestFixRuleWitness4() { + re := suite.Require() suite.cluster.AddLabelsStore(1, 1, map[string]string{"A": "leader"}) suite.cluster.AddLabelsStore(2, 1, map[string]string{"B": "voter"}) suite.cluster.AddLabelsStore(3, 1, map[string]string{"C": "learner"}) @@ -565,12 +567,12 @@ func (suite *ruleCheckerTestSuite) TestFixRuleWitness4() { }, }, }) - suite.NoError(err) + re.NoError(err) op := suite.rc.Check(r) - suite.NotNil(op) - suite.Equal("fix-non-witness-peer", op.Desc()) - suite.Equal(uint64(3), op.Step(0).(operator.BecomeNonWitness).StoreID) + re.NotNil(op) + re.Equal("fix-non-witness-peer", op.Desc()) + re.Equal(uint64(3), op.Step(0).(operator.BecomeNonWitness).StoreID) } func (suite *ruleCheckerTestSuite) TestFixRuleWitness5() { diff --git a/pkg/schedule/filter/counter_test.go b/pkg/schedule/filter/counter_test.go index 067a07f138b..78a1ef5395b 100644 --- a/pkg/schedule/filter/counter_test.go +++ b/pkg/schedule/filter/counter_test.go @@ -34,7 +34,7 @@ func TestString(t *testing.T) { for _, data := range testcases { re.Equal(data.expected, filterType(data.filterType).String()) } - re.Equal(int(filtersLen), len(filters)) + re.Len(filters, int(filtersLen)) } func TestCounter(t *testing.T) { @@ -42,9 +42,9 @@ func TestCounter(t *testing.T) { counter := NewCounter(BalanceLeader.String()) counter.inc(source, storeStateTombstone, 1, 2) counter.inc(target, storeStateTombstone, 1, 2) - re.Equal(counter.counter[source][storeStateTombstone][1][2], 1) - re.Equal(counter.counter[target][storeStateTombstone][1][2], 1) + re.Equal(1, counter.counter[source][storeStateTombstone][1][2]) + re.Equal(1, counter.counter[target][storeStateTombstone][1][2]) counter.Flush() - re.Equal(counter.counter[source][storeStateTombstone][1][2], 0) - re.Equal(counter.counter[target][storeStateTombstone][1][2], 0) + re.Zero(counter.counter[source][storeStateTombstone][1][2]) + re.Zero(counter.counter[target][storeStateTombstone][1][2]) } diff --git a/pkg/schedule/labeler/rule_test.go b/pkg/schedule/labeler/rule_test.go index 0b341754007..00c179b36b8 100644 --- a/pkg/schedule/labeler/rule_test.go +++ b/pkg/schedule/labeler/rule_test.go @@ -42,7 +42,7 @@ func TestRegionLabelTTL(t *testing.T) { label.TTL = "10h10m10s10ms" err = label.checkAndAdjustExpire() re.NoError(err) - re.Greater(len(label.StartAt), 0) + re.NotEmpty(label.StartAt) re.False(label.expireBefore(time.Now().Add(time.Hour))) re.True(label.expireBefore(time.Now().Add(24 * time.Hour))) @@ -56,5 +56,5 @@ func TestRegionLabelTTL(t *testing.T) { re.Equal(label.TTL, label2.TTL) label2.checkAndAdjustExpire() // The `expire` should be the same with minor inaccuracies. 
- re.True(math.Abs(label2.expire.Sub(*label.expire).Seconds()) < 1) + re.Less(math.Abs(label2.expire.Sub(*label.expire).Seconds()), 1.0) } diff --git a/pkg/schedule/operator/builder_test.go b/pkg/schedule/operator/builder_test.go index 864734eb5ff..b010dcf935b 100644 --- a/pkg/schedule/operator/builder_test.go +++ b/pkg/schedule/operator/builder_test.go @@ -62,21 +62,22 @@ func (suite *operatorBuilderTestSuite) TearDownTest() { } func (suite *operatorBuilderTestSuite) TestNewBuilder() { + re := suite.Require() peers := []*metapb.Peer{{Id: 11, StoreId: 1}, {Id: 12, StoreId: 2, Role: metapb.PeerRole_Learner}} region := core.NewRegionInfo(&metapb.Region{Id: 42, Peers: peers}, peers[0]) builder := NewBuilder("test", suite.cluster, region) - suite.NoError(builder.err) - suite.Len(builder.originPeers, 2) - suite.Equal(peers[0], builder.originPeers[1]) - suite.Equal(peers[1], builder.originPeers[2]) - suite.Equal(uint64(1), builder.originLeaderStoreID) - suite.Len(builder.targetPeers, 2) - suite.Equal(peers[0], builder.targetPeers[1]) - suite.Equal(peers[1], builder.targetPeers[2]) + re.NoError(builder.err) + re.Len(builder.originPeers, 2) + re.Equal(peers[0], builder.originPeers[1]) + re.Equal(peers[1], builder.originPeers[2]) + re.Equal(uint64(1), builder.originLeaderStoreID) + re.Len(builder.targetPeers, 2) + re.Equal(peers[0], builder.targetPeers[1]) + re.Equal(peers[1], builder.targetPeers[2]) region = region.Clone(core.WithLeader(nil)) builder = NewBuilder("test", suite.cluster, region) - suite.Error(builder.err) + re.Error(builder.err) } func (suite *operatorBuilderTestSuite) newBuilder() *Builder { @@ -90,18 +91,19 @@ func (suite *operatorBuilderTestSuite) newBuilder() *Builder { } func (suite *operatorBuilderTestSuite) TestRecord() { - suite.Error(suite.newBuilder().AddPeer(&metapb.Peer{StoreId: 1}).err) - suite.NoError(suite.newBuilder().AddPeer(&metapb.Peer{StoreId: 4}).err) - suite.Error(suite.newBuilder().PromoteLearner(1).err) - suite.NoError(suite.newBuilder().PromoteLearner(3).err) - suite.NoError(suite.newBuilder().SetLeader(1).SetLeader(2).err) - suite.Error(suite.newBuilder().SetLeader(3).err) - suite.Error(suite.newBuilder().RemovePeer(4).err) - suite.NoError(suite.newBuilder().AddPeer(&metapb.Peer{StoreId: 4, Role: metapb.PeerRole_Learner}).RemovePeer(4).err) - suite.Error(suite.newBuilder().SetLeader(2).RemovePeer(2).err) - suite.Error(suite.newBuilder().PromoteLearner(4).err) - suite.Error(suite.newBuilder().SetLeader(4).err) - suite.Error(suite.newBuilder().SetPeers(map[uint64]*metapb.Peer{2: {Id: 2}}).err) + re := suite.Require() + re.Error(suite.newBuilder().AddPeer(&metapb.Peer{StoreId: 1}).err) + re.NoError(suite.newBuilder().AddPeer(&metapb.Peer{StoreId: 4}).err) + re.Error(suite.newBuilder().PromoteLearner(1).err) + re.NoError(suite.newBuilder().PromoteLearner(3).err) + re.NoError(suite.newBuilder().SetLeader(1).SetLeader(2).err) + re.Error(suite.newBuilder().SetLeader(3).err) + re.Error(suite.newBuilder().RemovePeer(4).err) + re.NoError(suite.newBuilder().AddPeer(&metapb.Peer{StoreId: 4, Role: metapb.PeerRole_Learner}).RemovePeer(4).err) + re.Error(suite.newBuilder().SetLeader(2).RemovePeer(2).err) + re.Error(suite.newBuilder().PromoteLearner(4).err) + re.Error(suite.newBuilder().SetLeader(4).err) + re.Error(suite.newBuilder().SetPeers(map[uint64]*metapb.Peer{2: {Id: 2}}).err) m := map[uint64]*metapb.Peer{ 2: {StoreId: 2}, @@ -109,18 +111,19 @@ func (suite *operatorBuilderTestSuite) TestRecord() { 4: {StoreId: 4}, } builder := 
suite.newBuilder().SetPeers(m).SetAddLightPeer() - suite.Len(builder.targetPeers, 3) - suite.Equal(m[2], builder.targetPeers[2]) - suite.Equal(m[3], builder.targetPeers[3]) - suite.Equal(m[4], builder.targetPeers[4]) - suite.Equal(uint64(0), builder.targetLeaderStoreID) - suite.True(builder.addLightPeer) + re.Len(builder.targetPeers, 3) + re.Equal(m[2], builder.targetPeers[2]) + re.Equal(m[3], builder.targetPeers[3]) + re.Equal(m[4], builder.targetPeers[4]) + re.Equal(uint64(0), builder.targetLeaderStoreID) + re.True(builder.addLightPeer) } func (suite *operatorBuilderTestSuite) TestPrepareBuild() { + re := suite.Require() // no voter. _, err := suite.newBuilder().SetPeers(map[uint64]*metapb.Peer{4: {StoreId: 4, Role: metapb.PeerRole_Learner}}).prepareBuild() - suite.Error(err) + re.Error(err) // use joint consensus builder := suite.newBuilder().SetPeers(map[uint64]*metapb.Peer{ @@ -130,19 +133,19 @@ func (suite *operatorBuilderTestSuite) TestPrepareBuild() { 5: {StoreId: 5, Role: metapb.PeerRole_Learner}, }) _, err = builder.prepareBuild() - suite.NoError(err) - suite.Len(builder.toAdd, 2) - suite.NotEqual(metapb.PeerRole_Learner, builder.toAdd[4].GetRole()) - suite.Equal(uint64(14), builder.toAdd[4].GetId()) - suite.Equal(metapb.PeerRole_Learner, builder.toAdd[5].GetRole()) - suite.NotEqual(uint64(0), builder.toAdd[5].GetId()) - suite.Len(builder.toRemove, 1) - suite.NotNil(builder.toRemove[2]) - suite.Len(builder.toPromote, 1) - suite.NotNil(builder.toPromote[3]) - suite.Len(builder.toDemote, 1) - suite.NotNil(builder.toDemote[1]) - suite.Equal(uint64(1), builder.currentLeaderStoreID) + re.NoError(err) + re.Len(builder.toAdd, 2) + re.NotEqual(metapb.PeerRole_Learner, builder.toAdd[4].GetRole()) + re.Equal(uint64(14), builder.toAdd[4].GetId()) + re.Equal(metapb.PeerRole_Learner, builder.toAdd[5].GetRole()) + re.NotEqual(uint64(0), builder.toAdd[5].GetId()) + re.Len(builder.toRemove, 1) + re.NotNil(builder.toRemove[2]) + re.Len(builder.toPromote, 1) + re.NotNil(builder.toPromote[3]) + re.Len(builder.toDemote, 1) + re.NotNil(builder.toDemote[1]) + re.Equal(uint64(1), builder.currentLeaderStoreID) // do not use joint consensus builder = suite.newBuilder().SetPeers(map[uint64]*metapb.Peer{ @@ -154,22 +157,23 @@ func (suite *operatorBuilderTestSuite) TestPrepareBuild() { }) builder.useJointConsensus = false _, err = builder.prepareBuild() - suite.NoError(err) - suite.Len(builder.toAdd, 3) - suite.Equal(metapb.PeerRole_Learner, builder.toAdd[1].GetRole()) - suite.NotEqual(uint64(0), builder.toAdd[1].GetId()) - suite.NotEqual(metapb.PeerRole_Learner, builder.toAdd[4].GetRole()) - suite.Equal(uint64(14), builder.toAdd[4].GetId()) - suite.Equal(metapb.PeerRole_Learner, builder.toAdd[5].GetRole()) - suite.NotEqual(uint64(0), builder.toAdd[5].GetId()) - suite.Len(builder.toRemove, 1) - suite.NotNil(builder.toRemove[1]) - suite.Len(builder.toPromote, 1) - suite.NotNil(builder.toPromote[3]) - suite.Equal(uint64(1), builder.currentLeaderStoreID) + re.NoError(err) + re.Len(builder.toAdd, 3) + re.Equal(metapb.PeerRole_Learner, builder.toAdd[1].GetRole()) + re.NotEqual(uint64(0), builder.toAdd[1].GetId()) + re.NotEqual(metapb.PeerRole_Learner, builder.toAdd[4].GetRole()) + re.Equal(uint64(14), builder.toAdd[4].GetId()) + re.Equal(metapb.PeerRole_Learner, builder.toAdd[5].GetRole()) + re.NotEqual(uint64(0), builder.toAdd[5].GetId()) + re.Len(builder.toRemove, 1) + re.NotNil(builder.toRemove[1]) + re.Len(builder.toPromote, 1) + re.NotNil(builder.toPromote[3]) + re.Equal(uint64(1), 
builder.currentLeaderStoreID) } func (suite *operatorBuilderTestSuite) TestBuild() { + re := suite.Require() type testCase struct { name string useJointConsensus bool @@ -545,42 +549,42 @@ func (suite *operatorBuilderTestSuite) TestBuild() { builder.SetPeers(m).SetLeader(testCase.targetPeers[0].GetStoreId()) op, err := builder.Build(0) if len(testCase.steps) == 0 { - suite.Error(err) + re.Error(err) continue } - suite.NoError(err) - suite.Equal(testCase.kind, op.Kind()) - suite.Len(testCase.steps, op.Len()) + re.NoError(err) + re.Equal(testCase.kind, op.Kind()) + re.Len(testCase.steps, op.Len()) for i := 0; i < op.Len(); i++ { switch step := op.Step(i).(type) { case TransferLeader: - suite.Equal(testCase.steps[i].(TransferLeader).FromStore, step.FromStore) - suite.Equal(testCase.steps[i].(TransferLeader).ToStore, step.ToStore) + re.Equal(testCase.steps[i].(TransferLeader).FromStore, step.FromStore) + re.Equal(testCase.steps[i].(TransferLeader).ToStore, step.ToStore) case AddPeer: - suite.Equal(testCase.steps[i].(AddPeer).ToStore, step.ToStore) + re.Equal(testCase.steps[i].(AddPeer).ToStore, step.ToStore) case RemovePeer: - suite.Equal(testCase.steps[i].(RemovePeer).FromStore, step.FromStore) + re.Equal(testCase.steps[i].(RemovePeer).FromStore, step.FromStore) case AddLearner: - suite.Equal(testCase.steps[i].(AddLearner).ToStore, step.ToStore) + re.Equal(testCase.steps[i].(AddLearner).ToStore, step.ToStore) case PromoteLearner: - suite.Equal(testCase.steps[i].(PromoteLearner).ToStore, step.ToStore) + re.Equal(testCase.steps[i].(PromoteLearner).ToStore, step.ToStore) case ChangePeerV2Enter: - suite.Len(step.PromoteLearners, len(testCase.steps[i].(ChangePeerV2Enter).PromoteLearners)) - suite.Len(step.DemoteVoters, len(testCase.steps[i].(ChangePeerV2Enter).DemoteVoters)) + re.Len(step.PromoteLearners, len(testCase.steps[i].(ChangePeerV2Enter).PromoteLearners)) + re.Len(step.DemoteVoters, len(testCase.steps[i].(ChangePeerV2Enter).DemoteVoters)) for j, p := range testCase.steps[i].(ChangePeerV2Enter).PromoteLearners { - suite.Equal(p.ToStore, step.PromoteLearners[j].ToStore) + re.Equal(p.ToStore, step.PromoteLearners[j].ToStore) } for j, d := range testCase.steps[i].(ChangePeerV2Enter).DemoteVoters { - suite.Equal(d.ToStore, step.DemoteVoters[j].ToStore) + re.Equal(d.ToStore, step.DemoteVoters[j].ToStore) } case ChangePeerV2Leave: - suite.Len(step.PromoteLearners, len(testCase.steps[i].(ChangePeerV2Leave).PromoteLearners)) - suite.Len(step.DemoteVoters, len(testCase.steps[i].(ChangePeerV2Leave).DemoteVoters)) + re.Len(step.PromoteLearners, len(testCase.steps[i].(ChangePeerV2Leave).PromoteLearners)) + re.Len(step.DemoteVoters, len(testCase.steps[i].(ChangePeerV2Leave).DemoteVoters)) for j, p := range testCase.steps[i].(ChangePeerV2Leave).PromoteLearners { - suite.Equal(p.ToStore, step.PromoteLearners[j].ToStore) + re.Equal(p.ToStore, step.PromoteLearners[j].ToStore) } for j, d := range testCase.steps[i].(ChangePeerV2Leave).DemoteVoters { - suite.Equal(d.ToStore, step.DemoteVoters[j].ToStore) + re.Equal(d.ToStore, step.DemoteVoters[j].ToStore) } } } @@ -588,26 +592,27 @@ func (suite *operatorBuilderTestSuite) TestBuild() { } func (suite *operatorBuilderTestSuite) TestTargetUnhealthyPeer() { + re := suite.Require() p := &metapb.Peer{Id: 2, StoreId: 2, Role: metapb.PeerRole_Learner} region := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: []*metapb.Peer{{Id: 1, StoreId: 1}, p}}, &metapb.Peer{Id: 1, StoreId: 1}, core.WithPendingPeers([]*metapb.Peer{p})) builder := NewBuilder("test", suite.cluster, 
region) builder.PromoteLearner(2) - suite.Error(builder.err) + re.Error(builder.err) region = core.NewRegionInfo(&metapb.Region{Id: 1, Peers: []*metapb.Peer{{Id: 1, StoreId: 1}, p}}, &metapb.Peer{Id: 1, StoreId: 1}, core.WithDownPeers([]*pdpb.PeerStats{{Peer: p}})) builder = NewBuilder("test", suite.cluster, region) builder.PromoteLearner(2) - suite.Error(builder.err) + re.Error(builder.err) p = &metapb.Peer{Id: 2, StoreId: 2, Role: metapb.PeerRole_Voter} region = core.NewRegionInfo(&metapb.Region{Id: 1, Peers: []*metapb.Peer{{Id: 1, StoreId: 1}, p}}, &metapb.Peer{Id: 1, StoreId: 1}, core.WithPendingPeers([]*metapb.Peer{p})) builder = NewBuilder("test", suite.cluster, region) builder.SetLeader(2) - suite.Error(builder.err) + re.Error(builder.err) region = core.NewRegionInfo(&metapb.Region{Id: 1, Peers: []*metapb.Peer{{Id: 1, StoreId: 1}, p}}, &metapb.Peer{Id: 1, StoreId: 1}, core.WithDownPeers([]*pdpb.PeerStats{{Peer: p}})) builder = NewBuilder("test", suite.cluster, region) builder.SetLeader(2) - suite.Error(builder.err) + re.Error(builder.err) } diff --git a/pkg/schedule/placement/rule_manager_test.go b/pkg/schedule/placement/rule_manager_test.go index c0987f6dd33..0539e935113 100644 --- a/pkg/schedule/placement/rule_manager_test.go +++ b/pkg/schedule/placement/rule_manager_test.go @@ -161,11 +161,11 @@ func TestSaveLoad(t *testing.T) { err := m2.Initialize(3, []string{"no", "labels"}, "") re.NoError(err) re.Len(m2.GetAllRules(), 3) - re.Equal(rules[0].String(), m2.GetRule(DefaultGroupID, DefaultRuleID).String()) - re.Equal(rules[1].String(), m2.GetRule("foo", "baz").String()) - re.Equal(rules[2].String(), m2.GetRule("foo", "bar").String()) - re.Equal(manager.GetRulesCount(), 3) - re.Equal(manager.GetGroupsCount(), 2) + re.Equal(m2.GetRule(DefaultGroupID, DefaultRuleID).String(), rules[0].String()) + re.Equal(m2.GetRule("foo", "baz").String(), rules[1].String()) + re.Equal(m2.GetRule("foo", "bar").String(), rules[2].String()) + re.Equal(3, manager.GetRulesCount()) + re.Equal(2, manager.GetGroupsCount()) } func TestSetAfterGet(t *testing.T) { diff --git a/pkg/schedule/plan/balance_plan_test.go b/pkg/schedule/plan/balance_plan_test.go index 59ad637d5c8..59f2acc689a 100644 --- a/pkg/schedule/plan/balance_plan_test.go +++ b/pkg/schedule/plan/balance_plan_test.go @@ -114,6 +114,7 @@ func (suite *balanceSchedulerPlanAnalyzeTestSuite) TearDownSuite() { } func (suite *balanceSchedulerPlanAnalyzeTestSuite) TestAnalyzerResult1() { + re := suite.Require() plans := make([]Plan, 0) plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[4], Step: 2, Target: suite.stores[0], Status: NewStatus(StatusStoreScoreDisallowed)}) plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[4], Step: 2, Target: suite.stores[1], Status: NewStatus(StatusStoreScoreDisallowed)}) @@ -141,9 +142,9 @@ func (suite *balanceSchedulerPlanAnalyzeTestSuite) TestAnalyzerResult1() { plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[0], Step: 2, Target: suite.stores[3], Status: NewStatus(StatusStoreNotMatchRule)}) plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[0], Step: 2, Target: suite.stores[4], Status: NewStatus(StatusStoreScoreDisallowed)}) statuses, isNormal, err := BalancePlanSummary(plans) - suite.NoError(err) - suite.True(isNormal) - suite.True(suite.check(statuses, + re.NoError(err) + re.True(isNormal) + re.True(suite.check(statuses, map[uint64]*Status{ 1: NewStatus(StatusStoreNotMatchRule), 2: NewStatus(StatusStoreNotMatchRule), @@ -154,6 +155,7 @@ func (suite 
*balanceSchedulerPlanAnalyzeTestSuite) TestAnalyzerResult1() { } func (suite *balanceSchedulerPlanAnalyzeTestSuite) TestAnalyzerResult2() { + re := suite.Require() plans := make([]Plan, 0) plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[4], Step: 0, Status: NewStatus(StatusStoreDown)}) plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[3], Step: 0, Status: NewStatus(StatusStoreDown)}) @@ -161,9 +163,9 @@ func (suite *balanceSchedulerPlanAnalyzeTestSuite) TestAnalyzerResult2() { plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[1], Step: 0, Status: NewStatus(StatusStoreDown)}) plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[0], Step: 0, Status: NewStatus(StatusStoreDown)}) statuses, isNormal, err := BalancePlanSummary(plans) - suite.NoError(err) - suite.False(isNormal) - suite.True(suite.check(statuses, + re.NoError(err) + re.False(isNormal) + re.True(suite.check(statuses, map[uint64]*Status{ 1: NewStatus(StatusStoreDown), 2: NewStatus(StatusStoreDown), @@ -174,6 +176,7 @@ func (suite *balanceSchedulerPlanAnalyzeTestSuite) TestAnalyzerResult2() { } func (suite *balanceSchedulerPlanAnalyzeTestSuite) TestAnalyzerResult3() { + re := suite.Require() plans := make([]Plan, 0) plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[4], Step: 0, Status: NewStatus(StatusStoreDown)}) plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[3], Region: suite.regions[0], Step: 1, Status: NewStatus(StatusRegionNotMatchRule)}) @@ -181,9 +184,9 @@ func (suite *balanceSchedulerPlanAnalyzeTestSuite) TestAnalyzerResult3() { plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[1], Region: suite.regions[1], Step: 1, Status: NewStatus(StatusRegionNotMatchRule)}) plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[0], Region: suite.regions[1], Step: 1, Status: NewStatus(StatusRegionNotMatchRule)}) statuses, isNormal, err := BalancePlanSummary(plans) - suite.NoError(err) - suite.False(isNormal) - suite.True(suite.check(statuses, + re.NoError(err) + re.False(isNormal) + re.True(suite.check(statuses, map[uint64]*Status{ 1: NewStatus(StatusRegionNotMatchRule), 2: NewStatus(StatusRegionNotMatchRule), @@ -193,6 +196,7 @@ func (suite *balanceSchedulerPlanAnalyzeTestSuite) TestAnalyzerResult3() { } func (suite *balanceSchedulerPlanAnalyzeTestSuite) TestAnalyzerResult4() { + re := suite.Require() plans := make([]Plan, 0) plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[4], Step: 0, Status: NewStatus(StatusStoreDown)}) plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[3], Region: suite.regions[0], Step: 1, Status: NewStatus(StatusRegionNotMatchRule)}) @@ -208,9 +212,9 @@ func (suite *balanceSchedulerPlanAnalyzeTestSuite) TestAnalyzerResult4() { plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[0], Target: suite.stores[3], Step: 2, Status: NewStatus(StatusStoreNotMatchRule)}) plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[0], Target: suite.stores[4], Step: 2, Status: NewStatus(StatusStoreDown)}) statuses, isNormal, err := BalancePlanSummary(plans) - suite.NoError(err) - suite.False(isNormal) - suite.True(suite.check(statuses, + re.NoError(err) + re.False(isNormal) + re.True(suite.check(statuses, map[uint64]*Status{ 1: NewStatus(StatusStoreAlreadyHasPeer), 2: NewStatus(StatusStoreAlreadyHasPeer), @@ -221,6 +225,7 @@ func (suite *balanceSchedulerPlanAnalyzeTestSuite) TestAnalyzerResult4() { } func (suite *balanceSchedulerPlanAnalyzeTestSuite) 
TestAnalyzerResult5() { + re := suite.Require() plans := make([]Plan, 0) plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[4], Step: 0, Status: NewStatus(StatusStoreRemoveLimitThrottled)}) plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[3], Region: suite.regions[0], Step: 1, Status: NewStatus(StatusRegionNotMatchRule)}) @@ -234,9 +239,9 @@ func (suite *balanceSchedulerPlanAnalyzeTestSuite) TestAnalyzerResult5() { plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[0], Target: suite.stores[2], Step: 2, Status: NewStatus(StatusStoreNotMatchRule)}) plans = append(plans, &BalanceSchedulerPlan{Source: suite.stores[0], Target: suite.stores[3], Step: 2, Status: NewStatus(StatusStoreNotMatchRule)}) statuses, isNormal, err := BalancePlanSummary(plans) - suite.NoError(err) - suite.False(isNormal) - suite.True(suite.check(statuses, + re.NoError(err) + re.False(isNormal) + re.True(suite.check(statuses, map[uint64]*Status{ 1: NewStatus(StatusStoreAlreadyHasPeer), 2: NewStatus(StatusStoreAlreadyHasPeer), @@ -247,6 +252,7 @@ func (suite *balanceSchedulerPlanAnalyzeTestSuite) TestAnalyzerResult5() { } func (suite *balanceSchedulerPlanAnalyzeTestSuite) TestAnalyzerResult6() { + re := suite.Require() basePlan := NewBalanceSchedulerPlan() collector := NewCollector(basePlan) collector.Collect(SetResourceWithStep(suite.stores[0], 2), SetStatus(NewStatus(StatusStoreDown))) @@ -258,9 +264,9 @@ func (suite *balanceSchedulerPlanAnalyzeTestSuite) TestAnalyzerResult6() { basePlan.Step++ collector.Collect(SetResource(suite.regions[0]), SetStatus(NewStatus(StatusRegionNoLeader))) statuses, isNormal, err := BalancePlanSummary(collector.GetPlans()) - suite.NoError(err) - suite.False(isNormal) - suite.True(suite.check(statuses, + re.NoError(err) + re.False(isNormal) + re.True(suite.check(statuses, map[uint64]*Status{ 1: NewStatus(StatusStoreDown), 2: NewStatus(StatusStoreDown), diff --git a/pkg/schedule/scatter/region_scatterer_test.go b/pkg/schedule/scatter/region_scatterer_test.go index 70517d23fee..af41ed04b76 100644 --- a/pkg/schedule/scatter/region_scatterer_test.go +++ b/pkg/schedule/scatter/region_scatterer_test.go @@ -350,7 +350,7 @@ func TestSomeStoresFilteredScatterGroupInConcurrency(t *testing.T) { // prevent store from being disconnected tc.SetStoreLastHeartbeatInterval(i, 40*time.Minute) } - re.Equal(tc.GetStore(uint64(6)).IsDisconnected(), true) + re.True(tc.GetStore(uint64(6)).IsDisconnected()) scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddSuspectRegions) var wg sync.WaitGroup for j := 0; j < 10; j++ { @@ -466,7 +466,7 @@ func TestScatterForManyRegion(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/schedule/scatter/scatterHbStreamsDrain", `return(true)`)) scatterer.scatterRegions(regions, failures, group, 3, false) re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/schedule/scatter/scatterHbStreamsDrain")) - re.Len(failures, 0) + re.Empty(failures) } func TestScattersGroup(t *testing.T) { diff --git a/pkg/schedule/schedulers/balance_benchmark_test.go b/pkg/schedule/schedulers/balance_benchmark_test.go index 694d5edb658..2d7befd27af 100644 --- a/pkg/schedule/schedulers/balance_benchmark_test.go +++ b/pkg/schedule/schedulers/balance_benchmark_test.go @@ -163,7 +163,7 @@ func BenchmarkPlacementRule(b *testing.B) { ops, plans = sc.Schedule(tc, false) } b.StopTimer() - re.Len(plans, 0) + re.Empty(plans) re.Len(ops, 1) re.Contains(ops[0].String(), "to [191]") } diff --git a/pkg/schedule/schedulers/balance_test.go 
b/pkg/schedule/schedulers/balance_test.go index 54fe8ff489b..dafe810b2b7 100644 --- a/pkg/schedule/schedulers/balance_test.go +++ b/pkg/schedule/schedulers/balance_test.go @@ -237,9 +237,10 @@ func TestBalanceLeaderSchedulerTestSuite(t *testing.T) { } func (suite *balanceLeaderSchedulerTestSuite) SetupTest() { + re := suite.Require() suite.cancel, suite.conf, suite.tc, suite.oc = prepareSchedulersTest() lb, err := CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"", ""})) - suite.NoError(err) + re.NoError(err) suite.lb = lb } @@ -560,6 +561,7 @@ func (suite *balanceLeaderRangeSchedulerTestSuite) TearDownTest() { } func (suite *balanceLeaderRangeSchedulerTestSuite) TestSingleRangeBalance() { + re := suite.Require() // Stores: 1 2 3 4 // Leaders: 10 10 10 10 // Weight: 0.5 0.9 1 2 @@ -573,36 +575,36 @@ func (suite *balanceLeaderRangeSchedulerTestSuite) TestSingleRangeBalance() { suite.tc.UpdateStoreLeaderWeight(4, 2) suite.tc.AddLeaderRegionWithRange(1, "a", "g", 1, 2, 3, 4) lb, err := CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"", ""})) - suite.NoError(err) + re.NoError(err) ops, _ := lb.Schedule(suite.tc, false) - suite.NotEmpty(ops) - suite.Len(ops, 1) - suite.Len(ops[0].Counters, 1) - suite.Len(ops[0].FinishedCounters, 1) + re.NotEmpty(ops) + re.Len(ops, 1) + re.Len(ops[0].Counters, 1) + re.Len(ops[0].FinishedCounters, 1) lb, err = CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"h", "n"})) - suite.NoError(err) + re.NoError(err) ops, _ = lb.Schedule(suite.tc, false) - suite.Empty(ops) + re.Empty(ops) lb, err = CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"b", "f"})) - suite.NoError(err) + re.NoError(err) ops, _ = lb.Schedule(suite.tc, false) - suite.Empty(ops) + re.Empty(ops) lb, err = CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"", "a"})) - suite.NoError(err) + re.NoError(err) ops, _ = lb.Schedule(suite.tc, false) - suite.Empty(ops) + re.Empty(ops) lb, err = CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"g", ""})) - suite.NoError(err) + re.NoError(err) ops, _ = lb.Schedule(suite.tc, false) - suite.Empty(ops) + re.Empty(ops) lb, err = CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"", "f"})) - suite.NoError(err) + re.NoError(err) ops, _ = lb.Schedule(suite.tc, false) - suite.Empty(ops) + re.Empty(ops) lb, err = CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"b", ""})) - suite.NoError(err) + re.NoError(err) ops, _ = lb.Schedule(suite.tc, false) - suite.Empty(ops) + re.Empty(ops) } func (suite *balanceLeaderRangeSchedulerTestSuite) TestMultiRangeBalance() { diff --git a/pkg/schedule/schedulers/balance_witness_test.go b/pkg/schedule/schedulers/balance_witness_test.go index 59bf04c2303..9bde7e33438 100644 --- a/pkg/schedule/schedulers/balance_witness_test.go +++ b/pkg/schedule/schedulers/balance_witness_test.go @@ -125,7 +125,7 @@ func (suite *balanceWitnessSchedulerTestSuite) TestTransferWitnessOut() { } } } - suite.Equal(3, 
len(regions)) + suite.Len(regions, 3) for _, count := range targets { suite.Zero(count) } diff --git a/pkg/schedule/schedulers/evict_leader_test.go b/pkg/schedule/schedulers/evict_leader_test.go index d804561f11c..a91b1c3c937 100644 --- a/pkg/schedule/schedulers/evict_leader_test.go +++ b/pkg/schedule/schedulers/evict_leader_test.go @@ -97,7 +97,7 @@ func TestConfigClone(t *testing.T) { con3 := con2.Clone() con3.StoreIDWithRanges[1], _ = getKeyRanges([]string{"a", "b", "c", "d"}) re.Empty(emptyConf.getKeyRangesByID(1)) - re.False(len(con3.getRanges(1)) == len(con2.getRanges(1))) + re.NotEqual(len(con3.getRanges(1)), len(con2.getRanges(1))) con4 := con3.Clone() re.True(bytes.Equal(con4.StoreIDWithRanges[1][0].StartKey, con3.StoreIDWithRanges[1][0].StartKey)) diff --git a/pkg/schedule/schedulers/evict_slow_trend_test.go b/pkg/schedule/schedulers/evict_slow_trend_test.go index 65a70962a20..aed41e83ecd 100644 --- a/pkg/schedule/schedulers/evict_slow_trend_test.go +++ b/pkg/schedule/schedulers/evict_slow_trend_test.go @@ -100,10 +100,10 @@ func (suite *evictSlowTrendTestSuite) TestEvictSlowTrendBasicFuncs() { // Pop captured store 1 and mark it has recovered. time.Sleep(50 * time.Millisecond) suite.Equal(es2.conf.popCandidate(true), store.GetID()) - suite.True(es2.conf.evictCandidate == (slowCandidate{})) + suite.Equal(slowCandidate{}, es2.conf.evictCandidate) es2.conf.markCandidateRecovered() lastCapturedCandidate = es2.conf.lastCapturedCandidate() - suite.True(lastCapturedCandidate.recoverTS.Compare(recoverTS) > 0) + suite.Greater(lastCapturedCandidate.recoverTS.Compare(recoverTS), 0) suite.Equal(lastCapturedCandidate.storeID, store.GetID()) // Test capture another store 2 diff --git a/pkg/schedule/schedulers/hot_region_test.go b/pkg/schedule/schedulers/hot_region_test.go index 6e7208e4251..5b1bc3db4b4 100644 --- a/pkg/schedule/schedulers/hot_region_test.go +++ b/pkg/schedule/schedulers/hot_region_test.go @@ -180,11 +180,11 @@ func checkGCPendingOpInfos(re *require.Assertions, enablePlacementRules bool) { kind := hb.regionPendings[regionID].op.Kind() switch typ { case transferLeader: - re.True(kind&operator.OpLeader != 0) - re.True(kind&operator.OpRegion == 0) + re.NotZero(kind & operator.OpLeader) + re.Zero(kind & operator.OpRegion) case movePeer: - re.True(kind&operator.OpLeader == 0) - re.True(kind&operator.OpRegion != 0) + re.Zero(kind & operator.OpLeader) + re.NotZero(kind & operator.OpRegion) } } } @@ -257,7 +257,7 @@ func TestSplitIfRegionTooHot(t *testing.T) { re.Equal(expectOp.Kind(), ops[0].Kind()) ops, _ = hb.Schedule(tc, false) - re.Len(ops, 0) + re.Empty(ops) tc.UpdateStorageWrittenBytes(1, 6*units.MiB*utils.StoreHeartBeatReportInterval) tc.UpdateStorageWrittenBytes(2, 1*units.MiB*utils.StoreHeartBeatReportInterval) @@ -276,7 +276,7 @@ func TestSplitIfRegionTooHot(t *testing.T) { re.Equal(operator.OpSplit, ops[0].Kind()) ops, _ = hb.Schedule(tc, false) - re.Len(ops, 0) + re.Empty(ops) } func TestSplitBucketsBySize(t *testing.T) { @@ -319,10 +319,10 @@ func TestSplitBucketsBySize(t *testing.T) { region.UpdateBuckets(b, region.GetBuckets()) ops := solve.createSplitOperator([]*core.RegionInfo{region}, bySize) if data.splitKeys == nil { - re.Equal(0, len(ops)) + re.Empty(ops) continue } - re.Equal(1, len(ops)) + re.Len(ops, 1) op := ops[0] re.Equal(splitHotReadBuckets, op.Desc()) @@ -380,10 +380,10 @@ func TestSplitBucketsByLoad(t *testing.T) { time.Sleep(time.Millisecond * 10) ops := solve.createSplitOperator([]*core.RegionInfo{region}, byLoad) if data.splitKeys == nil { - 
re.Equal(0, len(ops)) + re.Empty(ops) continue } - re.Equal(1, len(ops)) + re.Len(ops, 1) op := ops[0] re.Equal(splitHotReadBuckets, op.Desc()) @@ -731,7 +731,7 @@ func TestHotWriteRegionScheduleByteRateOnlyWithTiFlash(t *testing.T) { loadsEqual( hb.stLoadInfos[writeLeader][1].LoadPred.Expect.Loads, []float64{hotRegionBytesSum / allowLeaderTiKVCount, hotRegionKeysSum / allowLeaderTiKVCount, tikvQuerySum / allowLeaderTiKVCount})) - re.True(tikvQuerySum != hotRegionQuerySum) + re.NotEqual(tikvQuerySum, hotRegionQuerySum) re.True( loadsEqual( hb.stLoadInfos[writePeer][1].LoadPred.Expect.Loads, @@ -1574,7 +1574,7 @@ func TestHotReadWithEvictLeaderScheduler(t *testing.T) { // two dim are both enough uniform among three stores tc.SetStoreEvictLeader(4, true) ops, _ = hb.Schedule(tc, false) - re.Len(ops, 0) + re.Empty(ops) clearPendingInfluence(hb.(*hotScheduler)) } diff --git a/pkg/schedule/schedulers/hot_region_v2_test.go b/pkg/schedule/schedulers/hot_region_v2_test.go index d11ac44dde9..f5e21e02981 100644 --- a/pkg/schedule/schedulers/hot_region_v2_test.go +++ b/pkg/schedule/schedulers/hot_region_v2_test.go @@ -309,7 +309,7 @@ func TestSkipUniformStore(t *testing.T) { // when there is uniform store filter, not schedule stddevThreshold = 0.1 ops, _ = hb.Schedule(tc, false) - re.Len(ops, 0) + re.Empty(ops) clearPendingInfluence(hb.(*hotScheduler)) // Case2: the first dim is enough uniform, we should schedule the second dim @@ -380,7 +380,7 @@ func TestHotReadRegionScheduleWithSmallHotRegion(t *testing.T) { ops = checkHotReadRegionScheduleWithSmallHotRegion(re, highLoad, lowLoad, emptyFunc) re.Len(ops, 1) ops = checkHotReadRegionScheduleWithSmallHotRegion(re, lowLoad, highLoad, emptyFunc) - re.Len(ops, 0) + re.Empty(ops) // Case3: If there is larger hot region, we will schedule it. hotRegionID := uint64(100) @@ -418,7 +418,7 @@ func TestHotReadRegionScheduleWithSmallHotRegion(t *testing.T) { tc.AddRegionWithReadInfo(hotRegionID+1, 2, bigHotRegionByte, 0, bigHotRegionQuery, utils.StoreHeartBeatReportInterval, []uint64{1, 3}) tc.AddRegionWithReadInfo(hotRegionID+1, 1, bigHotRegionByte, 0, bigHotRegionQuery, utils.StoreHeartBeatReportInterval, []uint64{2, 3}) }) - re.Len(ops, 0) + re.Empty(ops) topnPosition = origin // Case7: If there are more than topnPosition hot regions, but them are pending, @@ -430,7 +430,7 @@ func TestHotReadRegionScheduleWithSmallHotRegion(t *testing.T) { tc.AddRegionWithReadInfo(hotRegionID+1, 1, bigHotRegionByte, 0, bigHotRegionQuery, utils.StoreHeartBeatReportInterval, []uint64{2, 3}) hb.regionPendings[hotRegionID+1] = &pendingInfluence{} }) - re.Len(ops, 0) + re.Empty(ops) topnPosition = origin } diff --git a/pkg/schedule/schedulers/scheduler_test.go b/pkg/schedule/schedulers/scheduler_test.go index 57f1fcf1e3f..77c190ad943 100644 --- a/pkg/schedule/schedulers/scheduler_test.go +++ b/pkg/schedule/schedulers/scheduler_test.go @@ -484,7 +484,7 @@ func TestBalanceLeaderWithConflictRule(t *testing.T) { } for _, testCase := range testCases { - re.Nil(tc.SetRule(testCase.rule)) + re.NoError(tc.SetRule(testCase.rule)) ops, _ := lb.Schedule(tc, false) if testCase.schedule { re.Len(ops, 1) diff --git a/pkg/storage/storage_gc_test.go b/pkg/storage/storage_gc_test.go index 141777d441e..77f7c7dbf65 100644 --- a/pkg/storage/storage_gc_test.go +++ b/pkg/storage/storage_gc_test.go @@ -93,7 +93,7 @@ func TestLoadMinServiceSafePoint(t *testing.T) { // gc_worker service safepoint will not be removed. 
ssp, err := storage.LoadMinServiceSafePointV2(testKeyspaceID, currentTime.Add(5000*time.Second)) re.NoError(err) - re.Equal(ssp.ServiceID, endpoint.GCWorkerServiceSafePointID) + re.Equal(endpoint.GCWorkerServiceSafePointID, ssp.ServiceID) re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/storage/endpoint/removeExpiredKeys")) } diff --git a/pkg/tso/keyspace_group_manager_test.go b/pkg/tso/keyspace_group_manager_test.go index 0c1b017d7aa..54a1adc6b34 100644 --- a/pkg/tso/keyspace_group_manager_test.go +++ b/pkg/tso/keyspace_group_manager_test.go @@ -988,7 +988,7 @@ func (suite *keyspaceGroupManagerTestSuite) TestUpdateKeyspaceGroupMembership() re.Equal(len(keyspaces), len(newGroup.Keyspaces)) for i := 0; i < len(newGroup.Keyspaces); i++ { if i > 0 { - re.True(newGroup.Keyspaces[i-1] < newGroup.Keyspaces[i]) + re.Less(newGroup.Keyspaces[i-1], newGroup.Keyspaces[i]) } } } diff --git a/pkg/unsaferecovery/unsafe_recovery_controller_test.go b/pkg/unsaferecovery/unsafe_recovery_controller_test.go index 44c4e4a7b4d..956b9b8729c 100644 --- a/pkg/unsaferecovery/unsafe_recovery_controller_test.go +++ b/pkg/unsaferecovery/unsafe_recovery_controller_test.go @@ -1158,7 +1158,7 @@ func TestExecutionTimeout(t *testing.T) { re.Equal(Failed, recoveryController.GetStage()) output := recoveryController.Show() - re.Equal(len(output), 3) + re.Len(output, 3) re.Contains(output[1].Details[0], "triggered by error: Exceeds timeout") } @@ -1768,7 +1768,7 @@ func TestEpochComparsion(t *testing.T) { cluster.PutStore(store) } recoveryController := NewController(cluster) - re.Nil(recoveryController.RemoveFailedStores(map[uint64]struct{}{ + re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{ 2: {}, 3: {}, }, 60, false)) @@ -1829,7 +1829,7 @@ func TestEpochComparsion(t *testing.T) { if expect, ok := expects[storeID]; ok { re.Equal(expect.PeerReports, report.PeerReports) } else { - re.Empty(len(report.PeerReports)) + re.Empty(report.PeerReports) } } } diff --git a/pkg/utils/etcdutil/etcdutil_test.go b/pkg/utils/etcdutil/etcdutil_test.go index 861a57cef13..d8b38e7b045 100644 --- a/pkg/utils/etcdutil/etcdutil_test.go +++ b/pkg/utils/etcdutil/etcdutil_test.go @@ -154,11 +154,11 @@ func TestInitClusterID(t *testing.T) { // Get any cluster key to parse the cluster ID. resp, err := EtcdKVGet(client, pdClusterIDPath) re.NoError(err) - re.Equal(0, len(resp.Kvs)) + re.Empty(resp.Kvs) clusterID, err := InitClusterID(client, pdClusterIDPath) re.NoError(err) - re.NotEqual(0, clusterID) + re.NotZero(clusterID) clusterID1, err := InitClusterID(client, pdClusterIDPath) re.NoError(err) @@ -375,15 +375,15 @@ func TestLoopWatcherTestSuite(t *testing.T) { } func (suite *loopWatcherTestSuite) SetupSuite() { + re := suite.Require() var err error - t := suite.T() suite.ctx, suite.cancel = context.WithCancel(context.Background()) suite.cleans = make([]func(), 0) // Start a etcd server and create a client with etcd1 as endpoint. 
- suite.config = newTestSingleConfig(t) - suite.startEtcd() + suite.config = newTestSingleConfig(suite.T()) + suite.startEtcd(re) suite.client, err = CreateEtcdClient(nil, suite.config.LCUrls) - suite.NoError(err) + re.NoError(err) suite.cleans = append(suite.cleans, func() { suite.client.Close() }) @@ -398,6 +398,7 @@ func (suite *loopWatcherTestSuite) TearDownSuite() { } func (suite *loopWatcherTestSuite) TestLoadWithoutKey() { + re := suite.Require() cache := struct { syncutil.RWMutex data map[string]struct{} @@ -422,13 +423,14 @@ func (suite *loopWatcherTestSuite) TestLoadWithoutKey() { ) watcher.StartWatchLoop() err := watcher.WaitLoad() - suite.NoError(err) // although no key, watcher returns no error + re.NoError(err) // although no key, watcher returns no error cache.RLock() defer cache.RUnlock() - suite.Len(cache.data, 0) + suite.Empty(cache.data) } func (suite *loopWatcherTestSuite) TestCallBack() { + re := suite.Require() cache := struct { syncutil.RWMutex data map[string]struct{} @@ -466,35 +468,36 @@ func (suite *loopWatcherTestSuite) TestCallBack() { ) watcher.StartWatchLoop() err := watcher.WaitLoad() - suite.NoError(err) + re.NoError(err) // put 10 keys for i := 0; i < 10; i++ { - suite.put(fmt.Sprintf("TestCallBack%d", i), "") + suite.put(re, fmt.Sprintf("TestCallBack%d", i), "") } time.Sleep(time.Second) cache.RLock() - suite.Len(cache.data, 10) + re.Len(cache.data, 10) cache.RUnlock() // delete 10 keys for i := 0; i < 10; i++ { key := fmt.Sprintf("TestCallBack%d", i) _, err = suite.client.Delete(suite.ctx, key) - suite.NoError(err) + re.NoError(err) } time.Sleep(time.Second) cache.RLock() - suite.Empty(cache.data) + re.Empty(cache.data) cache.RUnlock() } func (suite *loopWatcherTestSuite) TestWatcherLoadLimit() { + re := suite.Require() for count := 1; count < 10; count++ { for limit := 0; limit < 10; limit++ { ctx, cancel := context.WithCancel(suite.ctx) for i := 0; i < count; i++ { - suite.put(fmt.Sprintf("TestWatcherLoadLimit%d", i), "") + suite.put(re, fmt.Sprintf("TestWatcherLoadLimit%d", i), "") } cache := struct { syncutil.RWMutex @@ -525,9 +528,9 @@ func (suite *loopWatcherTestSuite) TestWatcherLoadLimit() { ) watcher.StartWatchLoop() err := watcher.WaitLoad() - suite.NoError(err) + re.NoError(err) cache.RLock() - suite.Len(cache.data, count) + re.Len(cache.data, count) cache.RUnlock() cancel() } @@ -535,6 +538,7 @@ func (suite *loopWatcherTestSuite) TestWatcherLoadLimit() { } func (suite *loopWatcherTestSuite) TestWatcherBreak() { + re := suite.Require() cache := struct { syncutil.RWMutex data string @@ -568,51 +572,51 @@ func (suite *loopWatcherTestSuite) TestWatcherBreak() { watcher.watchChangeRetryInterval = 100 * time.Millisecond watcher.StartWatchLoop() err := watcher.WaitLoad() - suite.NoError(err) + re.NoError(err) checkCache("") // we use close client and update client in failpoint to simulate the network error and recover - failpoint.Enable("github.com/tikv/pd/pkg/utils/etcdutil/updateClient", "return(true)") + re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/utils/etcdutil/updateClient", "return(true)")) // Case1: restart the etcd server suite.etcd.Close() - suite.startEtcd() - suite.put("TestWatcherBreak", "0") + suite.startEtcd(re) + suite.put(re, "TestWatcherBreak", "0") checkCache("0") suite.etcd.Server.Stop() time.Sleep(DefaultRequestTimeout) suite.etcd.Close() - suite.startEtcd() - suite.put("TestWatcherBreak", "1") + suite.startEtcd(re) + suite.put(re, "TestWatcherBreak", "1") checkCache("1") // Case2: close the etcd client 
and put a new value after watcher restarts suite.client.Close() suite.client, err = CreateEtcdClient(nil, suite.config.LCUrls) - suite.NoError(err) + re.NoError(err) watcher.updateClientCh <- suite.client - suite.put("TestWatcherBreak", "2") + suite.put(re, "TestWatcherBreak", "2") checkCache("2") // Case3: close the etcd client and put a new value before watcher restarts suite.client.Close() suite.client, err = CreateEtcdClient(nil, suite.config.LCUrls) - suite.NoError(err) - suite.put("TestWatcherBreak", "3") + re.NoError(err) + suite.put(re, "TestWatcherBreak", "3") watcher.updateClientCh <- suite.client checkCache("3") // Case4: close the etcd client and put a new value with compact suite.client.Close() suite.client, err = CreateEtcdClient(nil, suite.config.LCUrls) - suite.NoError(err) - suite.put("TestWatcherBreak", "4") + re.NoError(err) + suite.put(re, "TestWatcherBreak", "4") resp, err := EtcdKVGet(suite.client, "TestWatcherBreak") - suite.NoError(err) + re.NoError(err) revision := resp.Header.Revision resp2, err := suite.etcd.Server.Compact(suite.ctx, &etcdserverpb.CompactionRequest{Revision: revision}) - suite.NoError(err) - suite.Equal(revision, resp2.Header.Revision) + re.NoError(err) + re.Equal(revision, resp2.Header.Revision) watcher.updateClientCh <- suite.client checkCache("4") @@ -623,7 +627,7 @@ func (suite *loopWatcherTestSuite) TestWatcherBreak() { watcher.ForceLoad() checkCache("4") - failpoint.Disable("github.com/tikv/pd/pkg/utils/etcdutil/updateClient") + re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/utils/etcdutil/updateClient")) } func (suite *loopWatcherTestSuite) TestWatcherRequestProgress() { @@ -669,9 +673,9 @@ func (suite *loopWatcherTestSuite) TestWatcherRequestProgress() { checkWatcherRequestProgress(true) } -func (suite *loopWatcherTestSuite) startEtcd() { +func (suite *loopWatcherTestSuite) startEtcd(re *require.Assertions) { etcd1, err := embed.StartEtcd(suite.config) - suite.NoError(err) + re.NoError(err) suite.etcd = etcd1 <-etcd1.Server.ReadyNotify() suite.cleans = append(suite.cleans, func() { @@ -679,11 +683,11 @@ func (suite *loopWatcherTestSuite) startEtcd() { }) } -func (suite *loopWatcherTestSuite) put(key, value string) { +func (suite *loopWatcherTestSuite) put(re *require.Assertions, key, value string) { kv := clientv3.NewKV(suite.client) _, err := kv.Put(suite.ctx, key, value) - suite.NoError(err) + re.NoError(err) resp, err := kv.Get(suite.ctx, key) - suite.NoError(err) - suite.Equal(value, string(resp.Kvs[0].Value)) + re.NoError(err) + re.Equal(value, string(resp.Kvs[0].Value)) } diff --git a/pkg/utils/syncutil/lock_group_test.go b/pkg/utils/syncutil/lock_group_test.go index ff306983e05..897e6b777a6 100644 --- a/pkg/utils/syncutil/lock_group_test.go +++ b/pkg/utils/syncutil/lock_group_test.go @@ -60,14 +60,14 @@ func TestLockGroupWithRemoveEntryOnUnlock(t *testing.T) { for i := 0; i < maxID; i++ { group.Lock(uint32(i)) } - re.Equal(len(group.entries), maxID) + re.Len(group.entries, maxID) for i := 0; i < maxID; i++ { group.Unlock(uint32(i)) } wg.Wait() // Check that size of the lock group is limited. - re.Equal(len(group.entries), 0) + re.Empty(group.entries) } // mustSequentialUpdateSingle checks that for any given update, update is sequential. 
diff --git a/pkg/utils/typeutil/duration_test.go b/pkg/utils/typeutil/duration_test.go index 9a0beda7979..cff7c3cd66c 100644 --- a/pkg/utils/typeutil/duration_test.go +++ b/pkg/utils/typeutil/duration_test.go @@ -46,6 +46,6 @@ func TestDurationTOML(t *testing.T) { example := &example{} text := []byte(`interval = "1h1m1s"`) - re.Nil(toml.Unmarshal(text, example)) + re.NoError(toml.Unmarshal(text, example)) re.Equal(float64(60*60+60+1), example.Interval.Seconds()) } diff --git a/pkg/window/policy_test.go b/pkg/window/policy_test.go index 489c8428c9a..a81ef0ef82d 100644 --- a/pkg/window/policy_test.go +++ b/pkg/window/policy_test.go @@ -111,7 +111,7 @@ func TestRollingPolicy_AddWithTimespan(t *testing.T) { t.Logf("%+v", bkt) } - re.Equal(0, len(policy.window.buckets[0].Points)) + re.Empty(policy.window.buckets[0].Points) re.Equal(4, int(policy.window.buckets[1].Points[0])) re.Equal(2, int(policy.window.buckets[2].Points[0])) }) @@ -137,8 +137,8 @@ func TestRollingPolicy_AddWithTimespan(t *testing.T) { t.Logf("%+v", bkt) } - re.Equal(0, len(policy.window.buckets[0].Points)) + re.Empty(policy.window.buckets[0].Points) re.Equal(4, int(policy.window.buckets[1].Points[0])) - re.Equal(0, len(policy.window.buckets[2].Points)) + re.Empty(policy.window.buckets[2].Points) }) } diff --git a/pkg/window/window_test.go b/pkg/window/window_test.go index 0205aae47a3..f4df861fc2f 100644 --- a/pkg/window/window_test.go +++ b/pkg/window/window_test.go @@ -20,7 +20,6 @@ package window import ( "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -33,7 +32,7 @@ func TestWindowResetWindow(t *testing.T) { } window.ResetWindow() for i := 0; i < opts.Size; i++ { - re.Equal(len(window.Bucket(i).Points), 0) + re.Empty(window.Bucket(i).Points) } } @@ -45,9 +44,9 @@ func TestWindowResetBucket(t *testing.T) { window.Append(i, 1.0) } window.ResetBucket(1) - re.Equal(len(window.Bucket(1).Points), 0) - re.Equal(window.Bucket(0).Points[0], float64(1.0)) - re.Equal(window.Bucket(2).Points[0], float64(1.0)) + re.Empty(window.Bucket(1).Points) + re.Equal(float64(1.0), window.Bucket(0).Points[0]) + re.Equal(float64(1.0), window.Bucket(2).Points[0]) } func TestWindowResetBuckets(t *testing.T) { @@ -59,7 +58,7 @@ func TestWindowResetBuckets(t *testing.T) { window.Append(i, 1.0) } window.ResetBuckets(0, 3) for i := 0; i < opts.Size; i++ { - re.Equal(len(window.Bucket(i).Points), 0) + re.Empty(window.Bucket(i).Points) } } @@ -74,28 +73,30 @@ func TestWindowAppend(t *testing.T) { window.Append(i, 2.0) } for i := 0; i < opts.Size; i++ { - re.Equal(window.Bucket(i).Points[0], float64(1.0)) + re.Equal(float64(1.0), window.Bucket(i).Points[0]) } for i := 1; i < opts.Size; i++ { - re.Equal(window.Bucket(i).Points[1], float64(2.0)) + re.Equal(float64(2.0), window.Bucket(i).Points[1]) } } func TestWindowAdd(t *testing.T) { + re := require.New(t) opts := Options{Size: 3} window := NewWindow(opts) window.Append(0, 1.0) window.Add(0, 1.0) - assert.Equal(t, window.Bucket(0).Points[0], float64(2.0)) + re.Equal(float64(2.0), window.Bucket(0).Points[0]) window = NewWindow(opts) window.Add(0, 1.0) window.Add(0, 1.0) - assert.Equal(t, window.Bucket(0).Points[0], float64(2.0)) + re.Equal(float64(2.0), window.Bucket(0).Points[0]) } func TestWindowSize(t *testing.T) { + re := require.New(t) opts := Options{Size: 3} window := NewWindow(opts) - assert.Equal(t, window.Size(), 3) + re.Equal(3, window.Size()) } diff --git a/scripts/check-test.sh b/scripts/check-test.sh deleted file mode 100755 index 
c3168066e3d..00000000000 --- a/scripts/check-test.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -# Check if there is any inefficient assert function usage in package. - -res=$(grep -rn --include=\*_test.go -E "(re|suite|require)\.(True|False)\((t, )?reflect\.DeepEqual\(" . | sort -u) \ - -if [ "$res" ]; then - echo "following packages use the inefficient assert function: please replace reflect.DeepEqual with require.Equal" - echo "$res" - exit 1 -fi - -res=$(grep -rn --include=\*_test.go -E "(re|suite|require)\.(True|False)\((t, )?strings\.Contains\(" . | sort -u) - -if [ "$res" ]; then - echo "following packages use the inefficient assert function: please replace strings.Contains with require.Contains" - echo "$res" - exit 1 -fi - -res=$(grep -rn --include=\*_test.go -E "(re|suite|require)\.(Nil|NotNil)\((t, )?(err|error)" . | sort -u) - -if [ "$res" ]; then - echo "following packages use the inefficient assert function: please replace require.Nil/NotNil with require.NoError/Error" - echo "$res" - exit 1 -fi - -res=$(grep -rn --include=\*_test.go -E "(re|suite|require)\.(Equal|NotEqual)\((t, )?(true|false)" . | sort -u) - -if [ "$res" ]; then - echo "following packages use the inefficient assert function: please replace require.Equal/NotEqual(true, xxx) with require.True/False" - echo "$res" - exit 1 -fi - -exit 0 diff --git a/server/api/admin_test.go b/server/api/admin_test.go index 76c5e729eb0..050aa9cfb32 100644 --- a/server/api/admin_test.go +++ b/server/api/admin_test.go @@ -60,6 +60,7 @@ func (suite *adminTestSuite) TearDownSuite() { } func (suite *adminTestSuite) TestDropRegion() { + re := suite.Require() cluster := suite.svr.GetRaftCluster() // Update region's epoch to (100, 100). @@ -73,7 +74,7 @@ func (suite *adminTestSuite) TestDropRegion() { }, })) err := cluster.HandleRegionHeartbeat(region) - suite.NoError(err) + re.NoError(err) // Region epoch cannot decrease. region = region.Clone( @@ -81,25 +82,26 @@ func (suite *adminTestSuite) TestDropRegion() { core.SetRegionVersion(50), ) err = cluster.HandleRegionHeartbeat(region) - suite.Error(err) + re.Error(err) // After drop region from cache, lower version is accepted. url := fmt.Sprintf("%s/admin/cache/region/%d", suite.urlPrefix, region.GetID()) req, err := http.NewRequest(http.MethodDelete, url, http.NoBody) - suite.NoError(err) + re.NoError(err) res, err := testDialClient.Do(req) - suite.NoError(err) - suite.Equal(http.StatusOK, res.StatusCode) + re.NoError(err) + re.Equal(http.StatusOK, res.StatusCode) res.Body.Close() err = cluster.HandleRegionHeartbeat(region) - suite.NoError(err) + re.NoError(err) region = cluster.GetRegionByKey([]byte("foo")) - suite.Equal(uint64(50), region.GetRegionEpoch().ConfVer) - suite.Equal(uint64(50), region.GetRegionEpoch().Version) + re.Equal(uint64(50), region.GetRegionEpoch().ConfVer) + re.Equal(uint64(50), region.GetRegionEpoch().Version) } func (suite *adminTestSuite) TestDropRegions() { + re := suite.Require() cluster := suite.svr.GetRaftCluster() n := uint64(10000) @@ -124,7 +126,7 @@ func (suite *adminTestSuite) TestDropRegions() { regions = append(regions, region) err := cluster.HandleRegionHeartbeat(region) - suite.NoError(err) + re.NoError(err) } // Region epoch cannot decrease. 
@@ -135,46 +137,46 @@ func (suite *adminTestSuite) TestDropRegions() { ) regions[i] = region err := cluster.HandleRegionHeartbeat(region) - suite.Error(err) + re.Error(err) } for i := uint64(0); i < n; i++ { region := cluster.GetRegionByKey([]byte(fmt.Sprintf("%d", i))) - suite.Equal(uint64(100), region.GetRegionEpoch().ConfVer) - suite.Equal(uint64(100), region.GetRegionEpoch().Version) + re.Equal(uint64(100), region.GetRegionEpoch().ConfVer) + re.Equal(uint64(100), region.GetRegionEpoch().Version) } // After drop all regions from cache, lower version is accepted. url := fmt.Sprintf("%s/admin/cache/regions", suite.urlPrefix) req, err := http.NewRequest(http.MethodDelete, url, http.NoBody) - suite.NoError(err) + re.NoError(err) res, err := testDialClient.Do(req) - suite.NoError(err) - suite.Equal(http.StatusOK, res.StatusCode) + re.NoError(err) + re.Equal(http.StatusOK, res.StatusCode) res.Body.Close() for _, region := range regions { err := cluster.HandleRegionHeartbeat(region) - suite.NoError(err) + re.NoError(err) } for i := uint64(0); i < n; i++ { region := cluster.GetRegionByKey([]byte(fmt.Sprintf("%d", i))) - suite.Equal(uint64(50), region.GetRegionEpoch().ConfVer) - suite.Equal(uint64(50), region.GetRegionEpoch().Version) + re.Equal(uint64(50), region.GetRegionEpoch().ConfVer) + re.Equal(uint64(50), region.GetRegionEpoch().Version) } } func (suite *adminTestSuite) TestPersistFile() { - data := []byte("#!/bin/sh\nrm -rf /") re := suite.Require() + data := []byte("#!/bin/sh\nrm -rf /") err := tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/admin/persist-file/"+replication.DrStatusFile, data, tu.StatusNotOK(re)) - suite.NoError(err) + re.NoError(err) data = []byte(`{"foo":"bar"}`) err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/admin/persist-file/"+replication.DrStatusFile, data, tu.StatusOK(re)) - suite.NoError(err) + re.NoError(err) } func makeTS(offset time.Duration) uint64 { @@ -183,13 +185,13 @@ func makeTS(offset time.Duration) uint64 { } func (suite *adminTestSuite) TestResetTS() { + re := suite.Require() args := make(map[string]interface{}) t1 := makeTS(time.Hour) url := fmt.Sprintf("%s/admin/reset-ts", suite.urlPrefix) args["tso"] = fmt.Sprintf("%d", t1) values, err := json.Marshal(args) - suite.NoError(err) - re := suite.Require() + re.NoError(err) tu.Eventually(re, func() bool { resp, err := apiutil.PostJSON(testDialClient, url, values) re.NoError(err) @@ -208,128 +210,128 @@ func (suite *adminTestSuite) TestResetTS() { return false } }) - suite.NoError(err) + re.NoError(err) t2 := makeTS(32 * time.Hour) args["tso"] = fmt.Sprintf("%d", t2) values, err = json.Marshal(args) - suite.NoError(err) + re.NoError(err) err = tu.CheckPostJSON(testDialClient, url, values, tu.Status(re, http.StatusForbidden), tu.StringContain(re, "too large")) - suite.NoError(err) + re.NoError(err) t3 := makeTS(-2 * time.Hour) args["tso"] = fmt.Sprintf("%d", t3) values, err = json.Marshal(args) - suite.NoError(err) + re.NoError(err) err = tu.CheckPostJSON(testDialClient, url, values, tu.Status(re, http.StatusForbidden), tu.StringContain(re, "small")) - suite.NoError(err) + re.NoError(err) args["tso"] = "" values, err = json.Marshal(args) - suite.NoError(err) + re.NoError(err) err = tu.CheckPostJSON(testDialClient, url, values, tu.Status(re, http.StatusBadRequest), tu.StringEqual(re, "\"invalid tso value\"\n")) - suite.NoError(err) + re.NoError(err) args["tso"] = "test" values, err = json.Marshal(args) - suite.NoError(err) + re.NoError(err) err = tu.CheckPostJSON(testDialClient, url, values, 
tu.Status(re, http.StatusBadRequest), tu.StringEqual(re, "\"invalid tso value\"\n")) - suite.NoError(err) + re.NoError(err) t4 := makeTS(32 * time.Hour) args["tso"] = fmt.Sprintf("%d", t4) args["force-use-larger"] = "xxx" values, err = json.Marshal(args) - suite.NoError(err) + re.NoError(err) err = tu.CheckPostJSON(testDialClient, url, values, tu.Status(re, http.StatusBadRequest), tu.StringContain(re, "invalid force-use-larger value")) - suite.NoError(err) + re.NoError(err) args["force-use-larger"] = false values, err = json.Marshal(args) - suite.NoError(err) + re.NoError(err) err = tu.CheckPostJSON(testDialClient, url, values, tu.Status(re, http.StatusForbidden), tu.StringContain(re, "too large")) - suite.NoError(err) + re.NoError(err) args["force-use-larger"] = true values, err = json.Marshal(args) - suite.NoError(err) + re.NoError(err) err = tu.CheckPostJSON(testDialClient, url, values, tu.StatusOK(re), tu.StringEqual(re, "\"Reset ts successfully.\"\n")) - suite.NoError(err) + re.NoError(err) } func (suite *adminTestSuite) TestMarkSnapshotRecovering() { re := suite.Require() url := fmt.Sprintf("%s/admin/cluster/markers/snapshot-recovering", suite.urlPrefix) // default to false - suite.NoError(tu.CheckGetJSON(testDialClient, url, nil, + re.NoError(tu.CheckGetJSON(testDialClient, url, nil, tu.StatusOK(re), tu.StringContain(re, "false"))) // mark - suite.NoError(tu.CheckPostJSON(testDialClient, url, nil, + re.NoError(tu.CheckPostJSON(testDialClient, url, nil, tu.StatusOK(re))) - suite.NoError(tu.CheckGetJSON(testDialClient, url, nil, + re.NoError(tu.CheckGetJSON(testDialClient, url, nil, tu.StatusOK(re), tu.StringContain(re, "true"))) // test using grpc call grpcServer := server.GrpcServer{Server: suite.svr} resp, err2 := grpcServer.IsSnapshotRecovering(context.Background(), &pdpb.IsSnapshotRecoveringRequest{}) - suite.NoError(err2) - suite.True(resp.Marked) + re.NoError(err2) + re.True(resp.Marked) // unmark err := tu.CheckDelete(testDialClient, url, tu.StatusOK(re)) - suite.NoError(err) - suite.NoError(tu.CheckGetJSON(testDialClient, url, nil, + re.NoError(err) + re.NoError(tu.CheckGetJSON(testDialClient, url, nil, tu.StatusOK(re), tu.StringContain(re, "false"))) } func (suite *adminTestSuite) TestRecoverAllocID() { re := suite.Require() url := fmt.Sprintf("%s/admin/base-alloc-id", suite.urlPrefix) - suite.NoError(tu.CheckPostJSON(testDialClient, url, []byte("invalid json"), tu.Status(re, http.StatusBadRequest))) + re.NoError(tu.CheckPostJSON(testDialClient, url, []byte("invalid json"), tu.Status(re, http.StatusBadRequest))) // no id or invalid id - suite.NoError(tu.CheckPostJSON(testDialClient, url, []byte(`{}`), + re.NoError(tu.CheckPostJSON(testDialClient, url, []byte(`{}`), tu.Status(re, http.StatusBadRequest), tu.StringContain(re, "invalid id value"))) - suite.NoError(tu.CheckPostJSON(testDialClient, url, []byte(`{"id": ""}`), + re.NoError(tu.CheckPostJSON(testDialClient, url, []byte(`{"id": ""}`), tu.Status(re, http.StatusBadRequest), tu.StringContain(re, "invalid id value"))) - suite.NoError(tu.CheckPostJSON(testDialClient, url, []byte(`{"id": 11}`), + re.NoError(tu.CheckPostJSON(testDialClient, url, []byte(`{"id": 11}`), tu.Status(re, http.StatusBadRequest), tu.StringContain(re, "invalid id value"))) - suite.NoError(tu.CheckPostJSON(testDialClient, url, []byte(`{"id": "aa"}`), + re.NoError(tu.CheckPostJSON(testDialClient, url, []byte(`{"id": "aa"}`), tu.Status(re, http.StatusBadRequest), tu.StringContain(re, "invalid syntax"))) // snapshot recovering=false - 
suite.NoError(tu.CheckPostJSON(testDialClient, url, []byte(`{"id": "100000"}`), + re.NoError(tu.CheckPostJSON(testDialClient, url, []byte(`{"id": "100000"}`), tu.Status(re, http.StatusForbidden), tu.StringContain(re, "can only recover alloc id when recovering"))) // mark and recover alloc id markRecoveringURL := fmt.Sprintf("%s/admin/cluster/markers/snapshot-recovering", suite.urlPrefix) - suite.NoError(tu.CheckPostJSON(testDialClient, markRecoveringURL, nil, + re.NoError(tu.CheckPostJSON(testDialClient, markRecoveringURL, nil, tu.StatusOK(re))) - suite.NoError(tu.CheckPostJSON(testDialClient, url, []byte(`{"id": "1000000"}`), + re.NoError(tu.CheckPostJSON(testDialClient, url, []byte(`{"id": "1000000"}`), tu.StatusOK(re))) id, err2 := suite.svr.GetAllocator().Alloc() - suite.NoError(err2) - suite.Equal(id, uint64(1000001)) + re.NoError(err2) + re.Equal(uint64(1000001), id) // recover alloc id again - suite.NoError(tu.CheckPostJSON(testDialClient, url, []byte(`{"id": "99000000"}`), + re.NoError(tu.CheckPostJSON(testDialClient, url, []byte(`{"id": "99000000"}`), tu.StatusOK(re))) id, err2 = suite.svr.GetAllocator().Alloc() - suite.NoError(err2) - suite.Equal(id, uint64(99000001)) + re.NoError(err2) + re.Equal(uint64(99000001), id) // unmark err := tu.CheckDelete(testDialClient, markRecoveringURL, tu.StatusOK(re)) - suite.NoError(err) - suite.NoError(tu.CheckGetJSON(testDialClient, markRecoveringURL, nil, + re.NoError(err) + re.NoError(tu.CheckGetJSON(testDialClient, markRecoveringURL, nil, tu.StatusOK(re), tu.StringContain(re, "false"))) - suite.NoError(tu.CheckPostJSON(testDialClient, url, []byte(`{"id": "100000"}`), + re.NoError(tu.CheckPostJSON(testDialClient, url, []byte(`{"id": "100000"}`), tu.Status(re, http.StatusForbidden), tu.StringContain(re, "can only recover alloc id when recovering"))) } diff --git a/server/api/region_test.go b/server/api/region_test.go index ea2f2871a95..7e48c80d7bc 100644 --- a/server/api/region_test.go +++ b/server/api/region_test.go @@ -224,7 +224,7 @@ func (suite *regionTestSuite) TestRegionCheck() { func (suite *regionTestSuite) TestRegions() { r := NewAPIRegionInfo(core.NewRegionInfo(&metapb.Region{Id: 1}, nil)) suite.Nil(r.Leader.Peer) - suite.Len(r.Leader.RoleName, 0) + suite.Empty(r.Leader.RoleName) rs := []*core.RegionInfo{ core.NewTestRegionInfo(2, 1, []byte("a"), []byte("b"), core.SetApproximateKeys(10), core.SetApproximateSize(10)), diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index 8c889923ea7..d5931394c1b 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -179,10 +179,10 @@ func TestStoreHeartbeat(t *testing.T) { time.Sleep(20 * time.Millisecond) storeStats = cluster.hotStat.RegionStats(utils.Read, 0) re.Empty(storeStats[1]) - re.Nil(cluster.HandleStoreHeartbeat(hotReq, hotResp)) + re.NoError(cluster.HandleStoreHeartbeat(hotReq, hotResp)) time.Sleep(20 * time.Millisecond) storeStats = cluster.hotStat.RegionStats(utils.Read, 1) - re.Len(storeStats[1], 0) + re.Empty(storeStats[1]) storeStats = cluster.hotStat.RegionStats(utils.Read, 3) re.Empty(storeStats[1]) // after 2 hot heartbeats, wo can find region 1 peer again @@ -2239,7 +2239,7 @@ func checkRegions(re *require.Assertions, cache *core.BasicCluster, regions []*c } } - re.Equal(len(regions), cache.GetTotalRegionCount()) + re.Len(regions, cache.GetTotalRegionCount()) for id, count := range regionCount { re.Equal(count, cache.GetStoreRegionCount(id)) } @@ -2744,7 +2744,7 @@ func TestMergeRegionCancelOneOperator(t *testing.T) { 
re.Len(ops, co.GetOperatorController().AddWaitingOperator(ops...)) // Cancel source operator. co.GetOperatorController().RemoveOperator(co.GetOperatorController().GetOperator(source.GetID())) - re.Len(co.GetOperatorController().GetOperators(), 0) + re.Empty(co.GetOperatorController().GetOperators()) // Cancel target region. ops, err = operator.CreateMergeRegionOperator("merge-region", tc, source, target, operator.OpMerge) @@ -2752,7 +2752,7 @@ func TestMergeRegionCancelOneOperator(t *testing.T) { re.Len(ops, co.GetOperatorController().AddWaitingOperator(ops...)) // Cancel target operator. co.GetOperatorController().RemoveOperator(co.GetOperatorController().GetOperator(target.GetID())) - re.Len(co.GetOperatorController().GetOperators(), 0) + re.Empty(co.GetOperatorController().GetOperators()) } func TestReplica(t *testing.T) { @@ -3047,8 +3047,8 @@ func TestAddScheduler(t *testing.T) { re.Equal(4, int(batch)) gls, err := schedulers.CreateScheduler(schedulers.GrantLeaderType, oc, storage.NewStorageWithMemoryBackend(), schedulers.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"0"}), controller.RemoveScheduler) re.NoError(err) - re.NotNil(controller.AddScheduler(gls)) - re.NotNil(controller.RemoveScheduler(gls.GetName())) + re.Error(controller.AddScheduler(gls)) + re.Error(controller.RemoveScheduler(gls.GetName())) gls, err = schedulers.CreateScheduler(schedulers.GrantLeaderType, oc, storage.NewStorageWithMemoryBackend(), schedulers.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"1"}), controller.RemoveScheduler) re.NoError(err) @@ -3445,7 +3445,7 @@ func TestStoreOverloaded(t *testing.T) { time.Sleep(time.Second) for i := 0; i < 100; i++ { ops, _ := lb.Schedule(tc, false /* dryRun */) - re.Greater(len(ops), 0) + re.NotEmpty(ops) } } @@ -3480,7 +3480,7 @@ func TestStoreOverloadedWithReplace(t *testing.T) { // sleep 2 seconds to make sure that token is filled up time.Sleep(2 * time.Second) ops, _ = lb.Schedule(tc, false /* dryRun */) - re.Greater(len(ops), 0) + re.NotEmpty(ops) } func TestDownStoreLimit(t *testing.T) { diff --git a/server/config/config_test.go b/server/config/config_test.go index 07cdc966409..69cfafd8d36 100644 --- a/server/config/config_test.go +++ b/server/config/config_test.go @@ -492,7 +492,7 @@ func TestRateLimitClone(t *testing.T) { ConcurrencyLimit: 200, } dc := cfg.LimiterConfig["test"] - re.Equal(dc.ConcurrencyLimit, uint64(0)) + re.Zero(dc.ConcurrencyLimit) gCfg := &GRPCRateLimitConfig{ EnableRateLimit: defaultEnableGRPCRateLimitMiddleware, @@ -503,5 +503,5 @@ func TestRateLimitClone(t *testing.T) { ConcurrencyLimit: 300, } gdc := gCfg.LimiterConfig["test"] - re.Equal(gdc.ConcurrencyLimit, uint64(0)) + re.Zero(gdc.ConcurrencyLimit) } diff --git a/tests/pdctl/hot/hot_test.go b/tests/pdctl/hot/hot_test.go index 03c26f40441..366887e19aa 100644 --- a/tests/pdctl/hot/hot_test.go +++ b/tests/pdctl/hot/hot_test.go @@ -368,11 +368,11 @@ func (suite *hotTestSuite) checkHotWithoutHotPeer(cluster *tests.TestCluster) { re.NoError(err) re.NoError(json.Unmarshal(output, &hotRegion)) re.NotNil(hotRegion.AsPeer[1]) - re.Equal(hotRegion.AsPeer[1].Count, 0) - re.Equal(0.0, hotRegion.AsPeer[1].TotalBytesRate) + re.Zero(hotRegion.AsPeer[1].Count) + re.Zero(hotRegion.AsPeer[1].TotalBytesRate) re.Equal(load, hotRegion.AsPeer[1].StoreByteRate) - re.Equal(hotRegion.AsLeader[1].Count, 0) - re.Equal(0.0, hotRegion.AsLeader[1].TotalBytesRate) + re.Zero(hotRegion.AsLeader[1].Count) + re.Zero(hotRegion.AsLeader[1].TotalBytesRate) re.Equal(load, 
hotRegion.AsLeader[1].StoreByteRate) } { @@ -381,12 +381,12 @@ func (suite *hotTestSuite) checkHotWithoutHotPeer(cluster *tests.TestCluster) { hotRegion := statistics.StoreHotPeersInfos{} re.NoError(err) re.NoError(json.Unmarshal(output, &hotRegion)) - re.Equal(0, hotRegion.AsPeer[1].Count) - re.Equal(0.0, hotRegion.AsPeer[1].TotalBytesRate) + re.Zero(hotRegion.AsPeer[1].Count) + re.Zero(hotRegion.AsPeer[1].TotalBytesRate) re.Equal(load, hotRegion.AsPeer[1].StoreByteRate) - re.Equal(0, hotRegion.AsLeader[1].Count) - re.Equal(0.0, hotRegion.AsLeader[1].TotalBytesRate) - re.Equal(0.0, hotRegion.AsLeader[1].StoreByteRate) // write leader sum + re.Zero(hotRegion.AsLeader[1].Count) + re.Zero(hotRegion.AsLeader[1].TotalBytesRate) + re.Zero(hotRegion.AsLeader[1].StoreByteRate) // write leader sum } } diff --git a/tests/pdctl/keyspace/keyspace_group_test.go b/tests/pdctl/keyspace/keyspace_group_test.go index cbfdf1d099a..0de48a85c64 100644 --- a/tests/pdctl/keyspace/keyspace_group_test.go +++ b/tests/pdctl/keyspace/keyspace_group_test.go @@ -78,14 +78,14 @@ func TestKeyspaceGroup(t *testing.T) { err = json.Unmarshal(output, &keyspaceGroup) re.NoError(err) re.Equal(uint32(1), keyspaceGroup.ID) - re.Equal(keyspaceGroup.Keyspaces, []uint32{111}) + re.Equal([]uint32{111}, keyspaceGroup.Keyspaces) output, err = pdctl.ExecuteCommand(cmd, append(args, "2")...) re.NoError(err) keyspaceGroup = endpoint.KeyspaceGroup{} err = json.Unmarshal(output, &keyspaceGroup) re.NoError(err) re.Equal(uint32(2), keyspaceGroup.ID) - re.Equal(keyspaceGroup.Keyspaces, []uint32{222, 333}) + re.Equal([]uint32{222, 333}, keyspaceGroup.Keyspaces) } func TestSplitKeyspaceGroup(t *testing.T) { @@ -133,8 +133,8 @@ func TestSplitKeyspaceGroup(t *testing.T) { err = json.Unmarshal(output, &keyspaceGroups) re.NoError(err) re.Len(keyspaceGroups, 2) - re.Equal(keyspaceGroups[0].ID, uint32(0)) - re.Equal(keyspaceGroups[1].ID, uint32(1)) + re.Zero(keyspaceGroups[0].ID) + re.Equal(uint32(1), keyspaceGroups[1].ID) re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/keyspace/acceleratedAllocNodes")) re.NoError(failpoint.Disable("github.com/tikv/pd/server/delayStartServerLoop")) @@ -448,7 +448,7 @@ func TestKeyspaceGroupState(t *testing.T) { var keyspaceGroups []*endpoint.KeyspaceGroup err = json.Unmarshal(output, &keyspaceGroups) re.NoError(err) - re.Len(keyspaceGroups, 0) + re.Empty(keyspaceGroups) testutil.Eventually(re, func() bool { args := []string{"-u", pdAddr, "keyspace-group", "split", "0", "2", "3"} output, err := pdctl.ExecuteCommand(cmd, args...) @@ -462,8 +462,8 @@ func TestKeyspaceGroupState(t *testing.T) { err = json.Unmarshal(output, &keyspaceGroups) re.NoError(err) re.Len(keyspaceGroups, 2) - re.Equal(keyspaceGroups[0].ID, uint32(0)) - re.Equal(keyspaceGroups[1].ID, uint32(2)) + re.Equal(uint32(0), keyspaceGroups[0].ID) + re.Equal(uint32(2), keyspaceGroups[1].ID) args = []string{"-u", pdAddr, "keyspace-group", "finish-split", "2"} output, err = pdctl.ExecuteCommand(cmd, args...) 
@@ -486,7 +486,7 @@ func TestKeyspaceGroupState(t *testing.T) { err = json.Unmarshal(output, &keyspaceGroups) re.NoError(err) re.Len(keyspaceGroups, 1) - re.Equal(keyspaceGroups[0].ID, uint32(0)) + re.Equal(uint32(0), keyspaceGroups[0].ID) re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/keyspace/acceleratedAllocNodes")) re.NoError(failpoint.Disable("github.com/tikv/pd/server/delayStartServerLoop")) diff --git a/tests/pdctl/keyspace/keyspace_test.go b/tests/pdctl/keyspace/keyspace_test.go index 3ff755fe601..f83d09760a9 100644 --- a/tests/pdctl/keyspace/keyspace_test.go +++ b/tests/pdctl/keyspace/keyspace_test.go @@ -147,22 +147,24 @@ func TestKeyspaceTestSuite(t *testing.T) { } func (suite *keyspaceTestSuite) SetupTest() { + re := suite.Require() suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.NoError(failpoint.Enable("github.com/tikv/pd/server/delayStartServerLoop", `return(true)`)) - suite.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/skipSplitRegion", "return(true)")) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/delayStartServerLoop", `return(true)`)) + re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/skipSplitRegion", "return(true)")) tc, err := tests.NewTestAPICluster(suite.ctx, 1) - suite.NoError(err) - suite.NoError(tc.RunInitialServers()) + re.NoError(err) + re.NoError(tc.RunInitialServers()) tc.WaitLeader() leaderServer := tc.GetLeaderServer() - suite.NoError(leaderServer.BootstrapCluster()) + re.NoError(leaderServer.BootstrapCluster()) suite.cluster = tc suite.pdAddr = tc.GetConfig().GetClientURL() } func (suite *keyspaceTestSuite) TearDownTest() { - suite.NoError(failpoint.Disable("github.com/tikv/pd/server/delayStartServerLoop")) - suite.NoError(failpoint.Disable("github.com/tikv/pd/pkg/keyspace/skipSplitRegion")) + re := suite.Require() + re.NoError(failpoint.Disable("github.com/tikv/pd/server/delayStartServerLoop")) + re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/keyspace/skipSplitRegion")) suite.cancel() } diff --git a/tests/pdctl/log/log_test.go b/tests/pdctl/log/log_test.go index e6995231329..08df4a78bea 100644 --- a/tests/pdctl/log/log_test.go +++ b/tests/pdctl/log/log_test.go @@ -39,11 +39,12 @@ func TestLogTestSuite(t *testing.T) { } func (suite *logTestSuite) SetupSuite() { + re := suite.Require() suite.ctx, suite.cancel = context.WithCancel(context.Background()) var err error suite.cluster, err = tests.NewTestCluster(suite.ctx, 3) - suite.NoError(err) - suite.NoError(suite.cluster.RunInitialServers()) + re.NoError(err) + re.NoError(suite.cluster.RunInitialServers()) suite.cluster.WaitLeader() suite.pdAddrs = suite.cluster.GetConfig().GetClientURLs() @@ -53,7 +54,7 @@ func (suite *logTestSuite) SetupSuite() { LastHeartbeat: time.Now().UnixNano(), } leaderServer := suite.cluster.GetLeaderServer() - suite.NoError(leaderServer.BootstrapCluster()) + re.NoError(leaderServer.BootstrapCluster()) tests.MustPutStore(suite.Require(), suite.cluster, store) } diff --git a/tests/pdctl/operator/operator_test.go b/tests/pdctl/operator/operator_test.go index aa2fe5d1304..1de61dca880 100644 --- a/tests/pdctl/operator/operator_test.go +++ b/tests/pdctl/operator/operator_test.go @@ -107,7 +107,7 @@ func (suite *operatorTestSuite) checkOperator(cluster *tests.TestCluster) { output, err := pdctl.ExecuteCommand(cmd, args...) 
re.NoError(err) re.NoError(json.Unmarshal(output, &slice)) - re.Len(slice, 0) + re.Empty(slice) args = []string{"-u", pdAddr, "operator", "check", "2"} output, err = pdctl.ExecuteCommand(cmd, args...) re.NoError(err) diff --git a/tests/pdctl/resourcemanager/resource_manager_command_test.go b/tests/pdctl/resourcemanager/resource_manager_command_test.go index ad43e0abca9..cbd9b481869 100644 --- a/tests/pdctl/resourcemanager/resource_manager_command_test.go +++ b/tests/pdctl/resourcemanager/resource_manager_command_test.go @@ -41,9 +41,10 @@ type testResourceManagerSuite struct { } func (s *testResourceManagerSuite) SetupSuite() { + re := s.Require() s.ctx, s.cancel = context.WithCancel(context.Background()) cluster, err := tests.NewTestCluster(s.ctx, 1) - s.Nil(err) + re.NoError(err) s.cluster = cluster s.cluster.RunInitialServers() cluster.WaitLeader() @@ -56,18 +57,19 @@ func (s *testResourceManagerSuite) TearDownSuite() { } func (s *testResourceManagerSuite) TestConfigController() { + re := s.Require() expectCfg := server.ControllerConfig{} expectCfg.Adjust(nil) // Show controller config checkShow := func() { args := []string{"-u", s.pdAddr, "resource-manager", "config", "controller", "show"} output, err := pdctl.ExecuteCommand(pdctlCmd.GetRootCmd(), args...) - s.Nil(err) + re.NoError(err) actualCfg := server.ControllerConfig{} err = json.Unmarshal(output, &actualCfg) - s.Nil(err) - s.Equal(expectCfg, actualCfg) + re.NoError(err) + re.Equal(expectCfg, actualCfg) } // Check default config @@ -76,22 +78,22 @@ func (s *testResourceManagerSuite) TestConfigController() { // Set controller config args := []string{"-u", s.pdAddr, "resource-manager", "config", "controller", "set", "ltb-max-wait-duration", "1h"} output, err := pdctl.ExecuteCommand(pdctlCmd.GetRootCmd(), args...) - s.Nil(err) - s.Contains(string(output), "Success!") + re.NoError(err) + re.Contains(string(output), "Success!") expectCfg.LTBMaxWaitDuration = typeutil.Duration{Duration: 1 * time.Hour} checkShow() args = []string{"-u", s.pdAddr, "resource-manager", "config", "controller", "set", "enable-controller-trace-log", "true"} output, err = pdctl.ExecuteCommand(pdctlCmd.GetRootCmd(), args...) - s.Nil(err) - s.Contains(string(output), "Success!") + re.NoError(err) + re.Contains(string(output), "Success!") expectCfg.EnableControllerTraceLog = true checkShow() args = []string{"-u", s.pdAddr, "resource-manager", "config", "controller", "set", "write-base-cost", "2"} output, err = pdctl.ExecuteCommand(pdctlCmd.GetRootCmd(), args...) 
- s.Nil(err) - s.Contains(string(output), "Success!") + re.NoError(err) + re.Contains(string(output), "Success!") expectCfg.RequestUnit.WriteBaseCost = 2 checkShow() } diff --git a/tests/pdctl/scheduler/scheduler_test.go b/tests/pdctl/scheduler/scheduler_test.go index 140ee7a7c44..585a5fd1199 100644 --- a/tests/pdctl/scheduler/scheduler_test.go +++ b/tests/pdctl/scheduler/scheduler_test.go @@ -48,7 +48,8 @@ func TestSchedulerTestSuite(t *testing.T) { } func (suite *schedulerTestSuite) SetupSuite() { - suite.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/skipStoreConfigSync", `return(true)`)) + re := suite.Require() + re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/skipStoreConfigSync", `return(true)`)) suite.env = tests.NewSchedulingTestEnvironment(suite.T()) suite.defaultSchedulers = []string{ "balance-leader-scheduler", @@ -61,8 +62,9 @@ func (suite *schedulerTestSuite) SetupSuite() { } func (suite *schedulerTestSuite) TearDownSuite() { + re := suite.Require() suite.env.Cleanup() - suite.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/skipStoreConfigSync")) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/skipStoreConfigSync")) } func (suite *schedulerTestSuite) TearDownTest() { diff --git a/tests/server/api/api_test.go b/tests/server/api/api_test.go index f5db6bb2513..946e65bc6e4 100644 --- a/tests/server/api/api_test.go +++ b/tests/server/api/api_test.go @@ -218,7 +218,7 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() { resp, err := dialClient.Do(req) suite.NoError(err) resp.Body.Close() - suite.Equal(leader.GetServer().GetServiceMiddlewarePersistOptions().IsRateLimitEnabled(), true) + suite.True(leader.GetServer().GetServiceMiddlewarePersistOptions().IsRateLimitEnabled()) // returns StatusOK when no rate-limit config req, _ = http.NewRequest(http.MethodPost, leader.GetAddr()+"/pd/api/v1/admin/log", strings.NewReader("\"info\"")) @@ -227,7 +227,7 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() { _, err = io.ReadAll(resp.Body) resp.Body.Close() suite.NoError(err) - suite.Equal(resp.StatusCode, http.StatusOK) + suite.Equal(http.StatusOK, resp.StatusCode) input = make(map[string]interface{}) input["type"] = "label" input["label"] = "SetLogLevel" @@ -241,7 +241,7 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() { _, err = io.ReadAll(resp.Body) resp.Body.Close() suite.NoError(err) - suite.Equal(resp.StatusCode, http.StatusOK) + suite.Equal(http.StatusOK, resp.StatusCode) for i := 0; i < 3; i++ { req, _ = http.NewRequest(http.MethodPost, leader.GetAddr()+"/pd/api/v1/admin/log", strings.NewReader("\"info\"")) @@ -251,10 +251,10 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() { resp.Body.Close() suite.NoError(err) if i > 0 { - suite.Equal(resp.StatusCode, http.StatusTooManyRequests) + suite.Equal(http.StatusTooManyRequests, resp.StatusCode) suite.Equal(string(data), fmt.Sprintf("%s\n", http.StatusText(http.StatusTooManyRequests))) } else { - suite.Equal(resp.StatusCode, http.StatusOK) + suite.Equal(http.StatusOK, resp.StatusCode) } } @@ -268,10 +268,10 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() { resp.Body.Close() suite.NoError(err) if i > 0 { - suite.Equal(resp.StatusCode, http.StatusTooManyRequests) + suite.Equal(http.StatusTooManyRequests, resp.StatusCode) suite.Equal(string(data), fmt.Sprintf("%s\n", http.StatusText(http.StatusTooManyRequests))) } else { - suite.Equal(resp.StatusCode, http.StatusOK) + 
 		suite.Equal(http.StatusOK, resp.StatusCode)
 	}
 }
@@ -284,7 +284,7 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() {
 		data, err := io.ReadAll(resp.Body)
 		resp.Body.Close()
 		suite.NoError(err)
-		suite.Equal(resp.StatusCode, http.StatusTooManyRequests)
+		suite.Equal(http.StatusTooManyRequests, resp.StatusCode)
 		suite.Equal(string(data), fmt.Sprintf("%s\n", http.StatusText(http.StatusTooManyRequests)))
 	}
 
@@ -297,12 +297,12 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() {
 	}
 	server.MustWaitLeader(suite.Require(), servers)
 	leader = suite.cluster.GetLeaderServer()
-	suite.Equal(leader.GetServer().GetServiceMiddlewarePersistOptions().IsRateLimitEnabled(), true)
+	suite.True(leader.GetServer().GetServiceMiddlewarePersistOptions().IsRateLimitEnabled())
 	cfg, ok := leader.GetServer().GetRateLimitConfig().LimiterConfig["SetLogLevel"]
-	suite.Equal(ok, true)
-	suite.Equal(cfg.ConcurrencyLimit, uint64(1))
-	suite.Equal(cfg.QPS, 0.5)
-	suite.Equal(cfg.QPSBurst, 1)
+	suite.True(ok)
+	suite.Equal(uint64(1), cfg.ConcurrencyLimit)
+	suite.Equal(0.5, cfg.QPS)
+	suite.Equal(1, cfg.QPSBurst)
 
 	for i := 0; i < 3; i++ {
 		req, _ = http.NewRequest(http.MethodPost, leader.GetAddr()+"/pd/api/v1/admin/log", strings.NewReader("\"info\""))
@@ -312,10 +312,10 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() {
 		resp.Body.Close()
 		suite.NoError(err)
 		if i > 0 {
-			suite.Equal(resp.StatusCode, http.StatusTooManyRequests)
+			suite.Equal(http.StatusTooManyRequests, resp.StatusCode)
 			suite.Equal(string(data), fmt.Sprintf("%s\n", http.StatusText(http.StatusTooManyRequests)))
 		} else {
-			suite.Equal(resp.StatusCode, http.StatusOK)
+			suite.Equal(http.StatusOK, resp.StatusCode)
 		}
 	}
 
@@ -329,10 +329,10 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() {
 		resp.Body.Close()
 		suite.NoError(err)
 		if i > 0 {
-			suite.Equal(resp.StatusCode, http.StatusTooManyRequests)
+			suite.Equal(http.StatusTooManyRequests, resp.StatusCode)
 			suite.Equal(string(data), fmt.Sprintf("%s\n", http.StatusText(http.StatusTooManyRequests)))
 		} else {
-			suite.Equal(resp.StatusCode, http.StatusOK)
+			suite.Equal(http.StatusOK, resp.StatusCode)
 		}
 	}
 
@@ -345,7 +345,7 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() {
 		data, err := io.ReadAll(resp.Body)
 		resp.Body.Close()
 		suite.NoError(err)
-		suite.Equal(resp.StatusCode, http.StatusTooManyRequests)
+		suite.Equal(http.StatusTooManyRequests, resp.StatusCode)
 		suite.Equal(string(data), fmt.Sprintf("%s\n", http.StatusText(http.StatusTooManyRequests)))
 	}
 
@@ -358,7 +358,7 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() {
 	resp, err = dialClient.Do(req)
 	suite.NoError(err)
 	resp.Body.Close()
-	suite.Equal(leader.GetServer().GetServiceMiddlewarePersistOptions().IsRateLimitEnabled(), false)
+	suite.False(leader.GetServer().GetServiceMiddlewarePersistOptions().IsRateLimitEnabled())
 
 	for i := 0; i < 3; i++ {
 		req, _ = http.NewRequest(http.MethodPost, leader.GetAddr()+"/pd/api/v1/admin/log", strings.NewReader("\"info\""))
@@ -367,7 +367,7 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() {
 		_, err = io.ReadAll(resp.Body)
 		resp.Body.Close()
 		suite.NoError(err)
-		suite.Equal(resp.StatusCode, http.StatusOK)
+		suite.Equal(http.StatusOK, resp.StatusCode)
 	}
 }
 
@@ -377,7 +377,7 @@ func (suite *middlewareTestSuite) TestSwaggerUrl() {
 	req, _ := http.NewRequest(http.MethodGet, leader.GetAddr()+"/swagger/ui/index", http.NoBody)
 	resp, err := dialClient.Do(req)
 	suite.NoError(err)
-	suite.True(resp.StatusCode == http.StatusNotFound)
+	suite.Equal(http.StatusNotFound, resp.StatusCode)
 	resp.Body.Close()
 }
diff --git a/tests/server/api/rule_test.go b/tests/server/api/rule_test.go
index 0a0c3f2fb2e..eaa41cc11bc 100644
--- a/tests/server/api/rule_test.go
+++ b/tests/server/api/rule_test.go
@@ -256,7 +256,7 @@ func (suite *ruleTestSuite) checkGetAll(cluster *tests.TestCluster) {
 	var resp2 []*placement.Rule
 	err = tu.ReadGetJSON(re, testDialClient, urlPrefix+"/rules", &resp2)
 	suite.NoError(err)
-	suite.GreaterOrEqual(len(resp2), 1)
+	suite.NotEmpty(resp2)
 }
 
 func (suite *ruleTestSuite) TestSetAll() {
@@ -1039,40 +1039,40 @@ func (suite *regionRuleTestSuite) checkRegionPlacementRule(cluster *tests.TestCl
 	u := fmt.Sprintf("%s/config/rules/region/%d/detail", urlPrefix, 1)
 	err := tu.ReadGetJSON(re, testDialClient, u, fit)
 	suite.NoError(err)
-	suite.Equal(len(fit.RuleFits), 1)
-	suite.Equal(len(fit.OrphanPeers), 1)
+	suite.Len(fit.RuleFits, 1)
+	suite.Len(fit.OrphanPeers, 1)
 	u = fmt.Sprintf("%s/config/rules/region/%d/detail", urlPrefix, 2)
 	fit = &placement.RegionFit{}
 	err = tu.ReadGetJSON(re, testDialClient, u, fit)
 	suite.NoError(err)
-	suite.Equal(len(fit.RuleFits), 2)
-	suite.Equal(len(fit.OrphanPeers), 0)
+	suite.Len(fit.RuleFits, 2)
+	suite.Empty(fit.OrphanPeers)
 	u = fmt.Sprintf("%s/config/rules/region/%d/detail", urlPrefix, 3)
 	fit = &placement.RegionFit{}
 	err = tu.ReadGetJSON(re, testDialClient, u, fit)
 	suite.NoError(err)
-	suite.Equal(len(fit.RuleFits), 0)
-	suite.Equal(len(fit.OrphanPeers), 2)
+	suite.Empty(fit.RuleFits)
+	suite.Len(fit.OrphanPeers, 2)
 
 	var label labeler.LabelRule
 	escapedID := url.PathEscape("keyspaces/0")
 	u = fmt.Sprintf("%s/config/region-label/rule/%s", urlPrefix, escapedID)
 	err = tu.ReadGetJSON(re, testDialClient, u, &label)
 	suite.NoError(err)
-	suite.Equal(label.ID, "keyspaces/0")
+	suite.Equal("keyspaces/0", label.ID)
 
 	var labels []labeler.LabelRule
 	u = fmt.Sprintf("%s/config/region-label/rules", urlPrefix)
 	err = tu.ReadGetJSON(re, testDialClient, u, &labels)
 	suite.NoError(err)
 	suite.Len(labels, 1)
-	suite.Equal(labels[0].ID, "keyspaces/0")
+	suite.Equal("keyspaces/0", labels[0].ID)
 
 	u = fmt.Sprintf("%s/config/region-label/rules/ids", urlPrefix)
 	err = tu.CheckGetJSON(testDialClient, u, []byte(`["rule1", "rule3"]`), func(resp []byte, statusCode int, _ http.Header) {
 		err := json.Unmarshal(resp, &labels)
 		suite.NoError(err)
-		suite.Len(labels, 0)
+		suite.Empty(labels)
 	})
 	suite.NoError(err)
@@ -1080,7 +1080,7 @@ func (suite *regionRuleTestSuite) checkRegionPlacementRule(cluster *tests.TestCl
 		err := json.Unmarshal(resp, &labels)
 		suite.NoError(err)
 		suite.Len(labels, 1)
-		suite.Equal(labels[0].ID, "keyspaces/0")
+		suite.Equal("keyspaces/0", labels[0].ID)
 	})
 	suite.NoError(err)
diff --git a/tests/server/apiv2/handlers/keyspace_test.go b/tests/server/apiv2/handlers/keyspace_test.go
index f7b43ab194d..535f01cc33e 100644
--- a/tests/server/apiv2/handlers/keyspace_test.go
+++ b/tests/server/apiv2/handlers/keyspace_test.go
@@ -46,22 +46,24 @@ func TestKeyspaceTestSuite(t *testing.T) {
 }
 
 func (suite *keyspaceTestSuite) SetupTest() {
+	re := suite.Require()
 	ctx, cancel := context.WithCancel(context.Background())
 	suite.cleanup = cancel
 	cluster, err := tests.NewTestCluster(ctx, 1)
 	suite.cluster = cluster
-	suite.NoError(err)
-	suite.NoError(cluster.RunInitialServers())
-	suite.NotEmpty(cluster.WaitLeader())
+	re.NoError(err)
+	re.NoError(cluster.RunInitialServers())
+	re.NotEmpty(cluster.WaitLeader())
 	suite.server = cluster.GetLeaderServer()
-	suite.NoError(suite.server.BootstrapCluster())
-	suite.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/skipSplitRegion", "return(true)"))
+	re.NoError(suite.server.BootstrapCluster())
+	re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/skipSplitRegion", "return(true)"))
 }
 
 func (suite *keyspaceTestSuite) TearDownTest() {
+	re := suite.Require()
 	suite.cleanup()
 	suite.cluster.Destroy()
-	suite.NoError(failpoint.Disable("github.com/tikv/pd/pkg/keyspace/skipSplitRegion"))
+	re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/keyspace/skipSplitRegion"))
 }
 
 func (suite *keyspaceTestSuite) TestCreateLoadKeyspace() {
@@ -133,7 +135,7 @@ func (suite *keyspaceTestSuite) TestLoadRangeKeyspace() {
 	loadResponse := sendLoadRangeRequest(re, suite.server, "", "")
 	re.Empty(loadResponse.NextPageToken) // Load response should contain no more pages.
 	// Load response should contain all created keyspace and a default.
-	re.Equal(len(keyspaces)+1, len(loadResponse.Keyspaces))
+	re.Len(loadResponse.Keyspaces, len(keyspaces)+1)
 	for i, created := range keyspaces {
 		re.Equal(created, loadResponse.Keyspaces[i+1].KeyspaceMeta)
 	}
diff --git a/tests/server/apiv2/handlers/tso_keyspace_group_test.go b/tests/server/apiv2/handlers/tso_keyspace_group_test.go
index 214de6e95ef..2bf2db715fa 100644
--- a/tests/server/apiv2/handlers/tso_keyspace_group_test.go
+++ b/tests/server/apiv2/handlers/tso_keyspace_group_test.go
@@ -39,14 +39,15 @@ func TestKeyspaceGroupTestSuite(t *testing.T) {
 }
 
 func (suite *keyspaceGroupTestSuite) SetupTest() {
+	re := suite.Require()
 	suite.ctx, suite.cancel = context.WithCancel(context.Background())
 	cluster, err := tests.NewTestAPICluster(suite.ctx, 1)
 	suite.cluster = cluster
-	suite.NoError(err)
-	suite.NoError(cluster.RunInitialServers())
-	suite.NotEmpty(cluster.WaitLeader())
+	re.NoError(err)
+	re.NoError(cluster.RunInitialServers())
+	re.NotEmpty(cluster.WaitLeader())
 	suite.server = cluster.GetLeaderServer()
-	suite.NoError(suite.server.BootstrapCluster())
+	re.NoError(suite.server.BootstrapCluster())
 }
 
 func (suite *keyspaceGroupTestSuite) TearDownTest() {
diff --git a/tests/server/cluster/cluster_test.go b/tests/server/cluster/cluster_test.go
index 0b0779d9434..67c798d7f69 100644
--- a/tests/server/cluster/cluster_test.go
+++ b/tests/server/cluster/cluster_test.go
@@ -1485,7 +1485,7 @@ func TestMinResolvedTS(t *testing.T) {
 	}
 
 	// default run job
-	re.NotEqual(rc.GetPDServerConfig().MinResolvedTSPersistenceInterval.Duration, 0)
+	re.NotZero(rc.GetPDServerConfig().MinResolvedTSPersistenceInterval.Duration)
 	setMinResolvedTSPersistenceInterval(re, rc, svr, 0)
 	re.Equal(time.Duration(0), rc.GetPDServerConfig().MinResolvedTSPersistenceInterval.Duration)
diff --git a/tests/server/cluster/cluster_work_test.go b/tests/server/cluster/cluster_work_test.go
index eabecf8e29b..f503563dbb1 100644
--- a/tests/server/cluster/cluster_work_test.go
+++ b/tests/server/cluster/cluster_work_test.go
@@ -16,7 +16,6 @@ package cluster_test
 
 import (
 	"context"
-	"errors"
 	"sort"
 	"testing"
 	"time"
@@ -112,9 +111,9 @@ func TestAskSplit(t *testing.T) {
 	re.NoError(leaderServer.GetServer().SaveTTLConfig(map[string]interface{}{"schedule.enable-tikv-split-region": 0}, time.Minute))
 
 	_, err = rc.HandleAskSplit(req)
-	re.True(errors.Is(err, errs.ErrSchedulerTiKVSplitDisabled))
+	re.ErrorIs(err, errs.ErrSchedulerTiKVSplitDisabled)
 	_, err = rc.HandleAskBatchSplit(req1)
-	re.True(errors.Is(err, errs.ErrSchedulerTiKVSplitDisabled))
+	re.ErrorIs(err, errs.ErrSchedulerTiKVSplitDisabled)
 	re.NoError(leaderServer.GetServer().SaveTTLConfig(map[string]interface{}{"schedule.enable-tikv-split-region": 0}, 0))
 	// wait ttl config takes effect
 	time.Sleep(time.Second)
diff --git a/tests/server/keyspace/keyspace_test.go b/tests/server/keyspace/keyspace_test.go
index 86b8f6fd37c..3ee15e1edc1 100644
--- a/tests/server/keyspace/keyspace_test.go
+++ b/tests/server/keyspace/keyspace_test.go
@@ -100,7 +100,7 @@ func checkLabelRule(re *require.Assertions, id uint32, regionLabeler *labeler.Re
 	rangeRule, ok := loadedLabel.Data.([]*labeler.KeyRangeRule)
 	re.True(ok)
-	re.Equal(2, len(rangeRule))
+	re.Len(rangeRule, 2)
 
 	keyspaceIDBytes := make([]byte, 4)
 	nextKeyspaceIDBytes := make([]byte, 4)
diff --git a/tools/pd-backup/pdbackup/backup_test.go b/tools/pd-backup/pdbackup/backup_test.go
index 40e4190f5d4..b35bf1e8a70 100644
--- a/tools/pd-backup/pdbackup/backup_test.go
+++ b/tools/pd-backup/pdbackup/backup_test.go
@@ -99,6 +99,7 @@ func setupServer() (*httptest.Server, *config.Config) {
 }
 
 func (s *backupTestSuite) BeforeTest(suiteName, testName string) {
+	re := s.Require()
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second*3)
 	defer cancel()
 
@@ -106,21 +107,21 @@ func (s *backupTestSuite) BeforeTest(suiteName, testName string) {
 		ctx,
 		pdClusterIDPath,
 		string(typeutil.Uint64ToBytes(clusterID)))
-	s.NoError(err)
+	re.NoError(err)
 
 	var (
 		rootPath               = path.Join(pdRootPath, strconv.FormatUint(clusterID, 10))
 		allocTimestampMaxBytes = typeutil.Uint64ToBytes(allocTimestampMax)
 	)
 	_, err = s.etcdClient.Put(ctx, endpoint.TimestampPath(rootPath), string(allocTimestampMaxBytes))
-	s.NoError(err)
+	re.NoError(err)
 
 	var (
 		allocIDPath     = path.Join(rootPath, "alloc_id")
 		allocIDMaxBytes = typeutil.Uint64ToBytes(allocIDMax)
 	)
 	_, err = s.etcdClient.Put(ctx, allocIDPath, string(allocIDMaxBytes))
-	s.NoError(err)
+	re.NoError(err)
 }
 
 func (s *backupTestSuite) AfterTest(suiteName, testName string) {
@@ -128,8 +129,9 @@ func (s *backupTestSuite) AfterTest(suiteName, testName string) {
 }
 
 func (s *backupTestSuite) TestGetBackupInfo() {
+	re := s.Require()
 	actual, err := GetBackupInfo(s.etcdClient, s.server.URL)
-	s.NoError(err)
+	re.NoError(err)
 
 	expected := &BackupInfo{
 		ClusterID:         clusterID,
@@ -137,22 +139,22 @@ func (s *backupTestSuite) TestGetBackupInfo() {
 		AllocTimestampMax: allocTimestampMax,
 		Config:            s.serverConfig,
 	}
-	s.Equal(expected, actual)
+	re.Equal(expected, actual)
 
 	tmpFile, err := os.CreateTemp(os.TempDir(), "pd_backup_info_test.json")
-	s.NoError(err)
+	re.NoError(err)
 	defer os.RemoveAll(tmpFile.Name())
-	s.NoError(OutputToFile(actual, tmpFile))
+	re.NoError(OutputToFile(actual, tmpFile))
 	_, err = tmpFile.Seek(0, 0)
-	s.NoError(err)
+	re.NoError(err)
 
 	b, err := io.ReadAll(tmpFile)
-	s.NoError(err)
+	re.NoError(err)
 
 	var restored BackupInfo
 	err = json.Unmarshal(b, &restored)
-	s.NoError(err)
+	re.NoError(err)
 
-	s.Equal(expected, &restored)
+	re.Equal(expected, &restored)
 }
diff --git a/tools/pd-simulator/simulator/simutil/key_test.go b/tools/pd-simulator/simulator/simutil/key_test.go
index b34f1bb3809..6f71bd12d14 100644
--- a/tools/pd-simulator/simulator/simutil/key_test.go
+++ b/tools/pd-simulator/simulator/simutil/key_test.go
@@ -102,13 +102,13 @@ func TestGenerateKeys(t *testing.T) {
 	numKeys := 10
 	actual := GenerateKeys(numKeys)
-	re.Equal(len(actual), numKeys)
+	re.Len(actual, numKeys)
 
 	// make sure every key:
 	// i. has length `keyLen`
 	// ii. has only characters from `keyChars`
 	for _, key := range actual {
-		re.Equal(len(key), keyLen)
+		re.Len(key, keyLen)
 		for _, char := range key {
 			re.True(strings.ContainsRune(keyChars, char))
 		}