multi tidb cluster testing #334

Merged: 5 commits, Mar 20, 2019

1 change: 1 addition & 0 deletions go.mod
@@ -31,6 +31,7 @@ require (
github.com/grpc-ecosystem/grpc-gateway v1.4.1 // indirect
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 // indirect
github.com/hpcloud/tail v1.0.0 // indirect
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3
github.com/jonboulle/clockwork v0.1.0 // indirect
github.com/json-iterator/go v1.1.5 // indirect
github.com/juju/errors v0.0.0-20180806074554-22422dad46e1
2 changes: 2 additions & 0 deletions go.sum
@@ -58,6 +58,8 @@ github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3 h1:sHsPfNMAG70QAvKbddQ0uScZCHQoZsT5NykGRCeeeIs=
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE=
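The new jinzhu/copier requirement above is pulled in for tests/cmd/e2e/main.go further down in this PR, where the backup cluster's TidbClusterInfo is cloned into a fresh struct for the restore cluster. A minimal sketch of that usage pattern, assuming an illustrative ClusterConfig struct in place of tests.TidbClusterInfo:

package main

import (
	"fmt"

	"github.com/jinzhu/copier"
)

// ClusterConfig is an illustrative stand-in for tests.TidbClusterInfo.
type ClusterConfig struct {
	ClusterName string
	Namespace   string
	Password    string
}

func main() {
	backup := &ClusterConfig{
		ClusterName: "e2e-cluster1",
		Namespace:   "e2e-cluster1",
		Password:    "admin",
	}

	// copier.Copy(to, from) copies matching exported fields, so the restore
	// config starts out as a clone of the backup config before being renamed.
	restore := &ClusterConfig{}
	copier.Copy(restore, backup)
	restore.ClusterName = restore.ClusterName + "-restore"

	fmt.Println(backup.ClusterName, restore.ClusterName) // e2e-cluster1 e2e-cluster1-restore
}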
6 changes: 3 additions & 3 deletions tests/actions.go
@@ -1115,8 +1115,8 @@ func (oa *operatorActions) Restore(from *TidbClusterInfo, to *TidbClusterInfo) e
setString := to.HelmSetString(sets)

restoreName := fmt.Sprintf("%s-restore", from.ClusterName)
cmd := fmt.Sprintf("helm install -n %s --namespace %s /charts/%s/tidb-backup --set-string %s",
restoreName, to.Namespace, to.OperatorTag, setString)
cmd := fmt.Sprintf("helm upgrade %s /charts/%s/tidb-backup --set-string %s",
restoreName, to.OperatorTag, setString)
glog.Infof("install restore [%s]", cmd)
res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
if err != nil {
@@ -1222,7 +1222,7 @@ func (oa *operatorActions) CreateSecret(info *TidbClusterInfo) error {

_, err := oa.kubeCli.CoreV1().Secrets(info.Namespace).Create(&initSecret)
if err != nil && !releaseIsExist(err) {
return err
return err
}

backupSecret := corev1.Secret{
281 changes: 155 additions & 126 deletions tests/cmd/e2e/main.go
@@ -14,11 +14,12 @@
package main

import (
"fmt"
"net/http"
_ "net/http/pprof"
"time"

"github.com/golang/glog"
"github.com/jinzhu/copier"
"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
"github.com/pingcap/tidb-operator/tests"
"github.com/pingcap/tidb-operator/tests/backup"
@@ -29,12 +30,6 @@ import (
"k8s.io/client-go/rest"
)

func perror(err error) {
if err != nil {
glog.Fatal(err)
}
}

func main() {
logs.InitLogs()
defer logs.FlushLogs()
@@ -49,6 +44,12 @@ func main() {
glog.Info(http.ListenAndServe("localhost:6060", nil))
}()

// TODO read these args from config
beginTidbVersion := "v2.1.0"
toTidbVersion := "v2.1.4"
operatorTag := "master"
operatorImage := "pingcap/tidb-operator:latest"

cfg, err := rest.InClusterConfig()
if err != nil {
glog.Fatalf("failed to get config: %v", err)
@@ -67,11 +68,85 @@
operatorInfo := &tests.OperatorInfo{
Namespace: "pingcap",
ReleaseName: "operator",
Image: "pingcap/tidb-operator:latest",
Tag: "master",
Image: operatorImage,
Tag: operatorTag,
SchedulerImage: "gcr.io/google-containers/hyperkube:v1.12.1",
LogLevel: "2",
}

// create database and table and insert a column for test backup and restore
initSql := `"create database record;use record;create table test(t char(32))"`

clusterInfos := []*tests.TidbClusterInfo{
{
Namespace: "e2e-cluster1",
ClusterName: "e2e-cluster1",
OperatorTag: operatorTag,
PDImage: fmt.Sprintf("pingcap/pd:%s", beginTidbVersion),
TiKVImage: fmt.Sprintf("pingcap/tikv:%s", beginTidbVersion),
TiDBImage: fmt.Sprintf("pingcap/tidb:%s", beginTidbVersion),
StorageClassName: "local-storage",
Password: "admin",
InitSql: initSql,
UserName: "root",
InitSecretName: "demo-set-secret",
BackupSecretName: "demo-backup-secret",
BackupPVC: "test-backup",
Resources: map[string]string{
"pd.resources.limits.cpu": "1000m",
"pd.resources.limits.memory": "2Gi",
"pd.resources.requests.cpu": "200m",
"pd.resources.requests.memory": "1Gi",
"tikv.resources.limits.cpu": "2000m",
"tikv.resources.limits.memory": "4Gi",
"tikv.resources.requests.cpu": "1000m",
"tikv.resources.requests.memory": "2Gi",
"tidb.resources.limits.cpu": "2000m",
"tidb.resources.limits.memory": "4Gi",
"tidb.resources.requests.cpu": "500m",
"tidb.resources.requests.memory": "1Gi",
},
Args: map[string]string{},
Monitor: true,
},
{
Namespace: "e2e-cluster2",
ClusterName: "e2e-cluster2",
OperatorTag: "master",
PDImage: fmt.Sprintf("pingcap/pd:%s", beginTidbVersion),
TiKVImage: fmt.Sprintf("pingcap/tikv:%s", beginTidbVersion),
TiDBImage: fmt.Sprintf("pingcap/tidb:%s", beginTidbVersion),
StorageClassName: "local-storage",
Password: "admin",
InitSql: initSql,
UserName: "root",
InitSecretName: "demo-set-secret",
BackupSecretName: "demo-backup-secret",
BackupPVC: "test-backup",
Resources: map[string]string{
"pd.resources.limits.cpu": "1000m",
"pd.resources.limits.memory": "2Gi",
"pd.resources.requests.cpu": "200m",
"pd.resources.requests.memory": "1Gi",
"tikv.resources.limits.cpu": "2000m",
"tikv.resources.limits.memory": "4Gi",
"tikv.resources.requests.cpu": "1000m",
"tikv.resources.requests.memory": "2Gi",
"tidb.resources.limits.cpu": "2000m",
"tidb.resources.limits.memory": "4Gi",
"tidb.resources.requests.cpu": "500m",
"tidb.resources.requests.memory": "1Gi",
},
Args: map[string]string{},
Monitor: true,
},
}

defer func() {
oa.DumpAllLogs(operatorInfo, clusterInfos)
}()

// deploy operator
if err := oa.CleanOperator(operatorInfo); err != nil {
oa.DumpAllLogs(operatorInfo, nil)
glog.Fatal(err)
@@ -81,163 +156,117 @@
glog.Fatal(err)
}

// create database and table and insert a column for test backup and restore
initSql := `"create database record;use record;create table test(t char(32))"`

clusterInfo := &tests.TidbClusterInfo{
BackupPVC: "test-backup",
Namespace: "tidb",
ClusterName: "demo",
OperatorTag: "master",
PDImage: "pingcap/pd:v2.1.0",
TiKVImage: "pingcap/tikv:v2.1.0",
TiDBImage: "pingcap/tidb:v2.1.0",
StorageClassName: "local-storage",
Password: "admin",
InitSql: initSql,
UserName: "root",
InitSecretName: "demo-set-secret",
BackupSecretName: "demo-backup-secret",
Resources: map[string]string{
"pd.resources.limits.cpu": "1000m",
"pd.resources.limits.memory": "2Gi",
"pd.resources.requests.cpu": "200m",
"pd.resources.requests.memory": "1Gi",
"tikv.resources.limits.cpu": "2000m",
"tikv.resources.limits.memory": "4Gi",
"tikv.resources.requests.cpu": "1000m",
"tikv.resources.requests.memory": "2Gi",
"tidb.resources.limits.cpu": "2000m",
"tidb.resources.limits.memory": "4Gi",
"tidb.resources.requests.cpu": "500m",
"tidb.resources.requests.memory": "1Gi",
},
Args: map[string]string{},
// deploy tidbclusters
for _, clusterInfo := range clusterInfos {
if err = oa.CleanTidbCluster(clusterInfo); err != nil {
glog.Fatal(err)
}
if err = oa.DeployTidbCluster(clusterInfo); err != nil {
glog.Fatal(err)
}
}

if err = oa.CleanTidbCluster(clusterInfo); err != nil {
oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo})
glog.Fatal(err)
}
if err = oa.DeployTidbCluster(clusterInfo); err != nil {
oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo})
glog.Fatal(err)
for _, clusterInfo := range clusterInfos {
if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil {
glog.Fatal(err)
}
}
if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil {
oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo})
glog.Fatal(err)

var workloads []workload.Workload
for _, clusterInfo := range clusterInfos {
workload := ddl.New(clusterInfo.DSN("test"), 1, 1)
workloads = append(workloads, workload)
}

err = workload.Run(func() error {
clusterInfo = clusterInfo.ScaleTiDB(3).ScaleTiKV(5).ScalePD(5)
if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
return err

for _, clusterInfo := range clusterInfos {
clusterInfo = clusterInfo.ScaleTiDB(3).ScaleTiKV(5).ScalePD(5)
if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
return err
}
}
if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
return err
for _, clusterInfo := range clusterInfos {
if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
return err
}
}

clusterInfo = clusterInfo.ScalePD(3)
if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
return err
for _, clusterInfo := range clusterInfos {
clusterInfo = clusterInfo.ScalePD(3)
if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
return err
}
}
if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
return err
for _, clusterInfo := range clusterInfos {
if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
return err
}
}

clusterInfo = clusterInfo.ScaleTiKV(3)
if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
return err
for _, clusterInfo := range clusterInfos {
clusterInfo = clusterInfo.ScaleTiKV(3)
if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
return err
}
}
if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
return err
for _, clusterInfo := range clusterInfos {
if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
return err
}
}

clusterInfo = clusterInfo.ScaleTiDB(1)
if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
return err
for _, clusterInfo := range clusterInfos {
clusterInfo = clusterInfo.ScaleTiDB(1)
if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
return err
}
}
if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
return err
for _, clusterInfo := range clusterInfos {
if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
return err
}
}

return nil
}, ddl.New(clusterInfo.DSN("test"), 1, 1))
}, workloads...)

if err != nil {
oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo})
glog.Fatal(err)
}

clusterInfo = clusterInfo.UpgradeAll("v2.1.4")
if err = oa.UpgradeTidbCluster(clusterInfo); err != nil {
oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo})
glog.Fatal(err)
}
if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil {
oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo})
glog.Fatal(err)
for _, clusterInfo := range clusterInfos {
clusterInfo = clusterInfo.UpgradeAll(toTidbVersion)
if err = oa.UpgradeTidbCluster(clusterInfo); err != nil {
glog.Fatal(err)
}
}

restoreClusterInfo := &tests.TidbClusterInfo{
BackupPVC: "test-backup",
Namespace: "tidb",
ClusterName: "demo2",
OperatorTag: "master",
PDImage: "pingcap/pd:v2.1.0",
TiKVImage: "pingcap/tikv:v2.1.0",
TiDBImage: "pingcap/tidb:v2.1.0",
StorageClassName: "local-storage",
Password: "admin",
InitSql: initSql,
UserName: "root",
InitSecretName: "demo2-set-secret",
BackupSecretName: "demo2-backup-secret",
Resources: map[string]string{
"pd.resources.limits.cpu": "1000m",
"pd.resources.limits.memory": "2Gi",
"pd.resources.requests.cpu": "200m",
"pd.resources.requests.memory": "1Gi",
"tikv.resources.limits.cpu": "2000m",
"tikv.resources.limits.memory": "4Gi",
"tikv.resources.requests.cpu": "1000m",
"tikv.resources.requests.memory": "2Gi",
"tidb.resources.limits.cpu": "2000m",
"tidb.resources.limits.memory": "4Gi",
"tidb.resources.requests.cpu": "500m",
"tidb.resources.requests.memory": "1Gi",
},
Args: map[string]string{},
for _, clusterInfo := range clusterInfos {
if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil {
glog.Fatal(err)
}
}

// backup and restore
backupClusterInfo := clusterInfos[0]
restoreClusterInfo := &tests.TidbClusterInfo{}
copier.Copy(restoreClusterInfo, backupClusterInfo)
restoreClusterInfo.ClusterName = restoreClusterInfo.ClusterName + "-restore"

if err = oa.CleanTidbCluster(restoreClusterInfo); err != nil {
oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo, restoreClusterInfo})
glog.Fatal(err)
}
if err = oa.DeployTidbCluster(restoreClusterInfo); err != nil {
oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo, restoreClusterInfo})
glog.Fatal(err)
}
if err = oa.CheckTidbClusterStatus(restoreClusterInfo); err != nil {
oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo, restoreClusterInfo})
glog.Fatal(err)
}

backupCase := backup.NewBackupCase(oa, clusterInfo, restoreClusterInfo)
backupCase := backup.NewBackupCase(oa, backupClusterInfo, restoreClusterInfo)

if err := backupCase.Run(); err != nil {
oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo, restoreClusterInfo})
glog.Fatal(err)
}

fa := tests.NewFaultTriggerAction(cli, kubeCli, conf)
if err := fa.StopETCD("172.16.4.171"); err != nil {
glog.Fatal(err)
}

time.Sleep(1 * time.Minute)

if err := fa.StartETCD("172.16.4.171"); err != nil {
glog.Fatal(err)
}
}
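As a reading aid for the main.go diff above (not part of the PR itself): the structural change running through the file is that every deploy, scale, upgrade, and check step now loops over a slice of cluster configs instead of acting on a single clusterInfo. A self-contained sketch of that scale-then-verify pattern, with placeholder types and functions standing in for the operator test helpers:

package main

import "log"

// Cluster is a placeholder for tests.TidbClusterInfo.
type Cluster struct {
	Name         string
	TiDBReplicas int
	TiKVReplicas int
	PDReplicas   int
}

// scale and check stand in for oa.ScaleTidbCluster and oa.CheckTidbClusterStatus.
func scale(c *Cluster) error {
	log.Printf("scale %s: tidb=%d tikv=%d pd=%d", c.Name, c.TiDBReplicas, c.TiKVReplicas, c.PDReplicas)
	return nil
}

func check(c *Cluster) error {
	log.Printf("check %s", c.Name)
	return nil
}

func main() {
	clusters := []*Cluster{{Name: "e2e-cluster1"}, {Name: "e2e-cluster2"}}

	// First apply the same change to every cluster, then verify every cluster,
	// mirroring the paired scale/check loops in the new main.go.
	for _, c := range clusters {
		c.TiDBReplicas, c.TiKVReplicas, c.PDReplicas = 3, 5, 5
		if err := scale(c); err != nil {
			log.Fatal(err)
		}
	}
	for _, c := range clusters {
		if err := check(c); err != nil {
			log.Fatal(err)
		}
	}
}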