diff --git a/cmd/backup-manager/app/backup/backup.go b/cmd/backup-manager/app/backup/backup.go index 7d9f0729a5..51f1d3626d 100644 --- a/cmd/backup-manager/app/backup/backup.go +++ b/cmd/backup-manager/app/backup/backup.go @@ -20,7 +20,7 @@ import ( "os/exec" "github.com/gogo/protobuf/proto" - glog "k8s.io/klog" + "k8s.io/klog" kvbackup "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/tidb-operator/cmd/backup-manager/app/constants" @@ -54,12 +54,12 @@ func (bo *Options) backupData(backup *v1alpha1.Backup) (string, error) { btype, } fullArgs = append(fullArgs, args...) - glog.Infof("Running br command with args: %v", fullArgs) + klog.Infof("Running br command with args: %v", fullArgs) output, err := exec.Command("br", fullArgs...).CombinedOutput() if err != nil { return path, fmt.Errorf("cluster %s, execute br command %v failed, output: %s, err: %v", bo, fullArgs, string(output), err) } - glog.Infof("Backup data for cluster %s successfully, output: %s", bo, string(output)) + klog.Infof("Backup data for cluster %s successfully, output: %s", bo, string(output)) return path, nil } diff --git a/cmd/backup-manager/app/backup/manager.go b/cmd/backup-manager/app/backup/manager.go index 78f652126b..0fb2543eba 100644 --- a/cmd/backup-manager/app/backup/manager.go +++ b/cmd/backup-manager/app/backup/manager.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/controller" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) // Manager mainly used to manage backup related work @@ -48,7 +48,7 @@ func NewManager( func (bm *Manager) ProcessBackup() error { backup, err := bm.backupLister.Backups(bm.Namespace).Get(bm.BackupName) if err != nil { - glog.Errorf("can't find cluster %s backup %s CRD object, err: %v", bm, bm.BackupName, err) + klog.Errorf("can't find cluster %s backup %s CRD object, err: %v", bm, bm.BackupName, err) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -76,7 +76,7 @@ func (bm *Manager) performBackup(backup *v1alpha1.Backup) error { backupFullPath, err := bm.backupData(backup) if err != nil { - glog.Errorf("backup cluster %s data failed, err: %s", bm, err) + klog.Errorf("backup cluster %s data failed, err: %s", bm, err) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -84,13 +84,13 @@ func (bm *Manager) performBackup(backup *v1alpha1.Backup) error { Message: err.Error(), }) } - glog.Infof("backup cluster %s data to %s success", bm, backupFullPath) + klog.Infof("backup cluster %s data to %s success", bm, backupFullPath) // Note: The size get from remote may be incorrect because the blobs // are eventually consistent. 
size, err := getBackupSize(backup) if err != nil { - glog.Errorf("Get size for backup files in %s of cluster %s failed, err: %s", backupFullPath, bm, err) + klog.Errorf("Get size for backup files in %s of cluster %s failed, err: %s", backupFullPath, bm, err) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -98,11 +98,11 @@ func (bm *Manager) performBackup(backup *v1alpha1.Backup) error { Message: err.Error(), }) } - glog.Infof("Get size %d for backup files in %s of cluster %s success", size, backupFullPath, bm) + klog.Infof("Get size %d for backup files in %s of cluster %s success", size, backupFullPath, bm) commitTs, err := getCommitTs(backup) if err != nil { - glog.Errorf("get cluster %s commitTs failed, err: %s", bm, err) + klog.Errorf("get cluster %s commitTs failed, err: %s", bm, err) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -110,7 +110,7 @@ func (bm *Manager) performBackup(backup *v1alpha1.Backup) error { Message: err.Error(), }) } - glog.Infof("get cluster %s commitTs %d success", bm, commitTs) + klog.Infof("get cluster %s commitTs %d success", bm, commitTs) finish := time.Now() diff --git a/cmd/backup-manager/app/backup_manager.go b/cmd/backup-manager/app/backup_manager.go index 83b9c64c31..8800891446 100644 --- a/cmd/backup-manager/app/backup_manager.go +++ b/cmd/backup-manager/app/backup_manager.go @@ -26,7 +26,7 @@ func Run() error { logs.InitLogs() defer logs.FlushLogs() - // fix glog parse error + // fix klog parse error flag.CommandLine.Parse([]string{}) pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc) diff --git a/cmd/backup-manager/app/clean/clean.go b/cmd/backup-manager/app/clean/clean.go index 136866b0a9..c727319d58 100644 --- a/cmd/backup-manager/app/clean/clean.go +++ b/cmd/backup-manager/app/clean/clean.go @@ -19,7 +19,7 @@ import ( "io" "os/exec" - glog "k8s.io/klog" + "k8s.io/klog" "github.com/pingcap/tidb-operator/cmd/backup-manager/app/constants" "github.com/pingcap/tidb-operator/cmd/backup-manager/app/util" @@ -53,12 +53,12 @@ func (bo *Options) cleanBRRemoteBackupData(backup *v1alpha1.Backup) error { if err != nil { return err } - glog.Infof("Prepare to delete %s for cluster %s", obj.Key, bo) + klog.Infof("Prepare to delete %s for cluster %s", obj.Key, bo) err = s.Delete(context.Background(), obj.Key) if err != nil { return err } - glog.Infof("Delete %s for cluster %s successfully", obj.Key, bo) + klog.Infof("Delete %s for cluster %s successfully", obj.Key, bo) } return nil } @@ -70,6 +70,6 @@ func (bo *Options) cleanRemoteBackupData(bucket string) error { return fmt.Errorf("cluster %s, execute rclone deletefile command failed, output: %s, err: %v", bo, string(output), err) } - glog.Infof("cluster %s backup %s was deleted successfully", bo, bucket) + klog.Infof("cluster %s backup %s was deleted successfully", bo, bucket) return nil } diff --git a/cmd/backup-manager/app/clean/manager.go b/cmd/backup-manager/app/clean/manager.go index 01380b5eec..e2e4061567 100644 --- a/cmd/backup-manager/app/clean/manager.go +++ b/cmd/backup-manager/app/clean/manager.go @@ -17,7 +17,7 @@ import ( "fmt" corev1 "k8s.io/api/core/v1" - glog "k8s.io/klog" + "k8s.io/klog" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1" @@ -55,7 +55,7 @@ func (bm *Manager) ProcessCleanBackup() error 
{ func (bm *Manager) performCleanBackup(backup *v1alpha1.Backup) error { if backup.Status.BackupPath == "" { - glog.Errorf("cluster %s backup path is empty", bm) + klog.Errorf("cluster %s backup path is empty", bm) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -72,7 +72,7 @@ func (bm *Manager) performCleanBackup(backup *v1alpha1.Backup) error { } if err != nil { - glog.Errorf("clean cluster %s backup %s failed, err: %s", bm, backup.Status.BackupPath, err) + klog.Errorf("clean cluster %s backup %s failed, err: %s", bm, backup.Status.BackupPath, err) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -81,7 +81,7 @@ func (bm *Manager) performCleanBackup(backup *v1alpha1.Backup) error { }) } - glog.Infof("clean cluster %s backup %s success", bm, backup.Status.BackupPath) + klog.Infof("clean cluster %s backup %s success", bm, backup.Status.BackupPath) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupClean, Status: corev1.ConditionTrue, diff --git a/cmd/backup-manager/app/cmd/backup.go b/cmd/backup-manager/app/cmd/backup.go index d0e2fce165..a67c95dcb1 100644 --- a/cmd/backup-manager/app/cmd/backup.go +++ b/cmd/backup-manager/app/cmd/backup.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/controller" "github.com/spf13/cobra" "k8s.io/client-go/tools/cache" - glog "k8s.io/klog" + "k8s.io/klog" cmdutil "k8s.io/kubectl/pkg/cmd/util" ) @@ -63,7 +63,7 @@ func runBackup(backupOpts backup.Options, kubecfg string) error { // waiting for the shared informer's store has synced. cache.WaitForCacheSync(ctx.Done(), backupInformer.Informer().HasSynced) - glog.Infof("start to process backup %s", backupOpts.String()) + klog.Infof("start to process backup %s", backupOpts.String()) bm := backup.NewManager(backupInformer.Lister(), statusUpdater, backupOpts) return bm.ProcessBackup() } diff --git a/cmd/backup-manager/app/cmd/clean.go b/cmd/backup-manager/app/cmd/clean.go index 35095223de..efff8cc92e 100644 --- a/cmd/backup-manager/app/cmd/clean.go +++ b/cmd/backup-manager/app/cmd/clean.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/controller" "github.com/spf13/cobra" "k8s.io/client-go/tools/cache" - glog "k8s.io/klog" + "k8s.io/klog" cmdutil "k8s.io/kubectl/pkg/cmd/util" ) @@ -64,7 +64,7 @@ func runClean(backupOpts clean.Options, kubecfg string) error { // waiting for the shared informer's store has synced. cache.WaitForCacheSync(ctx.Done(), backupInformer.Informer().HasSynced) - glog.Infof("start to clean backup %s", backupOpts.String()) + klog.Infof("start to clean backup %s", backupOpts.String()) bm := clean.NewManager(backupInformer.Lister(), statusUpdater, backupOpts) return bm.ProcessCleanBackup() } diff --git a/cmd/backup-manager/app/cmd/export.go b/cmd/backup-manager/app/cmd/export.go index 437506c8d1..6cc0c57fb2 100644 --- a/cmd/backup-manager/app/cmd/export.go +++ b/cmd/backup-manager/app/cmd/export.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/controller" "github.com/spf13/cobra" "k8s.io/client-go/tools/cache" - glog "k8s.io/klog" + "k8s.io/klog" cmdutil "k8s.io/kubectl/pkg/cmd/util" ) @@ -73,7 +73,7 @@ func runExport(backupOpts export.BackupOpts, kubecfg string) error { // waiting for the shared informer's store has synced. 
cache.WaitForCacheSync(ctx.Done(), backupInformer.Informer().HasSynced) - glog.Infof("start to process backup %s", backupOpts.String()) + klog.Infof("start to process backup %s", backupOpts.String()) bm := export.NewBackupManager(backupInformer.Lister(), statusUpdater, backupOpts) return bm.ProcessBackup() } diff --git a/cmd/backup-manager/app/cmd/import.go b/cmd/backup-manager/app/cmd/import.go index 92aef28d18..9efbe03946 100644 --- a/cmd/backup-manager/app/cmd/import.go +++ b/cmd/backup-manager/app/cmd/import.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/controller" "github.com/spf13/cobra" "k8s.io/client-go/tools/cache" - glog "k8s.io/klog" + "k8s.io/klog" cmdutil "k8s.io/kubectl/pkg/cmd/util" ) @@ -72,7 +72,7 @@ func runImport(restoreOpts _import.RestoreOpts, kubecfg string) error { // waiting for the shared informer's store has synced. cache.WaitForCacheSync(ctx.Done(), restoreInformer.Informer().HasSynced) - glog.Infof("start to process restore %s", restoreOpts.String()) + klog.Infof("start to process restore %s", restoreOpts.String()) rm := _import.NewRestoreManager(restoreInformer.Lister(), statusUpdater, restoreOpts) return rm.ProcessRestore() } diff --git a/cmd/backup-manager/app/cmd/restore.go b/cmd/backup-manager/app/cmd/restore.go index 50152afee2..d5835c2ad6 100644 --- a/cmd/backup-manager/app/cmd/restore.go +++ b/cmd/backup-manager/app/cmd/restore.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/controller" "github.com/spf13/cobra" "k8s.io/client-go/tools/cache" - glog "k8s.io/klog" + "k8s.io/klog" cmdutil "k8s.io/kubectl/pkg/cmd/util" ) @@ -67,7 +67,7 @@ func runRestore(restoreOpts restore.Options, kubecfg string) error { // waiting for the shared informer's store has synced. 
cache.WaitForCacheSync(ctx.Done(), restoreInformer.Informer().HasSynced) - glog.Infof("start to process restore %s", restoreOpts.String()) + klog.Infof("start to process restore %s", restoreOpts.String()) rm := restore.NewManager(restoreInformer.Lister(), statusUpdater, restoreOpts) return rm.ProcessRestore() } diff --git a/cmd/backup-manager/app/export/export.go b/cmd/backup-manager/app/export/export.go index 194463ef62..91e73ca175 100644 --- a/cmd/backup-manager/app/export/export.go +++ b/cmd/backup-manager/app/export/export.go @@ -24,7 +24,7 @@ import ( "time" "github.com/mholt/archiver" - glog "k8s.io/klog" + "k8s.io/klog" "github.com/pingcap/tidb-operator/cmd/backup-manager/app/constants" "github.com/pingcap/tidb-operator/cmd/backup-manager/app/util" @@ -114,7 +114,7 @@ func (bo *BackupOpts) backupDataToRemote(source, bucketURI string) error { return fmt.Errorf("cluster %s, execute rclone copyto command for upload backup data %s failed, output: %s, err: %v", bo, bucketURI, string(output), err) } - glog.Infof("upload cluster %s backup data to %s successfully, now move it to permanent URL %s", bo, tmpDestBucket, destBucket) + klog.Infof("upload cluster %s backup data to %s successfully, now move it to permanent URL %s", bo, tmpDestBucket, destBucket) // the backup was a success // remove .tmp extension diff --git a/cmd/backup-manager/app/export/manager.go b/cmd/backup-manager/app/export/manager.go index f4316c6f5f..22148294e2 100644 --- a/cmd/backup-manager/app/export/manager.go +++ b/cmd/backup-manager/app/export/manager.go @@ -26,7 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" - glog "k8s.io/klog" + "k8s.io/klog" ) // BackupManager mainly used to manage backup related work @@ -52,7 +52,7 @@ func NewBackupManager( func (bm *BackupManager) ProcessBackup() error { backup, err := bm.backupLister.Backups(bm.Namespace).Get(bm.BackupName) if err != nil { - glog.Errorf("can't find cluster %s backup %s CRD object, err: %v", bm, bm.BackupName, err) + klog.Errorf("can't find cluster %s backup %s CRD object, err: %v", bm, bm.BackupName, err) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -65,19 +65,19 @@ func (bm *BackupManager) ProcessBackup() error { err = wait.PollImmediate(constants.PollInterval, constants.CheckTimeout, func() (done bool, err error) { db, err = util.OpenDB(bm.getDSN(constants.TidbMetaDB)) if err != nil { - glog.Warningf("can't open connection to tidb cluster %s, err: %v", bm, err) + klog.Warningf("can't open connection to tidb cluster %s, err: %v", bm, err) return false, nil } if err := db.Ping(); err != nil { - glog.Warningf("can't connect to tidb cluster %s, err: %s", bm, err) + klog.Warningf("can't connect to tidb cluster %s, err: %s", bm, err) return false, nil } return true, nil }) if err != nil { - glog.Errorf("cluster %s connect failed, err: %s", bm, err) + klog.Errorf("cluster %s connect failed, err: %s", bm, err) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -103,7 +103,7 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro oldTikvGCTime, err := bm.getTikvGCLifeTime(db) if err != nil { - glog.Errorf("cluster %s get %s failed, err: %s", bm, constants.TikvGCVariable, err) + klog.Errorf("cluster %s get %s failed, err: %s", bm, constants.TikvGCVariable, err) return 
bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -111,11 +111,11 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro Message: err.Error(), }) } - glog.Infof("cluster %s %s is %s", bm, constants.TikvGCVariable, oldTikvGCTime) + klog.Infof("cluster %s %s is %s", bm, constants.TikvGCVariable, oldTikvGCTime) oldTikvGCTimeDuration, err := time.ParseDuration(oldTikvGCTime) if err != nil { - glog.Errorf("cluster %s parse old %s failed, err: %s", bm, constants.TikvGCVariable, err) + klog.Errorf("cluster %s parse old %s failed, err: %s", bm, constants.TikvGCVariable, err) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -125,7 +125,7 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro } tikvGCTimeDuration, err := time.ParseDuration(constants.TikvGCLifeTime) if err != nil { - glog.Errorf("cluster %s parse default %s failed, err: %s", bm, constants.TikvGCVariable, err) + klog.Errorf("cluster %s parse default %s failed, err: %s", bm, constants.TikvGCVariable, err) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -136,7 +136,7 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro if oldTikvGCTimeDuration < tikvGCTimeDuration { err = bm.setTikvGCLifeTime(db, constants.TikvGCLifeTime) if err != nil { - glog.Errorf("cluster %s set tikv GC life time to %s failed, err: %s", bm, constants.TikvGCLifeTime, err) + klog.Errorf("cluster %s set tikv GC life time to %s failed, err: %s", bm, constants.TikvGCLifeTime, err) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -144,12 +144,12 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro Message: err.Error(), }) } - glog.Infof("set cluster %s %s to %s success", bm, constants.TikvGCVariable, constants.TikvGCLifeTime) + klog.Infof("set cluster %s %s to %s success", bm, constants.TikvGCVariable, constants.TikvGCLifeTime) } backupFullPath, err := bm.dumpTidbClusterData() if err != nil { - glog.Errorf("dump cluster %s data failed, err: %s", bm, err) + klog.Errorf("dump cluster %s data failed, err: %s", bm, err) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -157,12 +157,12 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro Message: err.Error(), }) } - glog.Infof("dump cluster %s data to %s success", bm, backupFullPath) + klog.Infof("dump cluster %s data to %s success", bm, backupFullPath) if oldTikvGCTimeDuration < tikvGCTimeDuration { err = bm.setTikvGCLifeTime(db, oldTikvGCTime) if err != nil { - glog.Errorf("cluster %s reset tikv GC life time to %s failed, err: %s", bm, oldTikvGCTime, err) + klog.Errorf("cluster %s reset tikv GC life time to %s failed, err: %s", bm, oldTikvGCTime, err) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -170,13 +170,13 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro Message: err.Error(), }) } - glog.Infof("reset cluster %s %s to %s success", bm, constants.TikvGCVariable, oldTikvGCTime) + klog.Infof("reset cluster %s %s to %s success", bm, constants.TikvGCVariable, oldTikvGCTime) } // 
TODO: Concurrent get file size and upload backup data to speed up processing time archiveBackupPath := backupFullPath + constants.DefaultArchiveExtention err = archiveBackupData(backupFullPath, archiveBackupPath) if err != nil { - glog.Errorf("archive cluster %s backup data %s failed, err: %s", bm, archiveBackupPath, err) + klog.Errorf("archive cluster %s backup data %s failed, err: %s", bm, archiveBackupPath, err) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -184,11 +184,11 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro Message: err.Error(), }) } - glog.Infof("archive cluster %s backup data %s success", bm, archiveBackupPath) + klog.Infof("archive cluster %s backup data %s success", bm, archiveBackupPath) size, err := getBackupSize(archiveBackupPath) if err != nil { - glog.Errorf("get cluster %s archived backup file %s size %d failed, err: %s", bm, archiveBackupPath, size, err) + klog.Errorf("get cluster %s archived backup file %s size %d failed, err: %s", bm, archiveBackupPath, size, err) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -196,11 +196,11 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro Message: err.Error(), }) } - glog.Infof("get cluster %s archived backup file %s size %d success", bm, archiveBackupPath, size) + klog.Infof("get cluster %s archived backup file %s size %d success", bm, archiveBackupPath, size) commitTs, err := getCommitTsFromMetadata(backupFullPath) if err != nil { - glog.Errorf("get cluster %s commitTs failed, err: %s", bm, err) + klog.Errorf("get cluster %s commitTs failed, err: %s", bm, err) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -208,13 +208,13 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro Message: err.Error(), }) } - glog.Infof("get cluster %s commitTs %s success", bm, commitTs) + klog.Infof("get cluster %s commitTs %s success", bm, commitTs) remotePath := strings.TrimPrefix(archiveBackupPath, constants.BackupRootPath+"/") bucketURI := bm.getDestBucketURI(remotePath) err = bm.backupDataToRemote(archiveBackupPath, bucketURI) if err != nil { - glog.Errorf("backup cluster %s data to %s failed, err: %s", bm, bm.StorageType, err) + klog.Errorf("backup cluster %s data to %s failed, err: %s", bm, bm.StorageType, err) return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{ Type: v1alpha1.BackupFailed, Status: corev1.ConditionTrue, @@ -222,7 +222,7 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro Message: err.Error(), }) } - glog.Infof("backup cluster %s data to %s success", bm, bm.StorageType) + klog.Infof("backup cluster %s data to %s success", bm, bm.StorageType) finish := time.Now() diff --git a/cmd/backup-manager/app/import/manager.go b/cmd/backup-manager/app/import/manager.go index c5d74a1591..cc17881052 100644 --- a/cmd/backup-manager/app/import/manager.go +++ b/cmd/backup-manager/app/import/manager.go @@ -26,7 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" - glog "k8s.io/klog" + "k8s.io/klog" ) // RestoreManager mainly used to manage backup related work @@ -52,7 +52,7 @@ func NewRestoreManager( func (rm *RestoreManager) ProcessRestore() error { restore, err := 
rm.restoreLister.Restores(rm.Namespace).Get(rm.RestoreName) if err != nil { - glog.Errorf("can't find cluster %s restore %s CRD object, err: %v", rm, rm.RestoreName, err) + klog.Errorf("can't find cluster %s restore %s CRD object, err: %v", rm, rm.RestoreName, err) return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{ Type: v1alpha1.RestoreFailed, Status: corev1.ConditionTrue, @@ -64,12 +64,12 @@ func (rm *RestoreManager) ProcessRestore() error { err = wait.PollImmediate(constants.PollInterval, constants.CheckTimeout, func() (done bool, err error) { db, err := util.OpenDB(rm.getDSN(constants.TidbMetaDB)) if err != nil { - glog.Warningf("can't open connection to tidb cluster %s, err: %v", rm, err) + klog.Warningf("can't open connection to tidb cluster %s, err: %v", rm, err) return false, nil } if err := db.Ping(); err != nil { - glog.Warningf("can't connect to tidb cluster %s, err: %s", rm, err) + klog.Warningf("can't connect to tidb cluster %s, err: %s", rm, err) return false, nil } db.Close() @@ -77,7 +77,7 @@ func (rm *RestoreManager) ProcessRestore() error { }) if err != nil { - glog.Errorf("cluster %s connect failed, err: %s", rm, err) + klog.Errorf("cluster %s connect failed, err: %s", rm, err) return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{ Type: v1alpha1.RestoreFailed, Status: corev1.ConditionTrue, @@ -102,7 +102,7 @@ func (rm *RestoreManager) performRestore(restore *v1alpha1.Restore) error { restoreDataPath := rm.getRestoreDataPath() if err := rm.downloadBackupData(restoreDataPath); err != nil { - glog.Errorf("download cluster %s backup %s data failed, err: %s", rm, rm.BackupPath, err) + klog.Errorf("download cluster %s backup %s data failed, err: %s", rm, rm.BackupPath, err) return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{ Type: v1alpha1.RestoreFailed, Status: corev1.ConditionTrue, @@ -110,12 +110,12 @@ func (rm *RestoreManager) performRestore(restore *v1alpha1.Restore) error { Message: fmt.Sprintf("download backup %s data failed, err: %v", rm.BackupPath, err), }) } - glog.Infof("download cluster %s backup %s data success", rm, rm.BackupPath) + klog.Infof("download cluster %s backup %s data success", rm, rm.BackupPath) restoreDataDir := filepath.Dir(restoreDataPath) unarchiveDataPath, err := unarchiveBackupData(restoreDataPath, restoreDataDir) if err != nil { - glog.Errorf("unarchive cluster %s backup %s data failed, err: %s", rm, restoreDataPath, err) + klog.Errorf("unarchive cluster %s backup %s data failed, err: %s", rm, restoreDataPath, err) return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{ Type: v1alpha1.RestoreFailed, Status: corev1.ConditionTrue, @@ -123,11 +123,11 @@ func (rm *RestoreManager) performRestore(restore *v1alpha1.Restore) error { Message: fmt.Sprintf("unarchive backup %s data failed, err: %v", restoreDataPath, err), }) } - glog.Infof("unarchive cluster %s backup %s data success", rm, restoreDataPath) + klog.Infof("unarchive cluster %s backup %s data success", rm, restoreDataPath) err = rm.loadTidbClusterData(unarchiveDataPath) if err != nil { - glog.Errorf("restore cluster %s from backup %s failed, err: %s", rm, rm.BackupPath, err) + klog.Errorf("restore cluster %s from backup %s failed, err: %s", rm, rm.BackupPath, err) return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{ Type: v1alpha1.RestoreFailed, Status: corev1.ConditionTrue, @@ -135,7 +135,7 @@ func (rm *RestoreManager) performRestore(restore *v1alpha1.Restore) error { Message: fmt.Sprintf("loader backup %s data failed, 
err: %v", restoreDataPath, err), }) } - glog.Infof("restore cluster %s from backup %s success", rm, rm.BackupPath) + klog.Infof("restore cluster %s from backup %s success", rm, rm.BackupPath) finish := time.Now() diff --git a/cmd/backup-manager/app/restore/manager.go b/cmd/backup-manager/app/restore/manager.go index 2154c1c5c1..ec034badcc 100644 --- a/cmd/backup-manager/app/restore/manager.go +++ b/cmd/backup-manager/app/restore/manager.go @@ -19,7 +19,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - glog "k8s.io/klog" + "k8s.io/klog" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1" @@ -48,7 +48,7 @@ func NewManager( func (rm *Manager) ProcessRestore() error { restore, err := rm.restoreLister.Restores(rm.Namespace).Get(rm.RestoreName) if err != nil { - glog.Errorf("can't find cluster %s restore %s CRD object, err: %v", rm, rm.RestoreName, err) + klog.Errorf("can't find cluster %s restore %s CRD object, err: %v", rm, rm.RestoreName, err) return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{ Type: v1alpha1.RestoreFailed, Status: corev1.ConditionTrue, @@ -75,7 +75,7 @@ func (rm *Manager) performRestore(restore *v1alpha1.Restore) error { } if err := rm.restoreData(restore); err != nil { - glog.Errorf("restore cluster %s from %s failed, err: %s", rm, restore.Spec.Type, err) + klog.Errorf("restore cluster %s from %s failed, err: %s", rm, restore.Spec.Type, err) return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{ Type: v1alpha1.RestoreFailed, Status: corev1.ConditionTrue, @@ -83,7 +83,7 @@ func (rm *Manager) performRestore(restore *v1alpha1.Restore) error { Message: err.Error(), }) } - glog.Infof("restore cluster %s from %s succeed", rm, restore.Spec.Type) + klog.Infof("restore cluster %s from %s succeed", rm, restore.Spec.Type) finish := time.Now() restore.Status.TimeStarted = metav1.Time{Time: started} diff --git a/cmd/backup-manager/app/restore/restore.go b/cmd/backup-manager/app/restore/restore.go index 200c013753..fac9d5e4f7 100644 --- a/cmd/backup-manager/app/restore/restore.go +++ b/cmd/backup-manager/app/restore/restore.go @@ -17,7 +17,7 @@ import ( "fmt" "os/exec" - glog "k8s.io/klog" + "k8s.io/klog" "github.com/pingcap/tidb-operator/cmd/backup-manager/app/util" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" @@ -48,12 +48,12 @@ func (ro *Options) restoreData(restore *v1alpha1.Restore) error { restoreType, } fullArgs = append(fullArgs, args...) 
- glog.Infof("Running br command with args: %v", fullArgs) + klog.Infof("Running br command with args: %v", fullArgs) output, err := exec.Command("br", fullArgs...).CombinedOutput() if err != nil { return fmt.Errorf("cluster %s, execute br command %v failed, output: %s, err: %v", ro, fullArgs, string(output), err) } - glog.Infof("Restore data for cluster %s successfully, output: %s", ro, string(output)) + klog.Infof("Restore data for cluster %s successfully, output: %s", ro, string(output)) return nil } diff --git a/cmd/backup-manager/app/util/k8s.go b/cmd/backup-manager/app/util/k8s.go index a0de91bf15..a10b5c1fed 100644 --- a/cmd/backup-manager/app/util/k8s.go +++ b/cmd/backup-manager/app/util/k8s.go @@ -22,13 +22,13 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/record" - glog "k8s.io/klog" + "k8s.io/klog" ) // NewEventRecorder return the specify source's recoder func NewEventRecorder(kubeCli kubernetes.Interface, source string) record.EventRecorder { eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&eventv1.EventSinkImpl{ Interface: eventv1.New(kubeCli.CoreV1().RESTClient()).Events("")}) recorder := eventBroadcaster.NewRecorder(v1alpha1.Scheme, corev1.EventSource{Component: source}) diff --git a/cmd/controller-manager/main.go b/cmd/controller-manager/main.go index dc87a1d62a..f3179d3614 100644 --- a/cmd/controller-manager/main.go +++ b/cmd/controller-manager/main.go @@ -46,7 +46,7 @@ import ( "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" "k8s.io/component-base/logs" - glog "k8s.io/klog" + "k8s.io/klog" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -95,36 +95,36 @@ func main() { hostName, err := os.Hostname() if err != nil { - glog.Fatalf("failed to get hostname: %v", err) + klog.Fatalf("failed to get hostname: %v", err) } ns := os.Getenv("NAMESPACE") if ns == "" { - glog.Fatal("NAMESPACE environment variable not set") + klog.Fatal("NAMESPACE environment variable not set") } cfg, err := rest.InClusterConfig() if err != nil { - glog.Fatalf("failed to get config: %v", err) + klog.Fatalf("failed to get config: %v", err) } cli, err := versioned.NewForConfig(cfg) if err != nil { - glog.Fatalf("failed to create Clientset: %v", err) + klog.Fatalf("failed to create Clientset: %v", err) } var kubeCli kubernetes.Interface kubeCli, err = kubernetes.NewForConfig(cfg) if err != nil { - glog.Fatalf("failed to get kubernetes Clientset: %v", err) + klog.Fatalf("failed to get kubernetes Clientset: %v", err) } asCli, err := asclientset.NewForConfig(cfg) if err != nil { - glog.Fatalf("failed to get advanced-statefulset Clientset: %v", err) + klog.Fatalf("failed to get advanced-statefulset Clientset: %v", err) } // TODO: optimize the read of genericCli with the shared cache genericCli, err := client.New(cfg, client.Options{Scheme: scheme.Scheme}) if err != nil { - glog.Fatalf("failed to get the generic kube-apiserver client: %v", err) + klog.Fatalf("failed to get the generic kube-apiserver client: %v", err) } // note that kubeCli here must not be the hijacked one @@ -177,7 +177,7 @@ func main() { // Upgrade before running any controller logic. If it fails, we wait // for process supervisor to restart it again. 
if err := operatorUpgrader.Upgrade(); err != nil { - glog.Fatalf("failed to upgrade: %v", err) + klog.Fatalf("failed to upgrade: %v", err) } tcController := tidbcluster.NewController(kubeCli, cli, genericCli, informerFactory, kubeInformerFactory, autoFailover, pdFailoverPeriod, tikvFailoverPeriod, tidbFailoverPeriod) @@ -197,15 +197,15 @@ func main() { // Wait for all started informers' cache were synced. for v, synced := range informerFactory.WaitForCacheSync(wait.NeverStop) { if !synced { - glog.Fatalf("error syncing informer for %v", v) + klog.Fatalf("error syncing informer for %v", v) } } for v, synced := range kubeInformerFactory.WaitForCacheSync(wait.NeverStop) { if !synced { - glog.Fatalf("error syncing informer for %v", v) + klog.Fatalf("error syncing informer for %v", v) } } - glog.Infof("cache of informer factories sync successfully") + klog.Infof("cache of informer factories sync successfully") go wait.Forever(func() { backupController.Run(workers, ctx.Done()) }, waitDuration) go wait.Forever(func() { restoreController.Run(workers, ctx.Done()) }, waitDuration) @@ -218,7 +218,7 @@ func main() { wait.Forever(func() { tcController.Run(workers, ctx.Done()) }, waitDuration) } onStopped := func() { - glog.Fatalf("leader election lost") + klog.Fatalf("leader election lost") } // leader election for multiple tidb-controller-manager instances @@ -235,5 +235,5 @@ func main() { }) }, waitDuration) - glog.Fatal(http.ListenAndServe(":6060", nil)) + klog.Fatal(http.ListenAndServe(":6060", nil)) } diff --git a/cmd/discovery/main.go b/cmd/discovery/main.go index 8e890ee428..afe437be5d 100644 --- a/cmd/discovery/main.go +++ b/cmd/discovery/main.go @@ -27,7 +27,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/component-base/logs" - glog "k8s.io/klog" + "k8s.io/klog" ) var ( @@ -54,19 +54,19 @@ func main() { cfg, err := rest.InClusterConfig() if err != nil { - glog.Fatalf("failed to get config: %v", err) + klog.Fatalf("failed to get config: %v", err) } cli, err := versioned.NewForConfig(cfg) if err != nil { - glog.Fatalf("failed to create Clientset: %v", err) + klog.Fatalf("failed to create Clientset: %v", err) } kubeCli, err := kubernetes.NewForConfig(cfg) if err != nil { - glog.Fatalf("failed to get kubernetes Clientset: %v", err) + klog.Fatalf("failed to get kubernetes Clientset: %v", err) } go wait.Forever(func() { server.StartServer(cli, kubeCli, port) }, 5*time.Second) - glog.Fatal(http.ListenAndServe(":6060", nil)) + klog.Fatal(http.ListenAndServe(":6060", nil)) } diff --git a/cmd/scheduler/main.go b/cmd/scheduler/main.go index af310d45b3..e445119eaa 100644 --- a/cmd/scheduler/main.go +++ b/cmd/scheduler/main.go @@ -28,7 +28,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/component-base/logs" - glog "k8s.io/klog" + "k8s.io/klog" ) var ( @@ -56,19 +56,19 @@ func main() { cfg, err := rest.InClusterConfig() if err != nil { - glog.Fatalf("failed to get config: %v", err) + klog.Fatalf("failed to get config: %v", err) } kubeCli, err := kubernetes.NewForConfig(cfg) if err != nil { - glog.Fatalf("failed to get kubernetes Clientset: %v", err) + klog.Fatalf("failed to get kubernetes Clientset: %v", err) } cli, err := versioned.NewForConfig(cfg) if err != nil { - glog.Fatalf("failed to create Clientset: %v", err) + klog.Fatalf("failed to create Clientset: %v", err) } go wait.Forever(func() { server.StartServer(kubeCli, cli, port) }, 5*time.Second) - glog.Fatal(http.ListenAndServe(":6060", nil)) + klog.Fatal(http.ListenAndServe(":6060", nil)) } 
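Every hunk above applies one mechanical change: the aliased import (glog "k8s.io/klog") becomes a plain "k8s.io/klog" import, and each glog.* call site becomes klog.*; log levels, formats, and messages are untouched. A minimal runnable sketch of the resulting pattern, with an illustrative message that is not taken from the patch:

package main

import "k8s.io/klog"

func main() {
	// After the rename, call sites use the klog package name directly;
	// the logging API is otherwise identical to the old glog-aliased form.
	defer klog.Flush() // flush any buffered log lines before exit
	klog.Infof("start to process backup %s", "demo-backup")
	klog.V(4).Infof("verbose detail, emitted only when -v >= 4")
}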
diff --git a/pkg/apiserver/cmd/start.go b/pkg/apiserver/cmd/start.go index c373d84da9..b257c4dffa 100644 --- a/pkg/apiserver/cmd/start.go +++ b/pkg/apiserver/cmd/start.go @@ -123,7 +123,7 @@ func NewCommandStartServer(builders []*builders.APIGroupBuilder, stopCh <-chan s klog.InitFlags(klogFlags) flags.AddGoFlagSet(klogFlags) - // Sync the glog and klog flags. + // Sync the klog flags with the standard goflag flags. klogFlags.VisitAll(func(f *flag.Flag) { goFlag := flag.CommandLine.Lookup(f.Name) if goFlag != nil { diff --git a/pkg/apiserver/storage/apiserver.go b/pkg/apiserver/storage/apiserver.go index 5af65ed273..ed6169e364 100644 --- a/pkg/apiserver/storage/apiserver.go +++ b/pkg/apiserver/storage/apiserver.go @@ -21,7 +21,7 @@ import ( "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/storage/storagebackend/factory" "k8s.io/client-go/rest" - glog "k8s.io/klog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/storage" @@ -59,7 +59,7 @@ func (f *ApiServerRestOptionsFactory) newApiServerStorageDecorator() generic.Sto ) (storage.Interface, factory.DestroyFunc, error) { cli, err := versioned.NewForConfig(f.RestConfig) if err != nil { - glog.Fatalf("failed to create Clientset: %v", err) + klog.Fatalf("failed to create Clientset: %v", err) } objectType := newFunc() return NewApiServerStore(cli, f.Codec, f.StorageNamespace, objectType, newListFunc) diff --git a/pkg/backup/backup/backup_cleaner.go b/pkg/backup/backup/backup_cleaner.go index 4b90d5874b..1c9baca499 100644 --- a/pkg/backup/backup/backup_cleaner.go +++ b/pkg/backup/backup/backup_cleaner.go @@ -26,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" batchlisters "k8s.io/client-go/listers/batch/v1" corelisters "k8s.io/client-go/listers/core/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) // BackupCleaner implements the logic for cleaning backup @@ -63,7 +63,7 @@ func (bc *backupCleaner) Clean(backup *v1alpha1.Backup) error { ns := backup.GetNamespace() name := backup.GetName() - glog.Infof("start to clean backup %s/%s", ns, name) + klog.Infof("start to clean backup %s/%s", ns, name) cleanJobName := backup.GetCleanJobName() _, err := bc.jobLister.Jobs(ns).Get(cleanJobName) diff --git a/pkg/backup/backupschedule/backup_schedule_manager.go b/pkg/backup/backupschedule/backup_schedule_manager.go index 9725ac21e5..e2fe7e92c9 100644 --- a/pkg/backup/backupschedule/backup_schedule_manager.go +++ b/pkg/backup/backupschedule/backup_schedule_manager.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" batchlisters "k8s.io/client-go/listers/batch/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) type backupScheduleManager struct { @@ -160,7 +160,7 @@ func getLastScheduledTime(bs *v1alpha1.BackupSchedule) (*time.Time, error) { now := time.Now() if earliestTime.After(now) { // timestamp fallback, waiting for the next backup schedule period - glog.Errorf("backup schedule %s/%s timestamp fallback, lastBackupTime: %s, now: %s", + klog.Errorf("backup schedule %s/%s timestamp fallback, lastBackupTime: %s, now: %s", ns, bsName, earliestTime.Format(time.RFC3339), now.Format(time.RFC3339)) return nil, nil } @@ -184,13 +184,13 @@ func getLastScheduledTime(bs *v1alpha1.BackupSchedule) (*time.Time, error) { bs.Status.AllBackupCleanTime = &metav1.Time{Time: time.Now()} return nil, controller.RequeueErrorf("recovery backup schedule %s/%s from pause status, refresh AllBackupCleanTime.", ns, bsName) } - glog.Errorf("Too many missed start backup schedule time (> 100). 
Check the clock.") + klog.Errorf("Too many missed start backup schedule time (> 100). Check the clock.") return nil, nil } } if len(scheduledTimes) == 0 { - glog.V(4).Infof("unmet backup schedule %s/%s start time, waiting for the next backup schedule period", ns, bsName) + klog.V(4).Infof("unmet backup schedule %s/%s start time, waiting for the next backup schedule period", ns, bsName) return nil, nil } scheduledTime := scheduledTimes[len(scheduledTimes)-1] @@ -253,7 +253,7 @@ func (bm *backupScheduleManager) backupGC(bs *v1alpha1.BackupSchedule) { return } // TODO: When the backup schedule gc policy is not set, we should set a default backup gc policy. - glog.Warningf("backup schedule %s/%s does not set backup gc policy", ns, bsName) + klog.Warningf("backup schedule %s/%s does not set backup gc policy", ns, bsName) } func (bm *backupScheduleManager) backupGCByMaxReservedTime(bs *v1alpha1.BackupSchedule) { @@ -262,13 +262,13 @@ func (bm *backupScheduleManager) backupGCByMaxReservedTime(bs *v1alpha1.BackupSc reservedTime, err := time.ParseDuration(*bs.Spec.MaxReservedTime) if err != nil { - glog.Errorf("backup schedule %s/%s, invalid MaxReservedTime %s", ns, bsName, *bs.Spec.MaxReservedTime) + klog.Errorf("backup schedule %s/%s, invalid MaxReservedTime %s", ns, bsName, *bs.Spec.MaxReservedTime) return } backupsList, err := bm.getBackupList(bs, false) if err != nil { - glog.Errorf("backupGCByMaxReservedTime, err: %s", err) + klog.Errorf("backupGCByMaxReservedTime, err: %s", err) return } @@ -279,11 +279,11 @@ func (bm *backupScheduleManager) backupGCByMaxReservedTime(bs *v1alpha1.BackupSc } // delete the expired backup if err := bm.backupControl.DeleteBackup(backup); err != nil { - glog.Errorf("backup schedule %s/%s gc backup %s failed, err %v", ns, bsName, backup.GetName(), err) + klog.Errorf("backup schedule %s/%s gc backup %s failed, err %v", ns, bsName, backup.GetName(), err) return } deleteCount += 1 - glog.Infof("backup schedule %s/%s gc backup %s success", ns, bsName, backup.GetName()) + klog.Infof("backup schedule %s/%s gc backup %s success", ns, bsName, backup.GetName()) } if deleteCount == len(backupsList) { @@ -300,7 +300,7 @@ func (bm *backupScheduleManager) backupGCByMaxBackups(bs *v1alpha1.BackupSchedul backupsList, err := bm.getBackupList(bs, true) if err != nil { - glog.Errorf("backupGCByMaxBackups failed, err: %s", err) + klog.Errorf("backupGCByMaxBackups failed, err: %s", err) return } @@ -311,11 +311,11 @@ func (bm *backupScheduleManager) backupGCByMaxBackups(bs *v1alpha1.BackupSchedul } // delete the backup if err := bm.backupControl.DeleteBackup(backup); err != nil { - glog.Errorf("backup schedule %s/%s gc backup %s failed, err %v", ns, bsName, backup.GetName(), err) + klog.Errorf("backup schedule %s/%s gc backup %s failed, err %v", ns, bsName, backup.GetName(), err) return } deleteCount += 1 - glog.Infof("backup schedule %s/%s gc backup %s success", ns, bsName, backup.GetName()) + klog.Infof("backup schedule %s/%s gc backup %s success", ns, bsName, backup.GetName()) } if deleteCount == len(backupsList) { diff --git a/pkg/controller/backup/backup_controller.go b/pkg/controller/backup/backup_controller.go index dd2ec48e1a..6a5cb48704 100644 --- a/pkg/controller/backup/backup_controller.go +++ b/pkg/controller/backup/backup_controller.go @@ -34,7 +34,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" - glog "k8s.io/klog" + "k8s.io/klog" ) // Controller controls backup. 
@@ -62,7 +62,7 @@ func NewController( kubeInformerFactory kubeinformers.SharedInformerFactory, ) *Controller { eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&eventv1.EventSinkImpl{ Interface: eventv1.New(kubeCli.CoreV1().RESTClient()).Events("")}) recorder := eventBroadcaster.NewRecorder(v1alpha1.Scheme, corev1.EventSource{Component: "backup"}) @@ -115,8 +115,8 @@ func (bkc *Controller) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer bkc.queue.ShutDown() - glog.Info("Starting backup controller") - defer glog.Info("Shutting down backup controller") + klog.Info("Starting backup controller") + defer klog.Info("Shutting down backup controller") for i := 0; i < workers; i++ { go wait.Until(bkc.worker, time.Second, stopCh) @@ -142,10 +142,10 @@ func (bkc *Controller) processNextWorkItem() bool { defer bkc.queue.Done(key) if err := bkc.sync(key.(string)); err != nil { if perrors.Find(err, controller.IsRequeueError) != nil { - glog.Infof("Backup: %v, still need sync: %v, requeuing", key.(string), err) + klog.Infof("Backup: %v, still need sync: %v, requeuing", key.(string), err) bkc.queue.AddRateLimited(key) } else if perrors.Find(err, controller.IsIgnoreError) != nil { - glog.V(4).Infof("Backup: %v, ignore err: %v", key.(string), err) + klog.V(4).Infof("Backup: %v, ignore err: %v", key.(string), err) } else { utilruntime.HandleError(fmt.Errorf("Backup: %v, sync failed, err: %v, requeuing", key.(string), err)) bkc.queue.AddRateLimited(key) @@ -160,7 +160,7 @@ func (bkc *Controller) processNextWorkItem() bool { func (bkc *Controller) sync(key string) error { startTime := time.Now() defer func() { - glog.V(4).Infof("Finished syncing Backup %q (%v)", key, time.Since(startTime)) + klog.V(4).Infof("Finished syncing Backup %q (%v)", key, time.Since(startTime)) }() ns, name, err := cache.SplitMetaNamespaceKey(key) @@ -169,7 +169,7 @@ func (bkc *Controller) sync(key string) error { } backup, err := bkc.backupLister.Backups(ns).Get(name) if errors.IsNotFound(err) { - glog.Infof("Backup has been deleted %v", key) + klog.Infof("Backup has been deleted %v", key) return nil } if err != nil { @@ -190,27 +190,27 @@ func (bkc *Controller) updateBackup(cur interface{}) { if newBackup.DeletionTimestamp != nil { // the backup is being deleted, we need to do some cleanup work, enqueue backup. 
- glog.Infof("backup %s/%s is being deleted", ns, name) + klog.Infof("backup %s/%s is being deleted", ns, name) bkc.enqueueBackup(newBackup) return } if v1alpha1.IsBackupInvalid(newBackup) { - glog.V(4).Infof("backup %s/%s is invalid, skipping.", ns, name) + klog.V(4).Infof("backup %s/%s is invalid, skipping.", ns, name) return } if v1alpha1.IsBackupComplete(newBackup) { - glog.V(4).Infof("backup %s/%s is Complete, skipping.", ns, name) + klog.V(4).Infof("backup %s/%s is Complete, skipping.", ns, name) return } if v1alpha1.IsBackupScheduled(newBackup) { - glog.V(4).Infof("backup %s/%s is already scheduled, skipping", ns, name) + klog.V(4).Infof("backup %s/%s is already scheduled, skipping", ns, name) return } - glog.V(4).Infof("backup object %s/%s enqueue", ns, name) + klog.V(4).Infof("backup object %s/%s enqueue", ns, name) bkc.enqueueBackup(newBackup) } diff --git a/pkg/controller/backup_control.go b/pkg/controller/backup_control.go index 7173213ffa..4052b2ab41 100644 --- a/pkg/controller/backup_control.go +++ b/pkg/controller/backup_control.go @@ -38,7 +38,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" - glog "k8s.io/klog" + "k8s.io/klog" ) // BackupControlInterface manages Backups used in BackupSchedule @@ -70,9 +70,9 @@ func (rbc *realBackupControl) CreateBackup(backup *v1alpha1.Backup) (*v1alpha1.B bsName := backup.GetLabels()[label.BackupScheduleLabelKey] backup, err := rbc.cli.PingcapV1alpha1().Backups(ns).Create(backup) if err != nil { - glog.Errorf("failed to create Backup: [%s/%s] for backupSchedule/%s, err: %v", ns, backupName, bsName, err) + klog.Errorf("failed to create Backup: [%s/%s] for backupSchedule/%s, err: %v", ns, backupName, bsName, err) } else { - glog.V(4).Infof("create Backup: [%s/%s] for backupSchedule/%s successfully", ns, backupName, bsName) + klog.V(4).Infof("create Backup: [%s/%s] for backupSchedule/%s successfully", ns, backupName, bsName) } rbc.recordBackupEvent("create", backup, err) return backup, err @@ -85,9 +85,9 @@ func (rbc *realBackupControl) DeleteBackup(backup *v1alpha1.Backup) error { bsName := backup.GetLabels()[label.BackupScheduleLabelKey] err := rbc.cli.PingcapV1alpha1().Backups(ns).Delete(backupName, nil) if err != nil { - glog.Errorf("failed to delete Backup: [%s/%s] for backupSchedule/%s, err: %v", ns, backupName, bsName, err) + klog.Errorf("failed to delete Backup: [%s/%s] for backupSchedule/%s, err: %v", ns, backupName, bsName, err) } else { - glog.V(4).Infof("delete backup: [%s/%s] successfully, backupSchedule/%s", ns, backupName, bsName) + klog.V(4).Infof("delete backup: [%s/%s] successfully, backupSchedule/%s", ns, backupName, bsName) } rbc.recordBackupEvent("delete", backup, err) return err diff --git a/pkg/controller/backup_schedule_status_updater.go b/pkg/controller/backup_schedule_status_updater.go index 9e633f9306..14cfdc0a83 100644 --- a/pkg/controller/backup_schedule_status_updater.go +++ b/pkg/controller/backup_schedule_status_updater.go @@ -17,7 +17,7 @@ import ( "fmt" "strings" - glog "k8s.io/klog" + "k8s.io/klog" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" @@ -68,7 +68,7 @@ func (bss *realBackupScheduleStatusUpdater) UpdateBackupScheduleStatus( err := retry.RetryOnConflict(retry.DefaultRetry, func() error { _, updateErr := bss.cli.PingcapV1alpha1().BackupSchedules(ns).Update(bs) if updateErr == nil { - glog.Infof("BackupSchedule: [%s/%s] updated successfully", ns, 
bsName) + klog.Infof("BackupSchedule: [%s/%s] updated successfully", ns, bsName) return nil } if updated, err := bss.bsLister.BackupSchedules(ns).Get(bsName); err == nil { diff --git a/pkg/controller/backup_status_updater.go b/pkg/controller/backup_status_updater.go index 9a6490d24d..c35d34a1b8 100644 --- a/pkg/controller/backup_status_updater.go +++ b/pkg/controller/backup_status_updater.go @@ -17,7 +17,7 @@ import ( "fmt" "strings" - glog "k8s.io/klog" + "k8s.io/klog" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" @@ -63,7 +63,7 @@ func (bcu *realBackupConditionUpdater) Update(backup *v1alpha1.Backup, condition if isUpdate { _, updateErr := bcu.cli.PingcapV1alpha1().Backups(ns).Update(backup) if updateErr == nil { - glog.Infof("Backup: [%s/%s] updated successfully", ns, backupName) + klog.Infof("Backup: [%s/%s] updated successfully", ns, backupName) return nil } if updated, err := bcu.backupLister.Backups(ns).Get(backupName); err == nil { diff --git a/pkg/controller/backupschedule/backup_schedule_controller.go b/pkg/controller/backupschedule/backup_schedule_controller.go index fd6c4177cd..ce0f2d84d7 100644 --- a/pkg/controller/backupschedule/backup_schedule_controller.go +++ b/pkg/controller/backupschedule/backup_schedule_controller.go @@ -34,7 +34,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" - glog "k8s.io/klog" + "k8s.io/klog" ) // Controller controls restore. @@ -62,7 +62,7 @@ func NewController( kubeInformerFactory kubeinformers.SharedInformerFactory, ) *Controller { eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&eventv1.EventSinkImpl{ Interface: eventv1.New(kubeCli.CoreV1().RESTClient()).Events("")}) recorder := eventBroadcaster.NewRecorder(v1alpha1.Scheme, corev1.EventSource{Component: "backupSchedule"}) @@ -111,8 +111,8 @@ func (bsc *Controller) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer bsc.queue.ShutDown() - glog.Info("Starting backup schedule controller") - defer glog.Info("Shutting down backup schedule controller") + klog.Info("Starting backup schedule controller") + defer klog.Info("Shutting down backup schedule controller") for i := 0; i < workers; i++ { go wait.Until(bsc.worker, time.Second, stopCh) @@ -138,10 +138,10 @@ func (bsc *Controller) processNextWorkItem() bool { defer bsc.queue.Done(key) if err := bsc.sync(key.(string)); err != nil { if perrors.Find(err, controller.IsRequeueError) != nil { - glog.Infof("BackupSchedule: %v, still need sync: %v, requeuing", key.(string), err) + klog.Infof("BackupSchedule: %v, still need sync: %v, requeuing", key.(string), err) bsc.queue.AddRateLimited(key) } else if perrors.Find(err, controller.IsIgnoreError) != nil { - glog.V(4).Infof("BackupSchedule: %v, ignore err: %v, waiting for the next sync", key.(string), err) + klog.V(4).Infof("BackupSchedule: %v, ignore err: %v, waiting for the next sync", key.(string), err) } else { utilruntime.HandleError(fmt.Errorf("BackupSchedule: %v, sync failed, err: %v, requeuing", key.(string), err)) bsc.queue.AddRateLimited(key) @@ -156,7 +156,7 @@ func (bsc *Controller) processNextWorkItem() bool { func (bsc *Controller) sync(key string) error { startTime := time.Now() defer func() { - glog.V(4).Infof("Finished syncing BackupSchedule %q (%v)", key, time.Since(startTime)) + 
klog.V(4).Infof("Finished syncing BackupSchedule %q (%v)", key, time.Since(startTime)) }() ns, name, err := cache.SplitMetaNamespaceKey(key) @@ -165,7 +165,7 @@ func (bsc *Controller) sync(key string) error { } bs, err := bsc.bsLister.BackupSchedules(ns).Get(name) if errors.IsNotFound(err) { - glog.Infof("BackupSchedule has been deleted %v", key) + klog.Infof("BackupSchedule has been deleted %v", key) return nil } if err != nil { diff --git a/pkg/controller/cert_control.go b/pkg/controller/cert_control.go index 86c25f17d3..62f709679a 100644 --- a/pkg/controller/cert_control.go +++ b/pkg/controller/cert_control.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/client-go/kubernetes" certlisters "k8s.io/client-go/listers/certificates/v1beta1" - glog "k8s.io/klog" + "k8s.io/klog" ) // TiDBClusterCertOptions contains information needed to create new certificates @@ -78,7 +78,7 @@ func (rcc *realCertControl) Create(or metav1.OwnerReference, certOpts *TiDBClust // generate certificate if not exist if rcc.secControl.Check(certOpts.Namespace, csrName) { - glog.Infof("Secret %s already exist, reusing the key pair. TidbCluster: %s/%s", csrName, certOpts.Namespace, csrName) + klog.Infof("Secret %s already exist, reusing the key pair. TidbCluster: %s/%s", csrName, certOpts.Namespace, csrName) return nil } @@ -108,7 +108,7 @@ func (rcc *realCertControl) Create(or metav1.OwnerReference, certOpts *TiDBClust csrCh, err := rcc.kubeCli.CertificatesV1beta1().CertificateSigningRequests().Watch(watchReq) if err != nil { - glog.Errorf("error watch CSR for [%s/%s]: %s", certOpts.Namespace, certOpts.Instance, csrName) + klog.Errorf("error watch CSR for [%s/%s]: %s", certOpts.Namespace, certOpts.Instance, csrName) return err } @@ -116,7 +116,7 @@ func (rcc *realCertControl) Create(or metav1.OwnerReference, certOpts *TiDBClust for { select { case <-tick: - glog.Infof("CSR still not approved for [%s/%s]: %s, retry later", certOpts.Namespace, certOpts.Instance, csrName) + klog.Infof("CSR still not approved for [%s/%s]: %s, retry later", certOpts.Namespace, certOpts.Instance, csrName) continue case event, ok := <-watchCh: if !ok { @@ -133,7 +133,7 @@ func (rcc *realCertControl) Create(or metav1.OwnerReference, certOpts *TiDBClust if updatedCSR.UID == csr.UID && approveCond == capi.CertificateApproved && updatedCSR.Status.Certificate != nil { - glog.Infof("signed certificate for [%s/%s]: %s", certOpts.Namespace, certOpts.Instance, csrName) + klog.Infof("signed certificate for [%s/%s]: %s", certOpts.Namespace, certOpts.Instance, csrName) // save signed certificate and key to secret err = rcc.secControl.Create(or, certOpts, updatedCSR.Status.Certificate, key) @@ -179,13 +179,13 @@ func (rcc *realCertControl) sendCSR(or metav1.OwnerReference, ns, instance strin } if csr != nil { - glog.Infof("found exist CSR %s/%s created by tidb-operator, overwriting", ns, csrName) + klog.Infof("found exist CSR %s/%s created by tidb-operator, overwriting", ns, csrName) delOpts := &types.DeleteOptions{TypeMeta: types.TypeMeta{Kind: "CertificateSigningRequest"}} err := rcc.kubeCli.CertificatesV1beta1().CertificateSigningRequests().Delete(csrName, delOpts) if err != nil { return nil, fmt.Errorf("failed to delete exist old CSR for [%s/%s]: %s, error: %v", ns, instance, csrName, err) } - glog.Infof("exist old CSR deleted for [%s/%s]: %s", ns, instance, csrName) + klog.Infof("exist old CSR deleted for [%s/%s]: %s", ns, instance, csrName) return rcc.sendCSR(or, ns, instance, rawCSR, csrName) } @@ -214,7 +214,7 @@ func (rcc 
*realCertControl) sendCSR(or metav1.OwnerReference, ns, instance strin if err != nil { return resp, fmt.Errorf("failed to create CSR for [%s/%s]: %s, error: %v", ns, instance, csrName, err) } - glog.Infof("CSR created for [%s/%s]: %s", ns, instance, csrName) + klog.Infof("CSR created for [%s/%s]: %s", ns, instance, csrName) return resp, nil } diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index 2f6ef3de40..6d91a7c895 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -32,7 +32,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/retry" "k8s.io/client-go/util/workqueue" - glog "k8s.io/klog" + "k8s.io/klog" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -218,7 +218,7 @@ func TiKVCapacity(limits corev1.ResourceList) string { } i, b := q.AsInt64() if !b { - glog.Errorf("quantity %s can't be converted to int64", q.String()) + klog.Errorf("quantity %s can't be converted to int64", q.String()) return defaultArgs } if i%humanize.GiByte == 0 { @@ -436,7 +436,7 @@ func WatchForController(informer cache.SharedIndexInformer, q workqueue.Interfac controllerObj, err := fn(meta.GetNamespace(), ref.Name) if err != nil { if errors.IsNotFound(err) { - glog.V(4).Infof("controller %s/%s of %s/%s not found, ignore", + klog.V(4).Infof("controller %s/%s of %s/%s not found, ignore", meta.GetNamespace(), ref.Name, meta.GetNamespace(), meta.GetName()) } else { utilruntime.HandleError(fmt.Errorf("cannot get controller %s/%s of %s/%s", diff --git a/pkg/controller/general_pvc_control.go b/pkg/controller/general_pvc_control.go index b3bb84925e..b76b0cf5b0 100644 --- a/pkg/controller/general_pvc_control.go +++ b/pkg/controller/general_pvc_control.go @@ -25,7 +25,7 @@ import ( corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" - glog "k8s.io/klog" + "k8s.io/klog" ) // GeneralPVCControlInterface manages PVCs used in backup and restore's pvc @@ -57,9 +57,9 @@ func (gpc *realGeneralPVCControl) CreatePVC(object runtime.Object, pvc *corev1.P _, err := gpc.kubeCli.CoreV1().PersistentVolumeClaims(ns).Create(pvc) if err != nil { - glog.Errorf("failed to create pvc: [%s/%s], %s: %s, %v", ns, pvcName, kind, instanceName, err) + klog.Errorf("failed to create pvc: [%s/%s], %s: %s, %v", ns, pvcName, kind, instanceName, err) } else { - glog.V(4).Infof("create pvc: [%s/%s] successfully, %s: %s", ns, pvcName, kind, instanceName) + klog.V(4).Infof("create pvc: [%s/%s] successfully, %s: %s", ns, pvcName, kind, instanceName) } gpc.recordPVCEvent("create", object, pvc, err) return err diff --git a/pkg/controller/job_control.go b/pkg/controller/job_control.go index db38ba55fa..b978f0f5ce 100644 --- a/pkg/controller/job_control.go +++ b/pkg/controller/job_control.go @@ -27,7 +27,7 @@ import ( batchlisters "k8s.io/client-go/listers/batch/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" - glog "k8s.io/klog" + "k8s.io/klog" ) // JobControlInterface manages Jobs used in backup、restore and clean @@ -60,9 +60,9 @@ func (rjc *realJobControl) CreateJob(object runtime.Object, job *batchv1.Job) er _, err := rjc.kubeCli.BatchV1().Jobs(ns).Create(job) if err != nil { - glog.Errorf("failed to create %s job: [%s/%s], cluster: %s, err: %v", strings.ToLower(kind), ns, jobName, instanceName, err) + klog.Errorf("failed to create %s job: [%s/%s], cluster: %s, err: %v", strings.ToLower(kind), ns, jobName, instanceName, err) } else { - glog.V(4).Infof("create %s job: [%s/%s] successfully, 
cluster: %s", strings.ToLower(kind), ns, jobName, instanceName) + klog.V(4).Infof("create %s job: [%s/%s] successfully, cluster: %s", strings.ToLower(kind), ns, jobName, instanceName) } rjc.recordJobEvent("create", object, job, err) return err @@ -80,9 +80,9 @@ func (rjc *realJobControl) DeleteJob(object runtime.Object, job *batchv1.Job) er } err := rjc.kubeCli.BatchV1().Jobs(ns).Delete(jobName, opts) if err != nil { - glog.Errorf("failed to delete %s job: [%s/%s], cluster: %s, err: %v", strings.ToLower(kind), ns, jobName, instanceName, err) + klog.Errorf("failed to delete %s job: [%s/%s], cluster: %s, err: %v", strings.ToLower(kind), ns, jobName, instanceName, err) } else { - glog.V(4).Infof("delete %s job: [%s/%s] successfully, cluster: %s", strings.ToLower(kind), ns, jobName, instanceName) + klog.V(4).Infof("delete %s job: [%s/%s] successfully, cluster: %s", strings.ToLower(kind), ns, jobName, instanceName) } rjc.recordJobEvent("delete", object, job, err) return err diff --git a/pkg/controller/pod_control.go b/pkg/controller/pod_control.go index 06be0d3efd..f372ba8560 100644 --- a/pkg/controller/pod_control.go +++ b/pkg/controller/pod_control.go @@ -30,7 +30,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" - glog "k8s.io/klog" + "k8s.io/klog" ) // PodControlInterface manages Pods used in TidbCluster @@ -77,10 +77,10 @@ func (rpc *realPodControl) UpdatePod(tc *v1alpha1.TidbCluster, pod *corev1.Pod) var updateErr error updatePod, updateErr = rpc.kubeCli.CoreV1().Pods(ns).Update(pod) if updateErr == nil { - glog.Infof("Pod: [%s/%s] updated successfully, TidbCluster: [%s/%s]", ns, podName, ns, tcName) + klog.Infof("Pod: [%s/%s] updated successfully, TidbCluster: [%s/%s]", ns, podName, ns, tcName) return nil } - glog.Errorf("failed to update Pod: [%s/%s], error: %v", ns, podName, updateErr) + klog.Errorf("failed to update Pod: [%s/%s], error: %v", ns, podName, updateErr) if updated, err := rpc.podLister.Pods(ns).Get(podName); err == nil { // make a copy so we don't mutate the shared cache @@ -156,7 +156,7 @@ func (rpc *realPodControl) UpdateMetaInfo(tc *v1alpha1.TidbCluster, pod *corev1. if labels[label.ClusterIDLabelKey] == clusterID && labels[label.MemberIDLabelKey] == memberID && labels[label.StoreIDLabelKey] == storeID { - glog.V(4).Infof("pod %s/%s already has cluster labels set, skipping. TidbCluster: %s", ns, podName, tcName) + klog.V(4).Infof("pod %s/%s already has cluster labels set, skipping. TidbCluster: %s", ns, podName, tcName) return pod, nil } // labels is a pointer, modify labels will modify pod.Labels @@ -169,10 +169,10 @@ func (rpc *realPodControl) UpdateMetaInfo(tc *v1alpha1.TidbCluster, pod *corev1. 
var updateErr error updatePod, updateErr = rpc.kubeCli.CoreV1().Pods(ns).Update(pod) if updateErr == nil { - glog.V(4).Infof("update pod %s/%s with cluster labels %v successfully, TidbCluster: %s", ns, podName, labels, tcName) + klog.V(4).Infof("update pod %s/%s with cluster labels %v successfully, TidbCluster: %s", ns, podName, labels, tcName) return nil } - glog.Errorf("failed to update pod %s/%s with cluster labels %v, TidbCluster: %s, err: %v", ns, podName, labels, tcName, updateErr) + klog.Errorf("failed to update pod %s/%s with cluster labels %v, TidbCluster: %s, err: %v", ns, podName, labels, tcName, updateErr) if updated, err := rpc.podLister.Pods(ns).Get(podName); err == nil { // make a copy so we don't mutate the shared cache @@ -195,9 +195,9 @@ func (rpc *realPodControl) DeletePod(tc *v1alpha1.TidbCluster, pod *corev1.Pod) deleteOptions := metav1.DeleteOptions{Preconditions: &preconditions} err := rpc.kubeCli.CoreV1().Pods(ns).Delete(podName, &deleteOptions) if err != nil { - glog.Errorf("failed to delete Pod: [%s/%s], TidbCluster: %s, %v", ns, podName, tcName, err) + klog.Errorf("failed to delete Pod: [%s/%s], TidbCluster: %s, %v", ns, podName, tcName, err) } else { - glog.V(4).Infof("delete Pod: [%s/%s] successfully, TidbCluster: %s", ns, podName, tcName) + klog.V(4).Infof("delete Pod: [%s/%s] successfully, TidbCluster: %s", ns, podName, tcName) } rpc.recordPodEvent("delete", tc, podName, err) return err diff --git a/pkg/controller/pv_control.go b/pkg/controller/pv_control.go index 66d0e2a4e7..c76562560f 100644 --- a/pkg/controller/pv_control.go +++ b/pkg/controller/pv_control.go @@ -30,7 +30,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" - glog "k8s.io/klog" + "k8s.io/klog" ) // PVControlInterface manages PVs used in TidbCluster @@ -98,7 +98,7 @@ func (rpc *realPVControl) UpdateMetaInfo(obj runtime.Object, pv *corev1.Persiste pvName := pv.GetName() pvcRef := pv.Spec.ClaimRef if pvcRef == nil { - glog.Warningf("PV: [%s] doesn't have a ClaimRef, skipping, %s: %s/%s", kind, pvName, ns, name) + klog.Warningf("PV: [%s] doesn't have a ClaimRef, skipping, %s: %s/%s", pvName, kind, ns, name) return pv, nil } @@ -109,7 +109,7 @@ func (rpc *realPVControl) UpdateMetaInfo(obj runtime.Object, pv *corev1.Persiste return pv, err } - glog.Warningf("PV: [%s]'s PVC: [%s/%s] doesn't exist, skipping. %s: %s", pvName, ns, pvcName, kind, name) + klog.Warningf("PV: [%s]'s PVC: [%s/%s] doesn't exist, skipping. %s: %s", pvName, ns, pvcName, kind, name) return pv, nil } @@ -128,7 +128,7 @@ func (rpc *realPVControl) UpdateMetaInfo(obj runtime.Object, pv *corev1.Persiste pv.Labels[label.MemberIDLabelKey] == memberID && pv.Labels[label.StoreIDLabelKey] == storeID && pv.Annotations[label.AnnPodNameKey] == podName { - glog.V(4).Infof("pv %s already has labels and annotations synced, skipping. %s: %s/%s", pvName, kind, ns, name) + klog.V(4).Infof("pv %s already has labels and annotations synced, skipping.
%s: %s/%s", pvName, kind, ns, name) return pv, nil } @@ -150,10 +150,10 @@ func (rpc *realPVControl) UpdateMetaInfo(obj runtime.Object, pv *corev1.Persiste var updateErr error updatePV, updateErr = rpc.kubeCli.CoreV1().PersistentVolumes().Update(pv) if updateErr == nil { - glog.Infof("PV: [%s] updated successfully, %s: %s/%s", pvName, kind, ns, name) + klog.Infof("PV: [%s] updated successfully, %s: %s/%s", pvName, kind, ns, name) return nil } - glog.Errorf("failed to update PV: [%s], %s %s/%s, error: %v", pvName, kind, ns, name, err) + klog.Errorf("failed to update PV: [%s], %s %s/%s, error: %v", pvName, kind, ns, name, err) if updated, err := rpc.pvLister.Get(pvName); err == nil { // make a copy so we don't mutate the shared cache diff --git a/pkg/controller/pvc_control.go b/pkg/controller/pvc_control.go index 2ec2ad51e1..5b16eaadea 100644 --- a/pkg/controller/pvc_control.go +++ b/pkg/controller/pvc_control.go @@ -27,7 +27,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" - glog "k8s.io/klog" + "k8s.io/klog" ) // TODO add unit tests @@ -68,9 +68,9 @@ func (rpc *realPVCControl) DeletePVC(tc *v1alpha1.TidbCluster, pvc *corev1.Persi pvcName := pvc.GetName() err := rpc.kubeCli.CoreV1().PersistentVolumeClaims(tc.GetNamespace()).Delete(pvcName, nil) if err != nil { - glog.Errorf("failed to delete PVC: [%s/%s], TidbCluster: %s, %v", ns, pvcName, tcName, err) + klog.Errorf("failed to delete PVC: [%s/%s], TidbCluster: %s, %v", ns, pvcName, tcName, err) } - glog.V(4).Infof("delete PVC: [%s/%s] successfully, TidbCluster: %s", ns, pvcName, tcName) + klog.V(4).Infof("delete PVC: [%s/%s] successfully, TidbCluster: %s", ns, pvcName, tcName) rpc.recordPVCEvent("delete", tc, pvcName, err) return err } @@ -87,10 +87,10 @@ func (rpc *realPVCControl) UpdatePVC(tc *v1alpha1.TidbCluster, pvc *corev1.Persi var updateErr error updatePVC, updateErr = rpc.kubeCli.CoreV1().PersistentVolumeClaims(ns).Update(pvc) if updateErr == nil { - glog.Infof("update PVC: [%s/%s] successfully, TidbCluster: %s", ns, pvcName, tcName) + klog.Infof("update PVC: [%s/%s] successfully, TidbCluster: %s", ns, pvcName, tcName) return nil } - glog.Errorf("failed to update PVC: [%s/%s], TidbCluster: %s, error: %v", ns, pvcName, tcName, updateErr) + klog.Errorf("failed to update PVC: [%s/%s], TidbCluster: %s, error: %v", ns, pvcName, tcName, updateErr) if updated, err := rpc.pvcLister.PersistentVolumeClaims(ns).Get(pvcName); err == nil { // make a copy so we don't mutate the shared cache @@ -128,7 +128,7 @@ func (rpc *realPVCControl) UpdateMetaInfo(tc *v1alpha1.TidbCluster, pvc *corev1. pvc.Labels[label.MemberIDLabelKey] == memberID && pvc.Labels[label.StoreIDLabelKey] == storeID && pvc.Annotations[label.AnnPodNameKey] == podName { - glog.V(4).Infof("pvc %s/%s already has labels and annotations synced, skipping, TidbCluster: %s", ns, pvcName, tcName) + klog.V(4).Infof("pvc %s/%s already has labels and annotations synced, skipping, TidbCluster: %s", ns, pvcName, tcName) return pvc, nil } @@ -144,10 +144,10 @@ func (rpc *realPVCControl) UpdateMetaInfo(tc *v1alpha1.TidbCluster, pvc *corev1. 
var updateErr error updatePVC, updateErr = rpc.kubeCli.CoreV1().PersistentVolumeClaims(ns).Update(pvc) if updateErr == nil { - glog.V(4).Infof("update PVC: [%s/%s] successfully, TidbCluster: %s", ns, pvcName, tcName) + klog.V(4).Infof("update PVC: [%s/%s] successfully, TidbCluster: %s", ns, pvcName, tcName) return nil } - glog.Errorf("failed to update PVC: [%s/%s], TidbCluster: %s, error: %v", ns, pvcName, tcName, updateErr) + klog.Errorf("failed to update PVC: [%s/%s], TidbCluster: %s, error: %v", ns, pvcName, tcName, updateErr) if updated, err := rpc.pvcLister.PersistentVolumeClaims(ns).Get(pvcName); err == nil { // make a copy so we don't mutate the shared cache diff --git a/pkg/controller/restore/restore_controller.go b/pkg/controller/restore/restore_controller.go index 3581fbdf37..d170fa493f 100644 --- a/pkg/controller/restore/restore_controller.go +++ b/pkg/controller/restore/restore_controller.go @@ -34,7 +34,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" - glog "k8s.io/klog" + "k8s.io/klog" ) // Controller controls restore. @@ -62,7 +62,7 @@ func NewController( kubeInformerFactory kubeinformers.SharedInformerFactory, ) *Controller { eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&eventv1.EventSinkImpl{ Interface: eventv1.New(kubeCli.CoreV1().RESTClient()).Events("")}) recorder := eventBroadcaster.NewRecorder(v1alpha1.Scheme, corev1.EventSource{Component: "restore"}) @@ -114,8 +114,8 @@ func (rsc *Controller) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer rsc.queue.ShutDown() - glog.Info("Starting restore controller") - defer glog.Info("Shutting down restore controller") + klog.Info("Starting restore controller") + defer klog.Info("Shutting down restore controller") for i := 0; i < workers; i++ { go wait.Until(rsc.worker, time.Second, stopCh) @@ -141,10 +141,10 @@ func (rsc *Controller) processNextWorkItem() bool { defer rsc.queue.Done(key) if err := rsc.sync(key.(string)); err != nil { if perrors.Find(err, controller.IsRequeueError) != nil { - glog.Infof("Restore: %v, still need sync: %v, requeuing", key.(string), err) + klog.Infof("Restore: %v, still need sync: %v, requeuing", key.(string), err) rsc.queue.AddRateLimited(key) } else if perrors.Find(err, controller.IsIgnoreError) != nil { - glog.V(4).Infof("Restore: %v, ignore err: %v", key.(string), err) + klog.V(4).Infof("Restore: %v, ignore err: %v", key.(string), err) } else { utilruntime.HandleError(fmt.Errorf("Restore: %v, sync failed, err: %v, requeuing", key.(string), err)) rsc.queue.AddRateLimited(key) @@ -159,7 +159,7 @@ func (rsc *Controller) processNextWorkItem() bool { func (rsc *Controller) sync(key string) error { startTime := time.Now() defer func() { - glog.V(4).Infof("Finished syncing Restore %q (%v)", key, time.Since(startTime)) + klog.V(4).Infof("Finished syncing Restore %q (%v)", key, time.Since(startTime)) }() ns, name, err := cache.SplitMetaNamespaceKey(key) @@ -168,7 +168,7 @@ func (rsc *Controller) sync(key string) error { } restore, err := rsc.restoreLister.Restores(ns).Get(name) if errors.IsNotFound(err) { - glog.Infof("Restore has been deleted %v", key) + klog.Infof("Restore has been deleted %v", key) return nil } if err != nil { @@ -188,21 +188,21 @@ func (rsc *Controller) updateRestore(cur interface{}) { name := newRestore.GetName() if v1alpha1.IsRestoreInvalid(newRestore) { - 
glog.V(4).Infof("restore %s/%s is Invalid, skipping.", ns, name) + klog.V(4).Infof("restore %s/%s is Invalid, skipping.", ns, name) return } if v1alpha1.IsRestoreComplete(newRestore) { - glog.V(4).Infof("restore %s/%s is Complete, skipping.", ns, name) + klog.V(4).Infof("restore %s/%s is Complete, skipping.", ns, name) return } if v1alpha1.IsRestoreScheduled(newRestore) { - glog.V(4).Infof("restore %s/%s is already scheduled, skipping", ns, name) + klog.V(4).Infof("restore %s/%s is already scheduled, skipping", ns, name) return } - glog.V(4).Infof("restore object %s/%s enqueue", ns, name) + klog.V(4).Infof("restore object %s/%s enqueue", ns, name) rsc.enqueueRestore(newRestore) } diff --git a/pkg/controller/restore_status_updater.go b/pkg/controller/restore_status_updater.go index 486f146c3c..591cc3b075 100644 --- a/pkg/controller/restore_status_updater.go +++ b/pkg/controller/restore_status_updater.go @@ -17,7 +17,7 @@ import ( "fmt" "strings" - glog "k8s.io/klog" + "k8s.io/klog" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" @@ -63,7 +63,7 @@ func (rcu *realRestoreConditionUpdater) Update(restore *v1alpha1.Restore, condit if isUpdate { _, updateErr := rcu.cli.PingcapV1alpha1().Restores(ns).Update(restore) if updateErr == nil { - glog.Infof("Restore: [%s/%s] updated successfully", ns, restoreName) + klog.Infof("Restore: [%s/%s] updated successfully", ns, restoreName) return nil } if updated, err := rcu.restoreLister.Restores(ns).Get(restoreName); err == nil { diff --git a/pkg/controller/secret_control.go b/pkg/controller/secret_control.go index 7392c4f9ea..da7a28abdf 100644 --- a/pkg/controller/secret_control.go +++ b/pkg/controller/secret_control.go @@ -26,7 +26,7 @@ import ( types "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" corelisters "k8s.io/client-go/listers/core/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) // SecretControlInterface manages certificates used by TiDB clusters @@ -72,7 +72,7 @@ func (rsc *realSecretControl) Create(or metav1.OwnerReference, certOpts *TiDBClu _, err := rsc.kubeCli.CoreV1().Secrets(certOpts.Namespace).Create(secret) if err == nil { - glog.Infof("save cert to secret %s/%s", certOpts.Namespace, secretName) + klog.Infof("save cert to secret %s/%s", certOpts.Namespace, secretName) } return err } @@ -91,24 +91,24 @@ func (rsc *realSecretControl) Load(ns string, secretName string) ([]byte, []byte func (rsc *realSecretControl) Check(ns string, secretName string) bool { certBytes, keyBytes, err := rsc.Load(ns, secretName) if err != nil { - glog.Errorf("certificate validation failed for [%s/%s], error loading cert from secret, %v", ns, secretName, err) + klog.Errorf("certificate validation failed for [%s/%s], error loading cert from secret, %v", ns, secretName, err) return false } // validate if the certificate is valid block, _ := pem.Decode(certBytes) if block == nil { - glog.Errorf("certificate validation failed for [%s/%s], can not decode cert to PEM", ns, secretName) + klog.Errorf("certificate validation failed for [%s/%s], can not decode cert to PEM", ns, secretName) return false } cert, err := x509.ParseCertificate(block.Bytes) if err != nil { - glog.Errorf("certificate validation failed for [%s/%s], can not parse cert, %v", ns, secretName, err) + klog.Errorf("certificate validation failed for [%s/%s], can not parse cert, %v", ns, secretName, err) return false } rootCAs, err := certutil.ReadCACerts() if err != nil { - 
glog.Errorf("certificate validation failed for [%s/%s], error loading CAs, %v", ns, secretName, err) + klog.Errorf("certificate validation failed for [%s/%s], error loading CAs, %v", ns, secretName, err) return false } @@ -121,14 +121,14 @@ func (rsc *realSecretControl) Check(ns string, secretName string) bool { } _, err = cert.Verify(verifyOpts) if err != nil { - glog.Errorf("certificate validation failed for [%s/%s], %v", ns, secretName, err) + klog.Errorf("certificate validation failed for [%s/%s], %v", ns, secretName, err) return false } // validate if the certificate and private key matches _, err = tls.X509KeyPair(certBytes, keyBytes) if err != nil { - glog.Errorf("certificate validation failed for [%s/%s], error loading key pair, %v", ns, secretName, err) + klog.Errorf("certificate validation failed for [%s/%s], error loading key pair, %v", ns, secretName, err) return false } diff --git a/pkg/controller/service_control.go b/pkg/controller/service_control.go index 1d858c1a0f..5a0dd1cf56 100644 --- a/pkg/controller/service_control.go +++ b/pkg/controller/service_control.go @@ -29,7 +29,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" - glog "k8s.io/klog" + "k8s.io/klog" ) // ExternalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. @@ -74,7 +74,7 @@ func (sc *realServiceControl) UpdateService(tc *v1alpha1.TidbCluster, svc *corev var updateErr error updateSvc, updateErr = sc.kubeCli.CoreV1().Services(ns).Update(svc) if updateErr == nil { - glog.Infof("update Service: [%s/%s] successfully, TidbCluster: %s", ns, svcName, tcName) + klog.Infof("update Service: [%s/%s] successfully, TidbCluster: %s", ns, svcName, tcName) return nil } diff --git a/pkg/controller/stateful_set_control.go b/pkg/controller/stateful_set_control.go index 7d4a427487..cbe83a02e2 100644 --- a/pkg/controller/stateful_set_control.go +++ b/pkg/controller/stateful_set_control.go @@ -30,7 +30,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" - glog "k8s.io/klog" + "k8s.io/klog" ) // StatefulSetControlInterface defines the interface that uses to create, update, and delete StatefulSets, @@ -78,10 +78,10 @@ func (sc *realStatefulSetControl) UpdateStatefulSet(tc *v1alpha1.TidbCluster, se var updateErr error updatedSS, updateErr = sc.kubeCli.AppsV1().StatefulSets(ns).Update(set) if updateErr == nil { - glog.Infof("TidbCluster: [%s/%s]'s StatefulSet: [%s/%s] updated successfully", ns, tcName, ns, setName) + klog.Infof("TidbCluster: [%s/%s]'s StatefulSet: [%s/%s] updated successfully", ns, tcName, ns, setName) return nil } - glog.Errorf("failed to update TidbCluster: [%s/%s]'s StatefulSet: [%s/%s], error: %v", ns, tcName, ns, setName, updateErr) + klog.Errorf("failed to update TidbCluster: [%s/%s]'s StatefulSet: [%s/%s], error: %v", ns, tcName, ns, setName, updateErr) if updated, err := sc.setLister.StatefulSets(ns).Get(setName); err == nil { // make a copy so we don't mutate the shared cache diff --git a/pkg/controller/tidbcluster/tidb_cluster_controller.go b/pkg/controller/tidbcluster/tidb_cluster_controller.go index 60f7a78d89..e4f84ee1a6 100644 --- a/pkg/controller/tidbcluster/tidb_cluster_controller.go +++ b/pkg/controller/tidbcluster/tidb_cluster_controller.go @@ -39,7 +39,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" - glog "k8s.io/klog" + "k8s.io/klog" 
"sigs.k8s.io/controller-runtime/pkg/client" ) @@ -77,7 +77,7 @@ func NewController( tidbFailoverPeriod time.Duration, ) *Controller { eventBroadcaster := record.NewBroadcasterWithCorrelatorOptions(record.CorrelatorOptions{QPS: 1}) - eventBroadcaster.StartLogging(glog.V(2).Infof) + eventBroadcaster.StartLogging(klog.V(2).Infof) eventBroadcaster.StartRecordingToSink(&eventv1.EventSinkImpl{ Interface: eventv1.New(kubeCli.CoreV1().RESTClient()).Events("")}) recorder := eventBroadcaster.NewRecorder(v1alpha1.Scheme, corev1.EventSource{Component: "tidb-controller-manager"}) @@ -242,8 +242,8 @@ func (tcc *Controller) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer tcc.queue.ShutDown() - glog.Info("Starting tidbcluster controller") - defer glog.Info("Shutting down tidbcluster controller") + klog.Info("Starting tidbcluster controller") + defer klog.Info("Shutting down tidbcluster controller") for i := 0; i < workers; i++ { go wait.Until(tcc.worker, time.Second, stopCh) @@ -269,7 +269,7 @@ func (tcc *Controller) processNextWorkItem() bool { defer tcc.queue.Done(key) if err := tcc.sync(key.(string)); err != nil { if perrors.Find(err, controller.IsRequeueError) != nil { - glog.Infof("TidbCluster: %v, still need sync: %v, requeuing", key.(string), err) + klog.Infof("TidbCluster: %v, still need sync: %v, requeuing", key.(string), err) } else { utilruntime.HandleError(fmt.Errorf("TidbCluster: %v, sync failed %v, requeuing", key.(string), err)) } @@ -284,7 +284,7 @@ func (tcc *Controller) processNextWorkItem() bool { func (tcc *Controller) sync(key string) error { startTime := time.Now() defer func() { - glog.V(4).Infof("Finished syncing TidbCluster %q (%v)", key, time.Since(startTime)) + klog.V(4).Infof("Finished syncing TidbCluster %q (%v)", key, time.Since(startTime)) }() ns, name, err := cache.SplitMetaNamespaceKey(key) @@ -293,7 +293,7 @@ func (tcc *Controller) sync(key string) error { } tc, err := tcc.tcLister.TidbClusters(ns).Get(name) if errors.IsNotFound(err) { - glog.Infof("TidbCluster has been deleted %v", key) + klog.Infof("TidbCluster has been deleted %v", key) return nil } if err != nil { @@ -335,7 +335,7 @@ func (tcc *Controller) addStatefulSet(obj interface{}) { if tc == nil { return } - glog.V(4).Infof("StatefuSet %s/%s created, TidbCluster: %s/%s", ns, setName, ns, tc.Name) + klog.V(4).Infof("StatefuSet %s/%s created, TidbCluster: %s/%s", ns, setName, ns, tc.Name) tcc.enqueueTidbCluster(tc) } @@ -356,7 +356,7 @@ func (tcc *Controller) updateStatefuSet(old, cur interface{}) { if tc == nil { return } - glog.V(4).Infof("StatefulSet %s/%s updated, %+v -> %+v.", ns, setName, oldSet.Spec, curSet.Spec) + klog.V(4).Infof("StatefulSet %s/%s updated, %+v -> %+v.", ns, setName, oldSet.Spec, curSet.Spec) tcc.enqueueTidbCluster(tc) } @@ -387,7 +387,7 @@ func (tcc *Controller) deleteStatefulSet(obj interface{}) { if tc == nil { return } - glog.V(4).Infof("StatefulSet %s/%s deleted through %v.", ns, setName, utilruntime.GetCaller()) + klog.V(4).Infof("StatefulSet %s/%s deleted through %v.", ns, setName, utilruntime.GetCaller()) tcc.enqueueTidbCluster(tc) } diff --git a/pkg/controller/tidbcluster_control.go b/pkg/controller/tidbcluster_control.go index 268f7769ea..2f5f7827a4 100644 --- a/pkg/controller/tidbcluster_control.go +++ b/pkg/controller/tidbcluster_control.go @@ -28,7 +28,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" - glog "k8s.io/klog" + "k8s.io/klog" ) // TidbClusterControlInterface manages 
TidbClusters @@ -65,10 +65,10 @@ func (rtc *realTidbClusterControl) UpdateTidbCluster(tc *v1alpha1.TidbCluster, n var updateErr error updateTC, updateErr = rtc.cli.PingcapV1alpha1().TidbClusters(ns).Update(tc) if updateErr == nil { - glog.Infof("TidbCluster: [%s/%s] updated successfully", ns, tcName) + klog.Infof("TidbCluster: [%s/%s] updated successfully", ns, tcName) return nil } - glog.Errorf("failed to update TidbCluster: [%s/%s], error: %v", ns, tcName, updateErr) + klog.Errorf("failed to update TidbCluster: [%s/%s], error: %v", ns, tcName, updateErr) if updated, err := rtc.tcLister.TidbClusters(ns).Get(tcName); err == nil { // make a copy so we don't mutate the shared cache diff --git a/pkg/discovery/discovery.go b/pkg/discovery/discovery.go index b06ecab75d..d8e888a951 100644 --- a/pkg/discovery/discovery.go +++ b/pkg/discovery/discovery.go @@ -25,7 +25,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/pdapi" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" - glog "k8s.io/klog" + "k8s.io/klog" ) // TiDBDiscovery helps new PD member to discover all other members in cluster bootstrap phase. @@ -65,7 +65,7 @@ func (td *tidbDiscovery) Discover(advertisePeerUrl string) (string, error) { if advertisePeerUrl == "" { return "", fmt.Errorf("advertisePeerUrl is empty") } - glog.Infof("advertisePeerUrl is: %s", advertisePeerUrl) + klog.Infof("advertisePeerUrl is: %s", advertisePeerUrl) strArr := strings.Split(advertisePeerUrl, ".") if len(strArr) != 4 { return "", fmt.Errorf("advertisePeerUrl format is wrong: %s", advertisePeerUrl) diff --git a/pkg/discovery/server/mux.go b/pkg/discovery/server/mux.go index e52f60084e..a929ce559e 100644 --- a/pkg/discovery/server/mux.go +++ b/pkg/discovery/server/mux.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" "github.com/pingcap/tidb-operator/pkg/discovery" "k8s.io/client-go/kubernetes" - glog "k8s.io/klog" + "k8s.io/klog" ) type server struct { @@ -38,17 +38,17 @@ func StartServer(cli versioned.Interface, kubeCli kubernetes.Interface, port int ws.Route(ws.GET("/new/{advertise-peer-url}").To(svr.newHandler)) restful.Add(ws) - glog.Infof("starting TiDB Discovery server, listening on 0.0.0.0:%d", port) - glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil)) + klog.Infof("starting TiDB Discovery server, listening on 0.0.0.0:%d", port) + klog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil)) } func (svr *server) newHandler(req *restful.Request, resp *restful.Response) { encodedAdvertisePeerURL := req.PathParameter("advertise-peer-url") data, err := base64.StdEncoding.DecodeString(encodedAdvertisePeerURL) if err != nil { - glog.Errorf("failed to decode advertise-peer-url: %s", encodedAdvertisePeerURL) + klog.Errorf("failed to decode advertise-peer-url: %s", encodedAdvertisePeerURL) if err := resp.WriteError(http.StatusInternalServerError, err); err != nil { - glog.Errorf("failed to writeError: %v", err) + klog.Errorf("failed to writeError: %v", err) } return } @@ -56,15 +56,15 @@ func (svr *server) newHandler(req *restful.Request, resp *restful.Response) { result, err := svr.discovery.Discover(advertisePeerURL) if err != nil { - glog.Errorf("failed to discover: %s, %v", advertisePeerURL, err) + klog.Errorf("failed to discover: %s, %v", advertisePeerURL, err) if err := resp.WriteError(http.StatusInternalServerError, err); err != nil { - glog.Errorf("failed to writeError: %v", err) + klog.Errorf("failed to writeError: %v", err) } return } - 
glog.Infof("generated args for %s: %s", advertisePeerURL, result) + klog.Infof("generated args for %s: %s", advertisePeerURL, result) if _, err := io.WriteString(resp, result); err != nil { - glog.Errorf("failed to writeString: %s, %v", result, err) + klog.Errorf("failed to writeString: %s, %v", result, err) } } diff --git a/pkg/httputil/httputil.go b/pkg/httputil/httputil.go index e884746f92..7e1b303add 100644 --- a/pkg/httputil/httputil.go +++ b/pkg/httputil/httputil.go @@ -19,7 +19,7 @@ import ( "io/ioutil" "net/http" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -32,7 +32,7 @@ const ( // This is designed to be used in a defer statement. func DeferClose(c io.Closer) { if err := c.Close(); err != nil { - glog.Error(err) + klog.Error(err) } } diff --git a/pkg/manager/member/orphan_pods_cleaner.go b/pkg/manager/member/orphan_pods_cleaner.go index e961ca5d49..b914e649b4 100644 --- a/pkg/manager/member/orphan_pods_cleaner.go +++ b/pkg/manager/member/orphan_pods_cleaner.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" corelisters "k8s.io/client-go/listers/core/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -146,10 +146,10 @@ func (opc *orphanPodsCleaner) Clean(tc *v1alpha1.TidbCluster) (map[string]string } err = opc.podControl.DeletePod(tc, pod) if err != nil { - glog.Errorf("orphan pods cleaner: failed to clean orphan pod: %s/%s, %v", ns, podName, err) + klog.Errorf("orphan pods cleaner: failed to clean orphan pod: %s/%s, %v", ns, podName, err) return skipReason, err } - glog.Infof("orphan pods cleaner: clean orphan pod: %s/%s successfully", ns, podName) + klog.Infof("orphan pods cleaner: clean orphan pod: %s/%s successfully", ns, podName) } return skipReason, nil diff --git a/pkg/manager/member/pd_failover.go b/pkg/manager/member/pd_failover.go index 449474d28a..0d668eea23 100644 --- a/pkg/manager/member/pd_failover.go +++ b/pkg/manager/member/pd_failover.go @@ -28,7 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/record" - glog "k8s.io/klog" + "k8s.io/klog" ) // TODO add maxFailoverCount @@ -109,7 +109,7 @@ func (pf *pdFailover) Failover(tc *v1alpha1.TidbCluster) error { func (pf *pdFailover) Recover(tc *v1alpha1.TidbCluster) { tc.Status.PD.FailureMembers = nil - glog.Infof("pd failover: clearing pd failoverMembers, %s/%s", tc.GetNamespace(), tc.GetName()) + klog.Infof("pd failover: clearing pd failoverMembers, %s/%s", tc.GetNamespace(), tc.GetName()) } func (pf *pdFailover) tryToMarkAPeerAsFailure(tc *v1alpha1.TidbCluster) error { @@ -180,10 +180,10 @@ func (pf *pdFailover) tryToDeleteAFailureMember(tc *v1alpha1.TidbCluster) error // invoke deleteMember api to delete a member from the pd cluster err = controller.GetPDClient(pf.pdControl, tc).DeleteMemberByID(memberID) if err != nil { - glog.Errorf("pd failover: failed to delete member: %d, %v", memberID, err) + klog.Errorf("pd failover: failed to delete member: %d, %v", memberID, err) return err } - glog.Infof("pd failover: delete member: %d successfully", memberID) + klog.Infof("pd failover: delete member: %d successfully", memberID) pf.recorder.Eventf(tc, apiv1.EventTypeWarning, "PDMemberDeleted", "%s(%d) deleted from cluster", failurePodName, memberID) @@ -215,10 +215,10 @@ func (pf *pdFailover) tryToDeleteAFailureMember(tc *v1alpha1.TidbCluster) error if pvc != nil && pvc.DeletionTimestamp == nil && pvc.GetUID() == failureMember.PVCUID { err = pf.pvcControl.DeletePVC(tc, pvc) if err 
!= nil { - glog.Errorf("pd failover: failed to delete pvc: %s/%s, %v", ns, pvcName, err) + klog.Errorf("pd failover: failed to delete pvc: %s/%s, %v", ns, pvcName, err) return err } - glog.Infof("pd failover: pvc: %s/%s successfully", ns, pvcName) + klog.Infof("pd failover: pvc: %s/%s successfully", ns, pvcName) } setMemberDeleted(tc, failurePodName) @@ -229,7 +229,7 @@ func setMemberDeleted(tc *v1alpha1.TidbCluster, podName string) { failureMember := tc.Status.PD.FailureMembers[podName] failureMember.MemberDeleted = true tc.Status.PD.FailureMembers[podName] = failureMember - glog.Infof("pd failover: set pd member: %s/%s deleted", tc.GetName(), podName) + klog.Infof("pd failover: set pd member: %s/%s deleted", tc.GetName(), podName) } type fakePDFailover struct{} diff --git a/pkg/manager/member/pd_member_manager.go b/pkg/manager/member/pd_member_manager.go index 9c812fda5d..9ae8edb337 100644 --- a/pkg/manager/member/pd_member_manager.go +++ b/pkg/manager/member/pd_member_manager.go @@ -33,7 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" v1 "k8s.io/client-go/listers/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -224,7 +224,7 @@ func (pmm *pdMemberManager) syncPDStatefulSetForTidbCluster(tc *v1alpha1.TidbClu } if err := pmm.syncTidbClusterStatus(tc, oldPDSet); err != nil { - glog.Errorf("failed to sync TidbCluster: [%s/%s]'s status, error: %v", ns, tcName, err) + klog.Errorf("failed to sync TidbCluster: [%s/%s]'s status, error: %v", ns, tcName, err) } if !tc.Status.PD.Synced { @@ -370,7 +370,7 @@ func (pmm *pdMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, set } name := memberHealth.Name if len(name) == 0 { - glog.Warningf("PD member: [%d] doesn't have a name, and can't get it from clientUrls: [%s], memberHealth Info: [%v] in [%s/%s]", + klog.Warningf("PD member: [%d] doesn't have a name, and can't get it from clientUrls: [%s], memberHealth Info: [%v] in [%s/%s]", id, memberHealth.ClientUrls, memberHealth, ns, tcName) continue } diff --git a/pkg/manager/member/pd_scaler.go b/pkg/manager/member/pd_scaler.go index ead05aa166..a84acb8a25 100644 --- a/pkg/manager/member/pd_scaler.go +++ b/pkg/manager/member/pd_scaler.go @@ -24,7 +24,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/pdapi" apps "k8s.io/api/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) // TODO add e2e test specs @@ -59,7 +59,7 @@ func (psd *pdScaler) ScaleOut(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet return nil } - glog.Infof("scaling out pd statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List()) + klog.Infof("scaling out pd statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List()) _, err := psd.deleteDeferDeletingPVC(tc, oldSet.GetName(), v1alpha1.PDMemberType, ordinal) if err != nil { return err @@ -110,7 +110,7 @@ func (psd *pdScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, return fmt.Errorf("TidbCluster: %s/%s's pd status sync failed,can't scale in now", ns, tcName) } - glog.Infof("scaling in pd statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List()) + klog.Infof("scaling in pd statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List()) if 
controller.PodWebhookEnabled { setReplicasAndDeleteSlots(newSet, replicas, deleteSlots) @@ -137,10 +137,10 @@ func (psd *pdScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, err := pdClient.DeleteMember(memberName) if err != nil { - glog.Errorf("pd scale in: failed to delete member %s, %v", memberName, err) + klog.Errorf("pd scale in: failed to delete member %s, %v", memberName, err) return err } - glog.Infof("pd scale in: delete member %s successfully", memberName) + klog.Infof("pd scale in: delete member %s successfully", memberName) pvcName := ordinalPVCName(v1alpha1.PDMemberType, setName, ordinal) pvc, err := psd.pvcLister.PersistentVolumeClaims(ns).Get(pvcName) @@ -156,11 +156,11 @@ func (psd *pdScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, _, err = psd.pvcControl.UpdatePVC(tc, pvc) if err != nil { - glog.Errorf("pd scale in: failed to set pvc %s/%s annotation: %s to %s", + klog.Errorf("pd scale in: failed to set pvc %s/%s annotation: %s to %s", ns, pvcName, label.AnnPVCDeferDeleting, now) return err } - glog.Infof("pd scale in: set pvc %s/%s annotation: %s to %s", + klog.Infof("pd scale in: set pvc %s/%s annotation: %s to %s", ns, pvcName, label.AnnPVCDeferDeleting, now) setReplicasAndDeleteSlots(newSet, replicas, deleteSlots) diff --git a/pkg/manager/member/pd_upgrader.go b/pkg/manager/member/pd_upgrader.go index ffa1cc58ef..24cdea02ab 100644 --- a/pkg/manager/member/pd_upgrader.go +++ b/pkg/manager/member/pd_upgrader.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/pdapi" apps "k8s.io/api/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) type pdUpgrader struct { @@ -68,7 +68,7 @@ func (pu *pdUpgrader) gracefulUpgrade(tc *v1alpha1.TidbCluster, oldSet *apps.Sta // If we encounter this situation, we will let the native statefulset controller do the upgrade completely, which may be unsafe for upgrading pd. // Therefore, in the production environment, we should try to avoid modifying the pd statefulset update strategy directly. 
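The guard described in the comment above reduces to a small pattern: if the StatefulSet's update strategy is no longer the operator-managed RollingUpdate form (for example, someone switched it to OnDelete or cleared the RollingUpdate field by hand), the operator preserves the manual setting, warns via klog, and skips its own upgrade logic for this round. A minimal sketch of that pattern, assuming the apps/v1 types; the helper name is hypothetical, not the operator's exact code:

package member

import (
	apps "k8s.io/api/apps/v1"
	"k8s.io/klog"
)

// keepManualUpdateStrategy preserves a hand-edited update strategy and
// reports whether the operator should skip its own rolling-upgrade logic,
// leaving the native StatefulSet controller in charge.
func keepManualUpdateStrategy(ns, tcName string, oldSet, newSet *apps.StatefulSet) bool {
	if oldSet.Spec.UpdateStrategy.Type == apps.OnDeleteStatefulSetStrategyType ||
		oldSet.Spec.UpdateStrategy.RollingUpdate == nil {
		newSet.Spec.UpdateStrategy = oldSet.Spec.UpdateStrategy
		klog.Warningf("tidbcluster: [%s/%s] statefulset %s UpdateStrategy has been modified manually", ns, tcName, oldSet.GetName())
		return true
	}
	return false
}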
newSet.Spec.UpdateStrategy = oldSet.Spec.UpdateStrategy - glog.Warningf("tidbcluster: [%s/%s] pd statefulset %s UpdateStrategy has been modified manually", ns, tcName, oldSet.GetName()) + klog.Warningf("tidbcluster: [%s/%s] pd statefulset %s UpdateStrategy has been modified manually", ns, tcName, oldSet.GetName()) return nil } @@ -118,10 +118,10 @@ func (pu *pdUpgrader) upgradePDPod(tc *v1alpha1.TidbCluster, ordinal int32, newS } err := pu.transferPDLeaderTo(tc, targetName) if err != nil { - glog.Errorf("pd upgrader: failed to transfer pd leader to: %s, %v", targetName, err) + klog.Errorf("pd upgrader: failed to transfer pd leader to: %s, %v", targetName, err) return err } - glog.Infof("pd upgrader: transfer pd leader to: %s successfully", targetName) + klog.Infof("pd upgrader: transfer pd leader to: %s successfully", targetName) return controller.RequeueErrorf("tidbcluster: [%s/%s]'s pd member: [%s] is transferring leader to pd member: [%s]", ns, tcName, upgradePodName, targetName) } diff --git a/pkg/manager/member/pump_member_manager.go b/pkg/manager/member/pump_member_manager.go index b03f069753..19f47ccd80 100644 --- a/pkg/manager/member/pump_member_manager.go +++ b/pkg/manager/member/pump_member_manager.go @@ -29,7 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" v1 "k8s.io/client-go/listers/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -107,7 +107,7 @@ func (pmm *pumpMemberManager) syncPumpStatefulSetForTidbCluster(tc *v1alpha1.Tid } if err := pmm.syncTiDBClusterStatus(tc, oldPumpSet); err != nil { - glog.Errorf("failed to sync TidbCluster: [%s/%s]'s status, error: %v", tc.Namespace, tc.Name, err) + klog.Errorf("failed to sync TidbCluster: [%s/%s]'s status, error: %v", tc.Namespace, tc.Name, err) return err } diff --git a/pkg/manager/member/pvc_cleaner.go b/pkg/manager/member/pvc_cleaner.go index 1a4b2bb4f1..64097adeef 100644 --- a/pkg/manager/member/pvc_cleaner.go +++ b/pkg/manager/member/pvc_cleaner.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" corelisters "k8s.io/client-go/listers/core/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -170,7 +170,7 @@ func (rpc *realPVCCleaner) reclaimPV(tc *v1alpha1.TidbCluster) (map[string]strin if err != nil { return skipReason, fmt.Errorf("cluster %s/%s patch pv %s to %s failed, err: %v", ns, tcName, pvName, corev1.PersistentVolumeReclaimDelete, err) } - glog.Infof("cluster %s/%s patch pv %s to policy %s success", ns, tcName, pvName, corev1.PersistentVolumeReclaimDelete) + klog.Infof("cluster %s/%s patch pv %s to policy %s success", ns, tcName, pvName, corev1.PersistentVolumeReclaimDelete) } apiPVC, err := rpc.kubeCli.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{}) @@ -190,7 +190,7 @@ func (rpc *realPVCCleaner) reclaimPV(tc *v1alpha1.TidbCluster) (map[string]strin if err := rpc.pvcControl.DeletePVC(tc, pvc); err != nil { return skipReason, fmt.Errorf("cluster %s/%s delete pvc %s failed, err: %v", ns, tcName, pvcName, err) } - glog.Infof("cluster %s/%s reclaim pv %s success, pvc %s", ns, tcName, pvName, pvcName) + klog.Infof("cluster %s/%s reclaim pv %s success, pvc %s", ns, tcName, pvName, pvcName) } return skipReason, nil } @@ -217,7 +217,7 @@ func (rpc *realPVCCleaner) cleanScheduleLock(tc *v1alpha1.TidbCluster) (map[stri if pvc.Annotations[label.AnnPVCDeferDeleting] != "" { if _, exist := pvc.Annotations[label.AnnPVCPodScheduling]; !exist { // The defer deleting PVC without pod scheduling 
annotation, do nothing - glog.V(4).Infof("cluster %s/%s defer delete pvc %s has not pod scheduling annotation, skip clean", ns, tcName, pvcName) + klog.V(4).Infof("cluster %s/%s defer delete pvc %s has no pod scheduling annotation, skipping clean", ns, tcName, pvcName) skipReason[pvcName] = skipReasonPVCCleanerDeferDeletePVCNotHasLock continue } @@ -247,14 +247,14 @@ func (rpc *realPVCCleaner) cleanScheduleLock(tc *v1alpha1.TidbCluster) (map[stri if _, exist := pvc.Annotations[label.AnnPVCPodScheduling]; !exist { // The PVC without pod scheduling annotation, do nothing - glog.V(4).Infof("cluster %s/%s pvc %s has not pod scheduling annotation, skip clean", ns, tcName, pvcName) + klog.V(4).Infof("cluster %s/%s pvc %s has no pod scheduling annotation, skipping clean", ns, tcName, pvcName) skipReason[pvcName] = skipReasonPVCCleanerPVCNotHasLock continue } if pvc.Status.Phase != corev1.ClaimBound || pod.Spec.NodeName == "" { // This pod has not been scheduled yet, no need to clean up the pvc pod schedule annotation - glog.V(4).Infof("cluster %s/%s pod %s has not been scheduled yet, skip clean pvc %s pod schedule annotation", ns, tcName, podName, pvcName) + klog.V(4).Infof("cluster %s/%s pod %s has not been scheduled yet, skipping clean of pvc %s pod schedule annotation", ns, tcName, podName, pvcName) skipReason[pvcName] = skipReasonPVCCleanerPodWaitingForScheduling continue } @@ -263,7 +263,7 @@ func (rpc *realPVCCleaner) cleanScheduleLock(tc *v1alpha1.TidbCluster) (map[stri if _, err := rpc.pvcControl.UpdatePVC(tc, pvc); err != nil { return skipReason, fmt.Errorf("cluster %s/%s remove pvc %s pod scheduling annotation faild, err: %v", ns, tcName, pvcName, err) } - glog.Infof("cluster %s/%s, clean pvc %s pod scheduling annotation successfully", ns, tcName, pvcName) + klog.Infof("cluster %s/%s, clean pvc %s pod scheduling annotation successfully", ns, tcName, pvcName) } return skipReason, nil diff --git a/pkg/manager/member/scaler.go b/pkg/manager/member/scaler.go index 017bc6c0db..47a5f0dc0b 100644 --- a/pkg/manager/member/scaler.go +++ b/pkg/manager/member/scaler.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/sets" corelisters "k8s.io/client-go/listers/core/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -78,10 +78,10 @@ func (gs *generalScaler) deleteDeferDeletingPVC(tc *v1alpha1.TidbCluster, err = gs.pvcControl.DeletePVC(tc, pvc) if err != nil { - glog.Errorf("scale out: failed to delete pvc %s/%s, %v", ns, pvcName, err) + klog.Errorf("scale out: failed to delete pvc %s/%s, %v", ns, pvcName, err) return skipReason, err } - glog.Infof("scale out: delete pvc %s/%s successfully", ns, pvcName) + klog.Infof("scale out: delete pvc %s/%s successfully", ns, pvcName) return skipReason, nil } @@ -98,11 +98,11 @@ func setReplicasAndDeleteSlots(newSet *apps.StatefulSet, replicas int32, deleteS *newSet.Spec.Replicas = replicas if features.DefaultFeatureGate.Enabled(features.AdvancedStatefulSet) { helper.SetDeleteSlots(newSet, deleteSlots) - glog.Infof("scale statefulset: %s/%s replicas from %d to %d (delete slots: %v)", + klog.Infof("scale statefulset: %s/%s replicas from %d to %d (delete slots: %v)", newSet.GetNamespace(), newSet.GetName(), oldReplicas, replicas, deleteSlots.List()) return } - glog.Infof("scale statefulset: %s/%s replicas from %d to %d", + klog.Infof("scale statefulset: %s/%s replicas from %d to %d", newSet.GetNamespace(), newSet.GetName(), oldReplicas, replicas) } diff --git a/pkg/manager/member/tidb_failover.go
b/pkg/manager/member/tidb_failover.go index e0102c371f..7da6f6dc79 100644 --- a/pkg/manager/member/tidb_failover.go +++ b/pkg/manager/member/tidb_failover.go @@ -18,7 +18,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) type tidbFailover struct { @@ -41,14 +41,14 @@ func (tf *tidbFailover) Failover(tc *v1alpha1.TidbCluster) error { _, exist := tc.Status.TiDB.FailureMembers[tidbMember.Name] if exist && tidbMember.Health { delete(tc.Status.TiDB.FailureMembers, tidbMember.Name) - glog.Infof("tidb failover: delete %s from tidb failoverMembers", tidbMember.Name) + klog.Infof("tidb failover: delete %s from tidb failoverMembers", tidbMember.Name) } } if tc.Spec.TiDB.MaxFailoverCount != nil { maxFailoverCount := *tc.Spec.TiDB.MaxFailoverCount if maxFailoverCount > 0 && len(tc.Status.TiDB.FailureMembers) >= int(maxFailoverCount) { - glog.Warningf("the failure members count reached the limit:%d", tc.Spec.TiDB.MaxFailoverCount) + klog.Warningf("the failure members count reached the limit: %d", maxFailoverCount) return nil } } diff --git a/pkg/manager/member/tidb_upgrader.go b/pkg/manager/member/tidb_upgrader.go index 10c41e8422..8a3dd4db2a 100644 --- a/pkg/manager/member/tidb_upgrader.go +++ b/pkg/manager/member/tidb_upgrader.go @@ -19,7 +19,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/controller" apps "k8s.io/api/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) type tidbUpgrader struct { @@ -70,7 +70,7 @@ func (tdu *tidbUpgrader) Upgrade(tc *v1alpha1.TidbCluster, oldSet *apps.Stateful // If we encounter this situation, we will let the native statefulset controller do the upgrade completely, which may be unsafe for upgrading tidb. // Therefore, in the production environment, we should try to avoid modifying the tidb statefulset update strategy directly.
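The upgraders that pass this guard drive the rollout through the StatefulSet partition (the real helper, setUpgradePartition, appears later in this patch in pkg/manager/member/utils.go): pods with an ordinal greater than or equal to Partition keep the old revision, so moving the partition from replicas-1 down to 0 upgrades one pod at a time, highest ordinal first. A minimal sketch of that mechanic, with a hypothetical wrapper name:

package member

import (
	apps "k8s.io/api/apps/v1"
	"k8s.io/klog"
)

// lowerPartition pins the rolling update so that only pods with
// ordinal >= the partition are recreated; each time the previously
// upgraded pod becomes healthy, the upgrader calls this with the next
// lower ordinal.
func lowerPartition(set *apps.StatefulSet, ordinal int32) {
	set.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateStatefulSetStrategy{Partition: &ordinal}
	klog.Infof("set %s/%s partition to %d", set.GetNamespace(), set.GetName(), ordinal)
}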
newSet.Spec.UpdateStrategy = oldSet.Spec.UpdateStrategy - glog.Warningf("tidbcluster: [%s/%s] tidb statefulset %s UpdateStrategy has been modified manually", ns, tcName, oldSet.GetName()) + klog.Warningf("tidbcluster: [%s/%s] tidb statefulset %s UpdateStrategy has been modified manually", ns, tcName, oldSet.GetName()) return nil } diff --git a/pkg/manager/member/tikv_failover.go b/pkg/manager/member/tikv_failover.go index acf16c4952..1cf67e3a80 100644 --- a/pkg/manager/member/tikv_failover.go +++ b/pkg/manager/member/tikv_failover.go @@ -18,7 +18,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) type tikvFailover struct { @@ -54,7 +54,7 @@ func (tf *tikvFailover) Failover(tc *v1alpha1.TidbCluster) error { if tc.Spec.TiKV.MaxFailoverCount != nil { maxFailoverCount := *tc.Spec.TiKV.MaxFailoverCount if maxFailoverCount > 0 && len(tc.Status.TiKV.FailureStores) >= int(maxFailoverCount) { - glog.Warningf("%s/%s failure stores count reached the limit: %d", ns, tcName, tc.Spec.TiKV.MaxFailoverCount) + klog.Warningf("%s/%s failure stores count reached the limit: %d", ns, tcName, maxFailoverCount) return nil } } diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go index 15c79453db..b423766034 100644 --- a/pkg/manager/member/tikv_member_manager.go +++ b/pkg/manager/member/tikv_member_manager.go @@ -34,7 +34,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" v1 "k8s.io/client-go/listers/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -622,7 +622,7 @@ func (tkmm *tikvMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, s // avoid LastHeartbeatTime be overwrite by zero time when pd lost LastHeartbeatTime if status.LastHeartbeatTime.IsZero() { if oldStatus, ok := previousStores[status.ID]; ok { - glog.V(4).Infof("the pod:%s's store LastHeartbeatTime is zero,so will keep in %v", status.PodName, oldStatus.LastHeartbeatTime) + klog.V(4).Infof("the pod %s's store LastHeartbeatTime is zero, so keeping the previous value %v", status.PodName, oldStatus.LastHeartbeatTime) status.LastHeartbeatTime = oldStatus.LastHeartbeatTime } } @@ -711,19 +711,19 @@ func (tkmm *tikvMemberManager) setStoreLabelsForTiKV(tc *v1alpha1.TidbCluster) ( nodeName := pod.Spec.NodeName ls, err := tkmm.getNodeLabels(nodeName, locationLabels) if err != nil || len(ls) == 0 { - glog.Warningf("node: [%s] has no node labels, skipping set store labels for Pod: [%s/%s]", nodeName, ns, podName) + klog.Warningf("node: [%s] has no node labels, skipping set store labels for Pod: [%s/%s]", nodeName, ns, podName) continue } if !tkmm.storeLabelsEqualNodeLabels(store.Store.Labels, ls) { set, err := pdCli.SetStoreLabels(store.Store.Id, ls) if err != nil { - glog.Warningf("failed to set pod: [%s/%s]'s store labels: %v", ns, podName, ls) + klog.Warningf("failed to set pod: [%s/%s]'s store labels: %v", ns, podName, ls) continue } if set { setCount++ - glog.Infof("pod: [%s/%s] set labels: %v successfully", ns, podName, ls) + klog.Infof("pod: [%s/%s] set labels: %v successfully", ns, podName, ls) } } } diff --git a/pkg/manager/member/tikv_scaler.go b/pkg/manager/member/tikv_scaler.go index 8d56cd0a73..d59a1a2495 100644 --- a/pkg/manager/member/tikv_scaler.go +++ b/pkg/manager/member/tikv_scaler.go @@ -24,7 +24,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/pdapi" apps "k8s.io/api/apps/v1" corelisters
"k8s.io/client-go/listers/core/v1" - glog "k8s.io/klog" + "k8s.io/klog" podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) @@ -58,7 +58,7 @@ func (tsd *tikvScaler) ScaleOut(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulS return nil } - glog.Infof("scaling out tikv statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List()) + klog.Infof("scaling out tikv statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List()) _, err := tsd.deleteDeferDeletingPVC(tc, oldSet.GetName(), v1alpha1.TiKVMemberType, ordinal) if err != nil { return err @@ -78,12 +78,12 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe // tikv can not scale in when it is upgrading if tc.TiKVUpgrading() { - glog.Infof("the TidbCluster: [%s/%s]'s tikv is upgrading,can not scale in until upgrade have completed", + klog.Infof("the TidbCluster: [%s/%s]'s tikv is upgrading,can not scale in until upgrade have completed", ns, tcName) return nil } - glog.Infof("scaling in tikv statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List()) + klog.Infof("scaling in tikv statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List()) // We need remove member from cluster before reducing statefulset replicas podName := ordinalPodName(v1alpha1.TiKVMemberType, tcName, ordinal) pod, err := tsd.podLister.Pods(ns).Get(podName) @@ -105,10 +105,10 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe } if state != v1alpha1.TiKVStateOffline { if err := controller.GetPDClient(tsd.pdControl, tc).DeleteStore(id); err != nil { - glog.Errorf("tikv scale in: failed to delete store %d, %v", id, err) + klog.Errorf("tikv scale in: failed to delete store %d, %v", id, err) return err } - glog.Infof("tikv scale in: delete store %d for tikv %s/%s successfully", id, ns, podName) + klog.Infof("tikv scale in: delete store %d for tikv %s/%s successfully", id, ns, podName) } return controller.RequeueErrorf("TiKV %s/%s store %d still in cluster, state: %s", ns, podName, id, state) } @@ -121,7 +121,7 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe } // TODO: double check if store is really not in Up/Offline/Down state - glog.Infof("TiKV %s/%s store %d becomes tombstone", ns, podName, id) + klog.Infof("TiKV %s/%s store %d becomes tombstone", ns, podName, id) pvcName := ordinalPVCName(v1alpha1.TiKVMemberType, setName, ordinal) pvc, err := tsd.pvcLister.PersistentVolumeClaims(ns).Get(pvcName) @@ -135,11 +135,11 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe pvc.Annotations[label.AnnPVCDeferDeleting] = now _, err = tsd.pvcControl.UpdatePVC(tc, pvc) if err != nil { - glog.Errorf("tikv scale in: failed to set pvc %s/%s annotation: %s to %s", + klog.Errorf("tikv scale in: failed to set pvc %s/%s annotation: %s to %s", ns, pvcName, label.AnnPVCDeferDeleting, now) return err } - glog.Infof("tikv scale in: set pvc %s/%s annotation: %s to %s", + klog.Infof("tikv scale in: set pvc %s/%s annotation: %s to %s", ns, pvcName, label.AnnPVCDeferDeleting, now) setReplicasAndDeleteSlots(newSet, replicas, deleteSlots) @@ -178,11 +178,11 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe pvc.Annotations[label.AnnPVCDeferDeleting] = now _, 
err = tsd.pvcControl.UpdatePVC(tc, pvc) if err != nil { - glog.Errorf("pod %s not ready, tikv scale in: failed to set pvc %s/%s annotation: %s to %s", + klog.Errorf("pod %s not ready, tikv scale in: failed to set pvc %s/%s annotation: %s to %s", podName, ns, pvcName, label.AnnPVCDeferDeleting, now) return err } - glog.Infof("pod %s not ready, tikv scale in: set pvc %s/%s annotation: %s to %s", + klog.Infof("pod %s not ready, tikv scale in: set pvc %s/%s annotation: %s to %s", podName, ns, pvcName, label.AnnPVCDeferDeleting, now) setReplicasAndDeleteSlots(newSet, replicas, deleteSlots) return nil diff --git a/pkg/manager/member/tikv_upgrader.go b/pkg/manager/member/tikv_upgrader.go index 7b0e0d9303..8283e09209 100644 --- a/pkg/manager/member/tikv_upgrader.go +++ b/pkg/manager/member/tikv_upgrader.go @@ -25,7 +25,7 @@ import ( apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" corelisters "k8s.io/client-go/listers/core/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -83,7 +83,7 @@ func (tku *tikvUpgrader) Upgrade(tc *v1alpha1.TidbCluster, oldSet *apps.Stateful // If we encounter this situation, we will let the native statefulset controller do the upgrade completely, which may be unsafe for upgrading tikv. // Therefore, in the production environment, we should try to avoid modifying the tikv statefulset update strategy directly. newSet.Spec.UpdateStrategy = oldSet.Spec.UpdateStrategy - glog.Warningf("tidbcluster: [%s/%s] tikv statefulset %s UpdateStrategy has been modified manually", ns, tcName, oldSet.GetName()) + klog.Warningf("tidbcluster: [%s/%s] tikv statefulset %s UpdateStrategy has been modified manually", ns, tcName, oldSet.GetName()) return nil } @@ -170,7 +170,7 @@ func (tku *tikvUpgrader) readyToUpgrade(upgradePod *corev1.Pod, store v1alpha1.T if evictLeaderBeginTimeStr, evicting := upgradePod.Annotations[EvictLeaderBeginTime]; evicting { evictLeaderBeginTime, err := time.Parse(time.RFC3339, evictLeaderBeginTimeStr) if err != nil { - glog.Errorf("parse annotation:[%s] to time failed.", EvictLeaderBeginTime) + klog.Errorf("failed to parse annotation [%s] as time", EvictLeaderBeginTime) return false } if time.Now().After(evictLeaderBeginTime.Add(EvictLeaderTimeout)) { @@ -185,11 +185,11 @@ func (tku *tikvUpgrader) beginEvictLeader(tc *v1alpha1.TidbCluster, storeID uint podName := pod.GetName() err := controller.GetPDClient(tku.pdControl, tc).BeginEvictLeader(storeID) if err != nil { - glog.Errorf("tikv upgrader: failed to begin evict leader: %d, %s/%s, %v", + klog.Errorf("tikv upgrader: failed to begin evict leader: %d, %s/%s, %v", storeID, ns, podName, err) return err } - glog.Infof("tikv upgrader: begin evict leader: %d, %s/%s successfully", storeID, ns, podName) + klog.Infof("tikv upgrader: begin evict leader: %d, %s/%s successfully", storeID, ns, podName) if pod.Annotations == nil { pod.Annotations = map[string]string{} } @@ -197,11 +197,11 @@ func (tku *tikvUpgrader) beginEvictLeader(tc *v1alpha1.TidbCluster, storeID uint pod.Annotations[EvictLeaderBeginTime] = now _, err = tku.podControl.UpdatePod(tc, pod) if err != nil { - glog.Errorf("tikv upgrader: failed to set pod %s/%s annotation %s to %s, %v", + klog.Errorf("tikv upgrader: failed to set pod %s/%s annotation %s to %s, %v", ns, podName, EvictLeaderBeginTime, now, err) return err } - glog.Infof("tikv upgrader: set pod %s/%s annotation %s to %s successfully", + klog.Infof("tikv upgrader: set pod %s/%s annotation %s to %s successfully", ns, podName, EvictLeaderBeginTime, now) return nil } @@ -219,10 +219,10 @@ func
(tku *tikvUpgrader) endEvictLeader(tc *v1alpha1.TidbCluster, ordinal int32) err = tku.pdControl.GetPDClient(pdapi.Namespace(tc.GetNamespace()), tc.GetName(), tc.IsTLSClusterEnabled()).EndEvictLeader(storeID) if err != nil { - glog.Errorf("tikv upgrader: failed to end evict leader storeID: %d ordinal: %d, %v", storeID, ordinal, err) + klog.Errorf("tikv upgrader: failed to end evict leader storeID: %d ordinal: %d, %v", storeID, ordinal, err) return err } - glog.Infof("tikv upgrader: end evict leader storeID: %d ordinal: %d successfully", storeID, ordinal) + klog.Infof("tikv upgrader: end evict leader storeID: %d ordinal: %d successfully", storeID, ordinal) return nil } diff --git a/pkg/manager/member/utils.go b/pkg/manager/member/utils.go index ee251127b6..97492ecc07 100644 --- a/pkg/manager/member/utils.go +++ b/pkg/manager/member/utils.go @@ -28,7 +28,7 @@ import ( corev1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -114,7 +114,7 @@ func statefulSetEqual(new apps.StatefulSet, old apps.StatefulSet) bool { if lastAppliedConfig, ok := old.Annotations[LastAppliedConfigAnnotation]; ok { err := json.Unmarshal([]byte(lastAppliedConfig), &oldConfig) if err != nil { - glog.Errorf("unmarshal Statefulset: [%s/%s]'s applied config failed,error: %v", old.GetNamespace(), old.GetName(), err) + klog.Errorf("unmarshal StatefulSet: [%s/%s]'s applied config failed, error: %v", old.GetNamespace(), old.GetName(), err) return false } return apiequality.Semantic.DeepEqual(oldConfig.Replicas, new.Spec.Replicas) && @@ -131,7 +131,7 @@ func templateEqual(new *apps.StatefulSet, old *apps.StatefulSet) bool { if ok { err := json.Unmarshal([]byte(lastAppliedConfig), &oldStsSpec) if err != nil { - glog.Errorf("unmarshal PodTemplate: [%s/%s]'s applied config failed,error: %v", old.GetNamespace(), old.GetName(), err) + klog.Errorf("unmarshal PodTemplate: [%s/%s]'s applied config failed, error: %v", old.GetNamespace(), old.GetName(), err) return false } return apiequality.Semantic.DeepEqual(oldStsSpec.Template.Spec, new.Spec.Template.Spec) @@ -142,7 +142,7 @@ func templateEqual(new *apps.StatefulSet, old *apps.StatefulSet) bool { // setUpgradePartition set statefulSet's rolling update partition func setUpgradePartition(set *apps.StatefulSet, upgradeOrdinal int32) { set.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateStatefulSetStrategy{Partition: &upgradeOrdinal} - glog.Infof("set %s/%s partition to %d", set.GetNamespace(), set.GetName(), upgradeOrdinal) + klog.Infof("set %s/%s partition to %d", set.GetNamespace(), set.GetName(), upgradeOrdinal) } func imagePullFailed(pod *corev1.Pod) bool { diff --git a/pkg/pdapi/pdapi.go b/pkg/pdapi/pdapi.go index 348b978f90..ce380ab3e4 100644 --- a/pkg/pdapi/pdapi.go +++ b/pkg/pdapi/pdapi.go @@ -26,7 +26,7 @@ import ( "time" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" - glog "k8s.io/klog" + "k8s.io/klog" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" @@ -118,7 +118,7 @@ func (pdc *defaultPDControl) GetPDClient(namespace Namespace, tcName string, tls if tlsEnabled { tlsConfig, err = GetTLSConfig(pdc.kubeCli, namespace, tcName, nil) if err != nil { - glog.Errorf("Unable to get tls config for tidb cluster %q, pd client may not work: %v", tcName, err) + klog.Errorf("Unable to get tls config for tidb cluster %q, pd client may not work: %v", tcName, err) return &pdClient{url:
diff --git a/pkg/pdapi/pdapi.go b/pkg/pdapi/pdapi.go
index 348b978f90..ce380ab3e4 100644
--- a/pkg/pdapi/pdapi.go
+++ b/pkg/pdapi/pdapi.go
@@ -26,7 +26,7 @@ import (
 	"time"
 
 	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
-	glog "k8s.io/klog"
+	"k8s.io/klog"
 
 	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/pingcap/kvproto/pkg/pdpb"
@@ -118,7 +118,7 @@ func (pdc *defaultPDControl) GetPDClient(namespace Namespace, tcName string, tls
 	if tlsEnabled {
 		tlsConfig, err = GetTLSConfig(pdc.kubeCli, namespace, tcName, nil)
 		if err != nil {
-			glog.Errorf("Unable to get tls config for tidb cluster %q, pd client may not work: %v", tcName, err)
+			klog.Errorf("Unable to get tls config for tidb cluster %q, pd client may not work: %v", tcName, err)
 			return &pdClient{url: PdClientURL(namespace, tcName, scheme), httpClient: &http.Client{Timeout: DefaultTimeout}}
 		}
 	}
@@ -541,10 +541,10 @@ func (pc *pdClient) EndEvictLeader(storeID uint64) error {
 		return nil
 	}
 	if res.StatusCode == http.StatusOK {
-		glog.Infof("call DELETE method: %s success", apiURL)
+		klog.Infof("call DELETE method: %s success", apiURL)
 	} else {
 		err2 := httputil.ReadErrorBody(res.Body)
-		glog.Errorf("call DELETE method: %s failed,statusCode: %v,error: %v", apiURL, res.StatusCode, err2)
+		klog.Errorf("call DELETE method: %s failed, statusCode: %v, error: %v", apiURL, res.StatusCode, err2)
 	}
 
 	// pd will return an error with the body contains "scheduler not found" if the scheduler is not found
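EndEvictLeader above issues an HTTP DELETE against PD's scheduler endpoint and tolerates a "scheduler not found" reply. A bare-bones sketch of such a call with net/http; the URL shape is illustrative, and the real client additionally handles TLS and PD's structured error body:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
	"time"
)

// deleteScheduler removes an evict-leader scheduler by name and treats
// "scheduler not found" as success, mirroring the tolerant behavior above.
func deleteScheduler(pdURL, name string) error {
	req, err := http.NewRequest(http.MethodDelete, pdURL+"/pd/api/v1/schedulers/"+name, nil)
	if err != nil {
		return err
	}
	cli := &http.Client{Timeout: 5 * time.Second}
	res, err := cli.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode == http.StatusOK {
		return nil
	}
	body, _ := ioutil.ReadAll(res.Body)
	if strings.Contains(string(body), "scheduler not found") {
		return nil // already gone: nothing to do
	}
	return fmt.Errorf("DELETE %s: status %d, body %s", req.URL, res.StatusCode, body)
}

func main() {
	fmt.Println(deleteScheduler("http://127.0.0.1:2379", "evict-leader-scheduler-1"))
}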
diff --git a/pkg/scheduler/predicates/ha.go b/pkg/scheduler/predicates/ha.go
index 7d52c300f9..4954466453 100644
--- a/pkg/scheduler/predicates/ha.go
+++ b/pkg/scheduler/predicates/ha.go
@@ -32,7 +32,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/util/retry"
-	glog "k8s.io/klog"
+	"k8s.io/klog"
 )
 
 type ha struct {
@@ -87,7 +87,7 @@ func (h *ha) Filter(instanceName string, pod *apiv1.Pod, nodes []apiv1.Node) ([]
 	tcName := getTCNameFromPod(pod, component)
 
 	if component != label.PDLabelVal && component != label.TiKVLabelVal {
-		glog.V(4).Infof("component %s is ignored in HA predicate", component)
+		klog.V(4).Infof("component %s is ignored in HA predicate", component)
 		return nodes, nil
 	}
 
@@ -118,7 +118,7 @@ func (h *ha) Filter(instanceName string, pod *apiv1.Pod, nodes []apiv1.Node) ([]
 		return nil, err
 	}
 	replicas := getReplicasFrom(tc, component)
-	glog.Infof("ha: tidbcluster %s/%s component %s replicas %d", ns, tcName, component, replicas)
+	klog.Infof("ha: tidbcluster %s/%s component %s replicas %d", ns, tcName, component, replicas)
 
 	allNodes := make(sets.String)
 	nodeMap := make(map[string][]string)
@@ -137,7 +137,7 @@ func (h *ha) Filter(instanceName string, pod *apiv1.Pod, nodes []apiv1.Node) ([]
 		nodeMap[nodeName] = append(nodeMap[nodeName], pName)
 	}
-	glog.V(4).Infof("nodeMap: %+v", nodeMap)
+	klog.V(4).Infof("nodeMap: %+v", nodeMap)
 
 	min := -1
 	minNodeNames := make([]string, 0)
@@ -187,13 +187,13 @@ func (h *ha) Filter(instanceName string, pod *apiv1.Pod, nodes []apiv1.Node) ([]
 		// tikv replicas less than 3 cannot achieve high availability
 		if component == label.TiKVLabelVal && replicas < 3 {
 			minNodeNames = append(minNodeNames, nodeName)
-			glog.Infof("replicas is %d, add node %s to minNodeNames", replicas, nodeName)
+			klog.Infof("replicas is %d, add node %s to minNodeNames", replicas, nodeName)
 			continue
 		}
 
 		if podsCount+1 > maxPodsPerNode {
 			// pods on this node exceeds the limit, skip
-			glog.Infof("node %s has %d instances of component %s, max allowed is %d, skipping",
+			klog.Infof("node %s has %d instances of component %s, max allowed is %d, skipping",
 				nodeName, podsCount, component, maxPodsPerNode)
 			continue
 		}
@@ -203,7 +203,7 @@ func (h *ha) Filter(instanceName string, pod *apiv1.Pod, nodes []apiv1.Node) ([]
 			min = podsCount
 		}
 		if podsCount > min {
-			glog.Infof("node %s podsCount %d > min %d, skipping", nodeName, podsCount, min)
+			klog.Infof("node %s podsCount %d > min %d, skipping", nodeName, podsCount, min)
 			continue
 		}
 		if podsCount < min {
@@ -282,11 +282,11 @@ func (h *ha) realAcquireLock(pod *apiv1.Pod) (*apiv1.PersistentVolumeClaim, *api
 	delete(schedulingPVC.Annotations, label.AnnPVCPodScheduling)
 	err = h.updatePVCFn(schedulingPVC)
 	if err != nil {
-		glog.Errorf("ha: failed to delete pvc %s/%s annotation %s, %v",
+		klog.Errorf("ha: failed to delete pvc %s/%s annotation %s, %v",
 			ns, schedulingPVC.GetName(), label.AnnPVCPodScheduling, err)
 		return schedulingPVC, currentPVC, err
 	}
-	glog.Infof("ha: delete pvc %s/%s annotation %s successfully",
+	klog.Infof("ha: delete pvc %s/%s annotation %s successfully",
 		ns, schedulingPVC.GetName(), label.AnnPVCPodScheduling)
 	return schedulingPVC, currentPVC, h.setCurrentPodScheduling(currentPVC)
 }
@@ -319,10 +319,10 @@ func (h *ha) realUpdatePVCFn(pvc *apiv1.PersistentVolumeClaim) error {
 	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
 		_, updateErr := h.kubeCli.CoreV1().PersistentVolumeClaims(ns).Update(pvc)
 		if updateErr == nil {
-			glog.Infof("update PVC: [%s/%s] successfully, TidbCluster: %s", ns, pvcName, tcName)
+			klog.Infof("update PVC: [%s/%s] successfully, TidbCluster: %s", ns, pvcName, tcName)
 			return nil
 		}
-		glog.Errorf("failed to update PVC: [%s/%s], TidbCluster: %s, error: %v", ns, pvcName, tcName, updateErr)
+		klog.Errorf("failed to update PVC: [%s/%s], TidbCluster: %s, error: %v", ns, pvcName, tcName, updateErr)
 
 		if updated, err := h.pvcGetFn(ns, pvcName); err == nil {
 			// make a copy so we don't mutate the shared cache
@@ -355,11 +355,11 @@ func (h *ha) setCurrentPodScheduling(pvc *apiv1.PersistentVolumeClaim) error {
 	pvc.Annotations[label.AnnPVCPodScheduling] = now
 	err := h.updatePVCFn(pvc)
 	if err != nil {
-		glog.Errorf("ha: failed to set pvc %s/%s annotation %s to %s, %v",
+		klog.Errorf("ha: failed to set pvc %s/%s annotation %s to %s, %v",
 			ns, pvcName, label.AnnPVCPodScheduling, now, err)
 		return err
 	}
-	glog.Infof("ha: set pvc %s/%s annotation %s to %s successfully",
+	klog.Infof("ha: set pvc %s/%s annotation %s to %s successfully",
 		ns, pvcName, label.AnnPVCPodScheduling, now)
 	return nil
 }
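realUpdatePVCFn above is the standard client-go optimistic-concurrency loop: retry.RetryOnConflict re-runs the closure whenever the apiserver rejects the update with a conflict, and the closure refreshes its copy of the object before the next attempt. Stripped to its skeleton; the getLatest/update/mutate functions here are placeholders for the PVC lister and updater used in the real code:

package main

import (
	"fmt"

	"k8s.io/client-go/util/retry"
)

type object struct{ resourceVersion, value string }

func updateWithRetry(getLatest func() (*object, error), update func(*object) error, mutate func(*object)) error {
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		obj, err := getLatest() // re-read so we never retry on a stale copy
		if err != nil {
			return err
		}
		mutate(obj)
		return update(obj) // a conflict error here triggers another attempt
	})
}

func main() {
	stored := &object{resourceVersion: "1", value: "old"}
	err := updateWithRetry(
		func() (*object, error) { cp := *stored; return &cp, nil },
		func(o *object) error { *stored = *o; return nil },
		func(o *object) { o.value = "new" },
	)
	fmt.Println(err, stored.value) // <nil> new
}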
diff --git a/pkg/scheduler/predicates/stable_scheduling.go b/pkg/scheduler/predicates/stable_scheduling.go
index 1ab7bea2ac..0c1bdd0706 100644
--- a/pkg/scheduler/predicates/stable_scheduling.go
+++ b/pkg/scheduler/predicates/stable_scheduling.go
@@ -25,7 +25,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/kubernetes"
-	glog "k8s.io/klog"
+	"k8s.io/klog"
 )
 
 const (
@@ -93,10 +93,10 @@ func (p *stableScheduling) Filter(instanceName string, pod *apiv1.Pod, nodes []a
 	nodeName := p.findPreviousNodeInTC(tc, pod)
 
 	if nodeName != "" {
-		glog.V(2).Infof("found previous node %q for pod %q in TiDB cluster %q", nodeName, podName, tcName)
+		klog.V(2).Infof("found previous node %q for pod %q in TiDB cluster %q", nodeName, podName, tcName)
 		for _, node := range nodes {
 			if node.Name == nodeName {
-				glog.V(2).Infof("previous node %q for pod %q in TiDB cluster %q exists in candicates, filter out other nodes", nodeName, podName, tcName)
+				klog.V(2).Infof("previous node %q for pod %q in TiDB cluster %q exists in candidates, filter out other nodes", nodeName, podName, tcName)
 				return []apiv1.Node{node}, nil
 			}
 		}
@@ -104,7 +104,7 @@ func (p *stableScheduling) Filter(instanceName string, pod *apiv1.Pod, nodes []a
 	}
 
 	msg := fmt.Sprintf("no previous node exists for pod %q in TiDB cluster %s/%s", podName, ns, tcName)
-	glog.Warning(msg)
+	klog.Warning(msg)
 	return nodes, errors.New(msg)
 }
diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go
index 6a03670057..6a5f35a33a 100644
--- a/pkg/scheduler/scheduler.go
+++ b/pkg/scheduler/scheduler.go
@@ -25,7 +25,7 @@ import (
 	kubescheme "k8s.io/client-go/kubernetes/scheme"
 	eventv1 "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/tools/record"
-	glog "k8s.io/klog"
+	"k8s.io/klog"
 	schedulerapiv1 "k8s.io/kubernetes/pkg/scheduler/api/v1"
 )
 
@@ -53,7 +53,7 @@ type scheduler struct {
 // NewScheduler returns a Scheduler
 func NewScheduler(kubeCli kubernetes.Interface, cli versioned.Interface) Scheduler {
 	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 	eventBroadcaster.StartRecordingToSink(&eventv1.EventSinkImpl{
 		Interface: eventv1.New(kubeCli.CoreV1().RESTClient()).Events("")})
 	recorder := eventBroadcaster.NewRecorder(kubescheme.Scheme, apiv1.EventSource{Component: "tidb-scheduler"})
@@ -93,7 +93,7 @@ func (s *scheduler) Filter(args *schedulerapiv1.ExtenderArgs) (*schedulerapiv1.E
 	var instanceName string
 	var exist bool
 	if instanceName, exist = pod.Labels[label.InstanceLabelKey]; !exist {
-		glog.Warningf("can't find instanceName in pod labels: %s/%s", ns, podName)
+		klog.Warningf("can't find instanceName in pod labels: %s/%s", ns, podName)
 		return &schedulerapiv1.ExtenderFilterResult{
 			Nodes: args.Nodes,
 		}, nil
@@ -101,7 +101,7 @@ func (s *scheduler) Filter(args *schedulerapiv1.ExtenderArgs) (*schedulerapiv1.E
 
 	component, ok := pod.Labels[label.ComponentLabelKey]
 	if !ok {
-		glog.Warningf("can't find component label in pod labels: %s/%s", ns, podName)
+		klog.Warningf("can't find component label in pod labels: %s/%s", ns, podName)
 		return &schedulerapiv1.ExtenderFilterResult{
 			Nodes: args.Nodes,
 		}, nil
@@ -109,18 +109,18 @@ func (s *scheduler) Filter(args *schedulerapiv1.ExtenderArgs) (*schedulerapiv1.E
 
 	predicatesByComponent, ok := s.predicates[component]
 	if !ok {
-		glog.Warningf("no predicate for component %q, ignored", component)
+		klog.Warningf("no predicate for component %q, ignored", component)
 		return &schedulerapiv1.ExtenderFilterResult{
 			Nodes: args.Nodes,
 		}, nil
 	}
 
-	glog.Infof("scheduling pod: %s/%s", ns, podName)
+	klog.Infof("scheduling pod: %s/%s", ns, podName)
 	var err error
 	for _, predicate := range predicatesByComponent {
-		glog.Infof("entering predicate: %s, nodes: %v", predicate.Name(), predicates.GetNodeNames(kubeNodes))
+		klog.Infof("entering predicate: %s, nodes: %v", predicate.Name(), predicates.GetNodeNames(kubeNodes))
 		kubeNodes, err = predicate.Filter(instanceName, pod, kubeNodes)
-		glog.Infof("leaving predicate: %s, nodes: %v", predicate.Name(), predicates.GetNodeNames(kubeNodes))
+		klog.Infof("leaving predicate: %s, nodes: %v", predicate.Name(), predicates.GetNodeNames(kubeNodes))
 		if err != nil {
 			s.recorder.Event(pod, apiv1.EventTypeWarning, predicate.Name(), err.Error())
 			if len(kubeNodes) == 0 {
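The Filter loop above threads the candidate node list through each registered predicate in turn, so every predicate only sees the nodes that survived the previous one. The control flow, reduced to plain Go (Predicate is a stand-in for the real interface):

package main

import "fmt"

type Predicate func(nodes []string) ([]string, error)

// runPredicates narrows nodes step by step; an error with no survivors
// aborts, roughly matching the scheduler's behavior of recording an event
// and returning early only when nothing is left to schedule on.
func runPredicates(nodes []string, preds []Predicate) ([]string, error) {
	var err error
	for _, p := range preds {
		nodes, err = p(nodes)
		if err != nil && len(nodes) == 0 {
			return nil, err
		}
	}
	return nodes, nil
}

func main() {
	dropFirst := func(ns []string) ([]string, error) { return ns[1:], nil }
	out, err := runPredicates([]string{"n1", "n2", "n3"}, []Predicate{dropFirst, dropFirst})
	fmt.Println(out, err) // [n3] <nil>
}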
diff --git a/pkg/scheduler/server/mux.go b/pkg/scheduler/server/mux.go
index 9061ce54dc..85ab686f8e 100644
--- a/pkg/scheduler/server/mux.go
+++ b/pkg/scheduler/server/mux.go
@@ -22,7 +22,7 @@ import (
 	"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
 	"github.com/pingcap/tidb-operator/pkg/scheduler"
 	"k8s.io/client-go/kubernetes"
-	glog "k8s.io/klog"
+	"k8s.io/klog"
 	schedulerapiv1 "k8s.io/kubernetes/pkg/scheduler/api/v1"
 )
 
@@ -58,8 +58,8 @@ func StartServer(kubeCli kubernetes.Interface, cli versioned.Interface, port int
 		Writes(schedulerapiv1.HostPriorityList{}))
 	restful.Add(ws)
 
-	glog.Infof("start scheduler extender server, listening on 0.0.0.0:%d", port)
-	glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil))
+	klog.Infof("start scheduler extender server, listening on 0.0.0.0:%d", port)
+	klog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil))
 }
 
 func (svr *server) filterNode(req *restful.Request, resp *restful.Response) {
@@ -104,8 +104,8 @@ func (svr *server) prioritizeNode(req *restful.Request, resp *restful.Response)
 }
 
 func errorResponse(resp *restful.Response, svcErr restful.ServiceError) {
-	glog.Error(svcErr.Message)
+	klog.Error(svcErr.Message)
 	if writeErr := resp.WriteServiceError(svcErr.Code, svcErr); writeErr != nil {
-		glog.Errorf("unable to write error: %v", writeErr)
+		klog.Errorf("unable to write error: %v", writeErr)
 	}
 }
diff --git a/pkg/tkctl/cmd/list/list.go b/pkg/tkctl/cmd/list/list.go
index d115b7ff6b..42fa1d2394 100644
--- a/pkg/tkctl/cmd/list/list.go
+++ b/pkg/tkctl/cmd/list/list.go
@@ -19,7 +19,7 @@ import (
 	"github.com/pingcap/tidb-operator/pkg/tkctl/readable"
 	"github.com/spf13/cobra"
 	"k8s.io/cli-runtime/pkg/genericclioptions"
-	glog "k8s.io/klog"
+	"k8s.io/klog"
 	cmdutil "k8s.io/kubectl/pkg/cmd/util"
 	kubeprinters "k8s.io/kubernetes/pkg/printers"
 )
@@ -129,7 +129,7 @@ func (o *ListOptions) Run(tkcContext *config.TkcContext, cmd *cobra.Command, arg
 	for _, info := range infos {
 		internalObj, err := v1alpha1.Scheme.ConvertToVersion(info.Object, v1alpha1.SchemeGroupVersion)
 		if err != nil {
-			glog.V(1).Info(err)
+			klog.V(1).Info(err)
 			printer.PrintObj(info.Object, w)
 		} else {
 			printer.PrintObj(internalObj, w)
diff --git a/pkg/tkctl/config/config.go b/pkg/tkctl/config/config.go
index c2079e927d..5a06e7bcee 100644
--- a/pkg/tkctl/config/config.go
+++ b/pkg/tkctl/config/config.go
@@ -25,7 +25,7 @@ import (
 	"k8s.io/cli-runtime/pkg/resource"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
-	glog "k8s.io/klog"
+	"k8s.io/klog"
 
 	restclient "k8s.io/client-go/rest"
 )
@@ -98,11 +98,11 @@ func (c *TkcContext) ToTkcClientConfig() (*TkcClientConfig, error) {
 	// try loading tidb cluster config
 	tcConfigFile, err := tcConfigLocation()
 	if err != nil {
-		glog.V(4).Info("Error getting tidb cluster config file location")
+		klog.V(4).Info("Error getting tidb cluster config file location")
 	} else {
 		tcConfig, err := LoadFile(tcConfigFile)
 		if err != nil {
-			glog.V(4).Info("Error reading tidb cluster config file")
+			klog.V(4).Info("Error reading tidb cluster config file")
 			c.TidbClusterConfig = &TidbClusterConfig{}
 		} else {
 			c.TidbClusterConfig = tcConfig
diff --git a/pkg/util/crypto/certs.go b/pkg/util/crypto/certs.go
index ba65ac5c91..318d9b3187 100644
--- a/pkg/util/crypto/certs.go
+++ b/pkg/util/crypto/certs.go
@@ -24,7 +24,7 @@ import (
 	"io/ioutil"
 	"net"
 
-	glog "k8s.io/klog"
+	"k8s.io/klog"
 )
 
 const (
@@ -97,11 +97,11 @@ func ReadCACerts() (*x509.CertPool, error) {
 	// load k8s CA cert
 	caCert, err := ioutil.ReadFile(k8sCAFile)
 	if err != nil {
-		glog.Errorf("fail to read CA file %s, error: %v", k8sCAFile, err)
+		klog.Errorf("fail to read CA file %s, error: %v", k8sCAFile, err)
 		return nil, err
 	}
 	if ok := rootCAs.AppendCertsFromPEM(caCert); !ok {
-		glog.Warningf("fail to append CA file to pool, using system CAs only")
+		klog.Warningf("fail to append CA file to pool, using system CAs only")
 	}
 	return rootCAs, nil
 }
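ReadCACerts above appends the in-cluster service-account CA to a root pool and falls back to system CAs when the PEM cannot be parsed. The same pattern in isolation (a sketch, not this repo's code; the CA path is the conventional in-cluster location and error handling is condensed):

package main

import (
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"log"
)

const k8sCAFile = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"

func readCACerts() (*x509.CertPool, error) {
	rootCAs, err := x509.SystemCertPool()
	if err != nil {
		rootCAs = x509.NewCertPool() // start empty if the system pool is unavailable
	}
	caCert, err := ioutil.ReadFile(k8sCAFile)
	if err != nil {
		return nil, fmt.Errorf("read CA file %s: %v", k8sCAFile, err)
	}
	if ok := rootCAs.AppendCertsFromPEM(caCert); !ok {
		log.Printf("fail to append CA file to pool, using system CAs only")
	}
	return rootCAs, nil
}

func main() {
	pool, err := readCACerts()
	fmt.Println(pool != nil, err)
}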
diff --git a/pkg/version/version.go b/pkg/version/version.go
index 4b75fce148..9e431936d8 100644
--- a/pkg/version/version.go
+++ b/pkg/version/version.go
@@ -17,7 +17,7 @@ import (
 	"fmt"
 	"runtime"
 
-	glog "k8s.io/klog"
+	"k8s.io/klog"
 )
 
 var (
@@ -35,8 +35,8 @@ func PrintVersionInfo() {
 
 // LogVersionInfo print version info at startup
 func LogVersionInfo() {
-	glog.Infof("Welcome to TiDB Operator.")
-	glog.Infof("TiDB Operator Version: %#v", Get())
+	klog.Infof("Welcome to TiDB Operator.")
+	klog.Infof("TiDB Operator Version: %#v", Get())
 }
 
 // Get returns the overall codebase version. It's for detecting
diff --git a/tests/actions.go b/tests/actions.go
index 95639de4d9..c4d1bbe0a5 100644
--- a/tests/actions.go
+++ b/tests/actions.go
@@ -69,7 +69,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
 	typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
-	glog "k8s.io/klog"
+	"k8s.io/klog"
 	aggregatorclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -457,12 +457,12 @@ func (oi *OperatorConfig) Enabled(feature string) bool {
 
 func (oa *operatorActions) runKubectlOrDie(args ...string) string {
 	cmd := "kubectl"
-	glog.Infof("Running '%s %s'", cmd, strings.Join(args, " "))
+	klog.Infof("Running '%s %s'", cmd, strings.Join(args, " "))
 	out, err := exec.Command(cmd, args...).CombinedOutput()
 	if err != nil {
-		glog.Fatalf("Failed to run '%s %s'\nCombined output: %q\nError: %v", cmd, strings.Join(args, " "), string(out), err)
+		klog.Fatalf("Failed to run '%s %s'\nCombined output: %q\nError: %v", cmd, strings.Join(args, " "), string(out), err)
 	}
-	glog.Infof("Combined output: %q", string(out))
+	klog.Infof("Combined output: %q", string(out))
 	return string(out)
 }
 
@@ -474,7 +474,7 @@ func (oa *operatorActions) CleanCRDOrDie() {
 
 func (oa *operatorActions) InstallCRDOrDie(info *OperatorConfig) {
 	if info.Enabled(features.AdvancedStatefulSet) {
 		if isSupported, err := utildiscovery.IsAPIGroupVersionSupported(oa.kubeCli.Discovery(), "apiextensions.k8s.io/v1"); err != nil {
-			glog.Fatal(err)
+			klog.Fatal(err)
 		} else if isSupported {
 			oa.runKubectlOrDie("apply", "-f", oa.manifestPath("e2e/advanced-statefulset-crd.v1.yaml"))
 		} else {
@@ -483,19 +483,19 @@ func (oa *operatorActions) InstallCRDOrDie(info *OperatorConfig) {
 	}
 	oa.runKubectlOrDie("apply", "-f", oa.manifestPath("e2e/crd.yaml"))
 	oa.runKubectlOrDie("apply", "-f", oa.manifestPath("e2e/data-resource-crd.yaml"))
-	glog.Infof("Wait for all CRDs are established")
+	klog.Infof("Wait for all CRDs to be established")
 	e2eutil.WaitForCRDsEstablished(oa.apiExtCli, labels.Everything())
 
 	// workaround for https://github.com/kubernetes/kubernetes/issues/65517
-	glog.Infof("force sync kubectl cache")
+	klog.Infof("force sync kubectl cache")
 	cmdArgs := []string{"sh", "-c", "rm -rf ~/.kube/cache ~/.kube/http-cache"}
 	_, err := exec.Command(cmdArgs[0], cmdArgs[1:]...).CombinedOutput()
 	if err != nil {
-		glog.Fatalf("Failed to run '%s': %v", strings.Join(cmdArgs, " "), err)
+		klog.Fatalf("Failed to run '%s': %v", strings.Join(cmdArgs, " "), err)
 	}
 }
 
 func (oa *operatorActions) DeployOperator(info *OperatorConfig) error {
-	glog.Infof("deploying tidb-operator %s", info.ReleaseName)
+	klog.Infof("deploying tidb-operator %s", info.ReleaseName)
 
 	if info.Tag != "e2e" {
 		if err := oa.cloneOperatorRepo(); err != nil {
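runKubectlOrDie above is the e2e suite's thin wrapper over exec.Command: log the invocation, capture stdout and stderr together, and abort the run on failure. The same helper for an arbitrary binary, as a sketch (klog.Fatalf exits the process, which is exactly the *OrDie contract):

package main

import (
	"os/exec"
	"strings"

	"k8s.io/klog"
)

// runOrDie runs cmd with args and returns combined stdout+stderr.
// Any failure is fatal: in an e2e helper there is nothing to recover.
func runOrDie(cmd string, args ...string) string {
	klog.Infof("Running '%s %s'", cmd, strings.Join(args, " "))
	out, err := exec.Command(cmd, args...).CombinedOutput()
	if err != nil {
		klog.Fatalf("Failed to run '%s %s': %v, output: %q", cmd, strings.Join(args, " "), err, string(out))
	}
	return string(out)
}

func main() {
	klog.Info(runOrDie("echo", "hello"))
}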
@@ -518,14 +518,14 @@ func (oa *operatorActions) DeployOperator(info *OperatorConfig) error {
 		info.ReleaseName,
 		info.Namespace,
 		info.OperatorHelmSetString(nil))
-	glog.Info(cmd)
+	klog.Info(cmd)
 
 	res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
 	if err != nil {
 		return fmt.Errorf("failed to deploy operator: %v, %s", err, string(res))
 	}
 
-	glog.Infof("Wait for all apiesrvices are available")
+	klog.Infof("Wait for all apiservices to be available")
 	return e2eutil.WaitForAPIServicesAvaiable(oa.aggrCli, labels.Everything())
 }
 
@@ -536,7 +536,7 @@ func (oa *operatorActions) DeployOperatorOrDie(info *OperatorConfig) {
 }
 
 func (oa *operatorActions) CleanOperator(info *OperatorConfig) error {
-	glog.Infof("cleaning tidb-operator %s", info.ReleaseName)
+	klog.Infof("cleaning tidb-operator %s", info.ReleaseName)
 
 	res, err := exec.Command("helm", "del", "--purge", info.ReleaseName).CombinedOutput()
 
@@ -554,7 +554,7 @@ func (oa *operatorActions) CleanOperatorOrDie(info *OperatorConfig) {
 }
 
 func (oa *operatorActions) UpgradeOperator(info *OperatorConfig) error {
-	glog.Infof("upgrading tidb-operator %s", info.ReleaseName)
+	klog.Infof("upgrading tidb-operator %s", info.ReleaseName)
 
 	listOptions := metav1.ListOptions{
 		LabelSelector: labels.SelectorFromSet(
@@ -580,7 +580,7 @@ func (oa *operatorActions) UpgradeOperator(info *OperatorConfig) error {
 		return fmt.Errorf("failed to upgrade operator to: %s, %v, %s", info.Image, err, string(res))
 	}
 
-	glog.Infof("Wait for all apiesrvices are available")
+	klog.Infof("Wait for all apiservices to be available")
 	err = e2eutil.WaitForAPIServicesAvaiable(oa.aggrCli, labels.Everything())
 	if err != nil {
 		return err
@@ -594,7 +594,7 @@ func (oa *operatorActions) UpgradeOperator(info *OperatorConfig) error {
 	waitFn := func() (done bool, err error) {
 		pods2, err := oa.kubeCli.CoreV1().Pods(metav1.NamespaceAll).List(listOptions)
 		if err != nil {
-			glog.Error(err)
+			klog.Error(err)
 			return false, nil
 		}
 
@@ -625,18 +625,18 @@ func ensurePodsUnchanged(pods1, pods2 *corev1.PodList) error {
 		return err
 	}
 	if reflect.DeepEqual(pods1UIDs, pods2UIDs) {
-		glog.V(4).Infof("%s", string(pods1Yaml))
-		glog.V(4).Infof("%s", string(pods2Yaml))
-		glog.V(4).Infof("%v", pods1UIDs)
-		glog.V(4).Infof("%v", pods2UIDs)
-		glog.V(4).Infof("pods unchanged after operator upgraded")
+		klog.V(4).Infof("%s", string(pods1Yaml))
+		klog.V(4).Infof("%s", string(pods2Yaml))
+		klog.V(4).Infof("%v", pods1UIDs)
+		klog.V(4).Infof("%v", pods2UIDs)
+		klog.V(4).Infof("pods unchanged after operator upgraded")
 		return nil
 	}
 
-	glog.Infof("%s", string(pods1Yaml))
-	glog.Infof("%s", string(pods2Yaml))
-	glog.Infof("%v", pods1UIDs)
-	glog.Infof("%v", pods2UIDs)
+	klog.Infof("%s", string(pods1Yaml))
+	klog.Infof("%s", string(pods2Yaml))
+	klog.Infof("%v", pods1UIDs)
+	klog.Infof("%v", pods2UIDs)
 	return fmt.Errorf("some pods changed after operator upgraded")
 }
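ensurePodsUnchanged above boils down to: snapshot pod-name to UID maps before and after the operator upgrade and require them to be identical, since any recreated pod gets a fresh UID. The core comparison, with plain maps standing in for the PodList-derived ones:

package main

import (
	"fmt"
	"reflect"
)

// podsUnchanged returns an error when any pod was deleted, added, or
// recreated (same name, new UID) between the two snapshots.
func podsUnchanged(before, after map[string]string) error {
	if reflect.DeepEqual(before, after) {
		return nil
	}
	return fmt.Errorf("some pods changed: before=%v after=%v", before, after)
}

func main() {
	before := map[string]string{"pd-0": "uid-a", "tikv-0": "uid-b"}
	after := map[string]string{"pd-0": "uid-a", "tikv-0": "uid-c"} // tikv-0 recreated
	fmt.Println(podsUnchanged(before, after))
}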
@@ -665,7 +665,7 @@ func (oa *operatorActions) DeployTidbCluster(info *TidbClusterConfig) error {
 		return nil
 	}
 
-	glog.Infof("deploying tidb cluster [%s/%s]", info.Namespace, info.ClusterName)
+	klog.Infof("deploying tidb cluster [%s/%s]", info.Namespace, info.ClusterName)
 	oa.EmitEvent(info, "DeployTidbCluster")
 
 	namespace := &corev1.Namespace{
@@ -691,7 +691,7 @@ func (oa *operatorActions) DeployTidbCluster(info *TidbClusterConfig) error {
 		return err
 	}
 	cmd = fmt.Sprintf(" %s --values %s", cmd, svFilePath)
-	glog.Info(cmd)
+	klog.Info(cmd)
 
 	if res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput(); err != nil {
 		return fmt.Errorf("failed to deploy tidbcluster: %s/%s, %v, %s",
@@ -708,7 +708,7 @@ func (oa *operatorActions) DeployTidbClusterOrDie(info *TidbClusterConfig) {
 }
 
 func (oa *operatorActions) CleanTidbCluster(info *TidbClusterConfig) error {
-	glog.Infof("cleaning tidbcluster %s/%s", info.Namespace, info.ClusterName)
+	klog.Infof("cleaning tidbcluster %s/%s", info.Namespace, info.ClusterName)
 	oa.EmitEvent(info, "CleanTidbCluster")
 	ns := info.Namespace
 	tcName := info.ClusterName
@@ -738,7 +738,7 @@ func (oa *operatorActions) CleanTidbCluster(info *TidbClusterConfig) error {
 	for _, pvc := range pvcList.Items {
 		beforePVCNames = append(beforePVCNames, pvc.GetName())
 	}
-	glog.V(4).Info(beforePVCNames)
+	klog.V(4).Info(beforePVCNames)
 
 	pvList, err := oa.kubeCli.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: selector.String()})
 	if err != nil {
@@ -747,10 +747,10 @@ func (oa *operatorActions) CleanTidbCluster(info *TidbClusterConfig) error {
 	var beforePVNames []string
 	for _, pv := range pvList.Items {
 		beforePVNames = append(beforePVNames, pv.GetName())
-		glog.V(4).Infof("%s, %s, %v", pv.Name, pv.Spec.PersistentVolumeReclaimPolicy, pv.Labels)
-		glog.V(4).Info(pv.Spec.ClaimRef)
+		klog.V(4).Infof("%s, %s, %v", pv.Name, pv.Spec.PersistentVolumeReclaimPolicy, pv.Labels)
+		klog.V(4).Info(pv.Spec.ClaimRef)
 	}
-	glog.V(4).Info(beforePVNames)
+	klog.V(4).Info(beforePVNames)
 
 	charts := []string{
 		info.ClusterName,
@@ -779,7 +779,7 @@ func (oa *operatorActions) CleanTidbCluster(info *TidbClusterConfig) error {
 	for _, pvc := range pvcList.Items {
 		afterPVCNames = append(afterPVCNames, pvc.GetName())
 	}
-	glog.V(4).Info(afterPVCNames)
+	klog.V(4).Info(afterPVCNames)
 	if !reflect.DeepEqual(beforePVCNames, afterPVCNames) {
 		return fmt.Errorf("pvc changed when we delete cluster: %s/%s, before: %v, after: %v",
 			ns, tcName, beforePVCNames, afterPVCNames)
@@ -794,10 +794,10 @@ func (oa *operatorActions) CleanTidbCluster(info *TidbClusterConfig) error {
 		for _, pv := range pvList.Items {
 			afterPVNames = append(afterPVNames, pv.GetName())
 		}
-		glog.V(4).Info(afterPVNames)
+		klog.V(4).Info(afterPVNames)
 		if !reflect.DeepEqual(beforePVNames, afterPVNames) {
-			glog.Errorf("pv changed when we delete cluster: %s/%s, before: %v, after: %v",
+			klog.Errorf("pv changed when we delete cluster: %s/%s, before: %v, after: %v",
 				ns, tcName, beforePVNames, afterPVNames)
 			return false, nil
 		}
@@ -859,7 +859,7 @@ func (oa *operatorActions) CleanTidbCluster(info *TidbClusterConfig) error {
 		label.ManagedByLabelKey, "tidb-operator",
 		label.NamespaceLabelKey, info.Namespace,
 		label.InstanceLabelKey, info.ClusterName)
-	glog.V(4).Info(patchPVCmd)
+	klog.V(4).Info(patchPVCmd)
 	if res, err := exec.Command("/bin/sh", "-c", patchPVCmd).CombinedOutput(); err != nil {
 		return fmt.Errorf("failed to patch pv: %v, %s", err, string(res))
 	}
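CleanTidbCluster, like most checks in this file, wraps its condition in wait.Poll: the condition returns (false, nil) to keep polling, (true, nil) to stop, and a non-nil error to abort immediately. A minimal usage sketch with apimachinery's wait package:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	deadline := time.Now().Add(3 * time.Second)
	// Poll every 500ms, up to 10s, until the condition reports done.
	err := wait.Poll(500*time.Millisecond, 10*time.Second, func() (bool, error) {
		if time.Now().After(deadline) {
			return true, nil // done
		}
		return false, nil // keep waiting; a non-nil error would abort
	})
	fmt.Println(err) // <nil>
}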
@@ -867,18 +867,18 @@ func (oa *operatorActions) CleanTidbCluster(info *TidbClusterConfig) error {
 	pollFn := func() (bool, error) {
 		if res, err := exec.Command("kubectl", "get", "po", "--output=name", "-n", info.Namespace, "-l", setStr).
 			CombinedOutput(); err != nil || len(res) != 0 {
-			glog.V(4).Infof("waiting for tidbcluster: %s/%s pods deleting, %v, [%s]",
+			klog.V(4).Infof("waiting for tidbcluster: %s/%s pods deleting, %v, [%s]",
 				info.Namespace, info.ClusterName, err, string(res))
 			return false, nil
 		}
 
 		pvCmd := fmt.Sprintf("kubectl get pv | grep %s | grep %s 2>/dev/null|grep Released",
 			info.Namespace, info.ClusterName)
-		glog.V(4).Info(pvCmd)
+		klog.V(4).Info(pvCmd)
 		if res, err := exec.Command("/bin/sh", "-c", pvCmd).CombinedOutput(); len(res) == 0 {
 			return true, nil
 		} else if err != nil {
-			glog.V(4).Infof("waiting for tidbcluster: %s/%s pv deleting, %v, %s",
+			klog.V(4).Infof("waiting for tidbcluster: %s/%s pv deleting, %v, %s",
 				info.Namespace, info.ClusterName, err, string(res))
 			return false, nil
 		}
@@ -903,7 +903,7 @@ func (oa *operatorActions) GetTidbMemberAssignedNodes(info *TidbClusterConfig) (
 	}
 	podList, err := oa.kubeCli.CoreV1().Pods(ns).List(listOptions)
 	if err != nil {
-		glog.Errorf("failed to get tidb pods: %s/%s, %v", ns, tcName, err)
+		klog.Errorf("failed to get tidb pods: %s/%s, %v", ns, tcName, err)
 		return nil, err
 	}
 	for _, pod := range podList.Items {
@@ -921,7 +921,7 @@ func (oa *operatorActions) GetTidbMemberAssignedNodesOrDie(info *TidbClusterConf
 }
 
 func (oa *operatorActions) CheckTidbMemberAssignedNodes(info *TidbClusterConfig, oldAssignedNodes map[string]string) error {
-	glog.Infof("checking tidb member [%s/%s] assigned nodes", info.Namespace, info.ClusterName)
+	klog.Infof("checking tidb member [%s/%s] assigned nodes", info.Namespace, info.ClusterName)
 	assignedNodes, err := oa.GetTidbMemberAssignedNodes(info)
 	if err != nil {
 		return err
@@ -942,7 +942,7 @@ func (oa *operatorActions) CheckTidbMemberAssignedNodesOrDie(info *TidbClusterCo
 }
 
 func (oa *operatorActions) CheckTidbClusterStatus(info *TidbClusterConfig) error {
-	glog.Infof("checking tidb cluster [%s/%s] status", info.Namespace, info.ClusterName)
+	klog.Infof("checking tidb cluster [%s/%s] status", info.Namespace, info.ClusterName)
 
 	ns := info.Namespace
 	tcName := info.ClusterName
@@ -950,7 +950,7 @@ func (oa *operatorActions) CheckTidbClusterStatus(info *TidbClusterConfig) error
 		var tc *v1alpha1.TidbCluster
 		var err error
 		if tc, err = oa.cli.PingcapV1alpha1().TidbClusters(ns).Get(tcName, metav1.GetOptions{}); err != nil {
-			glog.Errorf("failed to get tidbcluster: %s/%s, %v", ns, tcName, err)
+			klog.Errorf("failed to get tidbcluster: %s/%s, %v", ns, tcName, err)
 			return false, nil
 		}
 
@@ -961,69 +961,69 @@ func (oa *operatorActions) CheckTidbClusterStatus(info *TidbClusterConfig) error
 			return false, nil
 		}
 
-		glog.V(4).Infof("check tidb cluster begin tidbMembersReadyFn")
+		klog.V(4).Infof("check tidb cluster begin tidbMembersReadyFn")
 		if b, err := oa.tidbMembersReadyFn(tc); !b && err == nil {
 			return false, nil
 		}
 
-		glog.V(4).Infof("check tidb cluster begin reclaimPolicySyncFn")
+		klog.V(4).Infof("check tidb cluster begin reclaimPolicySyncFn")
 		if b, err := oa.reclaimPolicySyncFn(tc); !b && err == nil {
 			return false, nil
 		}
 
-		glog.V(4).Infof("check tidb cluster begin metaSyncFn")
+		klog.V(4).Infof("check tidb cluster begin metaSyncFn")
 		if b, err := oa.metaSyncFn(tc); !b && err == nil {
 			return false, nil
 		} else if err != nil {
-			glog.Error(err)
+			klog.Error(err)
 			return false, nil
 		}
 
-		glog.V(4).Infof("check tidb cluster begin schedulerHAFn")
+		klog.V(4).Infof("check tidb cluster begin schedulerHAFn")
 		if b, err := oa.schedulerHAFn(tc); !b && err == nil {
 			return false, nil
 		}
 
-		glog.V(4).Infof("check all pd and tikv instances have not pod scheduling annotation")
+		klog.V(4).Infof("check that all pd and tikv instances have no pod scheduling annotation")
 		if info.OperatorTag != "v1.0.0" {
 			if b, err := oa.podsScheduleAnnHaveDeleted(tc); !b && err == nil {
 				return false, nil
 			}
 		}
 
-		glog.V(4).Infof("check store labels")
+		klog.V(4).Infof("check store labels")
 		if b, err := oa.storeLabelsIsSet(tc, info.TopologyKey); !b && err == nil {
 			return false, nil
 		} else if err != nil {
 			return false, err
 		}
 
-		glog.V(4).Infof("check tidb cluster begin passwordIsSet")
+		klog.V(4).Infof("check tidb cluster begin passwordIsSet")
 		if b, err := oa.passwordIsSet(info); !b && err == nil {
 			return false, nil
 		}
 
 		if info.Monitor {
-			glog.V(4).Infof("check tidb monitor normal")
+			klog.V(4).Infof("check tidb monitor normal")
 			if b, err := oa.monitorNormal(info); !b && err == nil {
 				return false, nil
 			}
 		}
 		if info.EnableConfigMapRollout {
-			glog.V(4).Info("check tidb cluster configuration synced")
+			klog.V(4).Info("check tidb cluster configuration synced")
 			if b, err := oa.checkTidbClusterConfigUpdated(tc, info); !b && err == nil {
 				return false, nil
 			}
 		}
 		if info.EnablePVReclaim {
-			glog.V(4).Infof("check reclaim pvs success when scale in pd or tikv")
+			klog.V(4).Infof("check reclaim pvs success when scale in pd or tikv")
 			if b, err := oa.checkReclaimPVSuccess(tc); !b && err == nil {
 				return false, nil
 			}
 		}
 		return true, nil
 	}); err != nil {
-		glog.Errorf("check tidb cluster status failed: %s", err.Error())
+		klog.Errorf("check tidb cluster status failed: %s", err.Error())
 		return fmt.Errorf("failed to waiting for tidbcluster %s/%s ready in 120 minutes", ns, tcName)
 	}
 
@@ -1143,7 +1143,7 @@ func (oa *operatorActions) ScaleTidbCluster(info *TidbClusterConfig) error {
 	if err != nil {
 		return err
 	}
-	glog.Info("[SCALE] " + cmd)
+	klog.Info("[SCALE] " + cmd)
 	res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
 	if err != nil {
 		return pingcapErrors.Wrapf(err, "failed to scale tidb cluster: %s", string(res))
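Each readiness probe in CheckTidbClusterStatus follows the same convention as the poll condition itself: (false, nil) means "not ready yet, keep polling", while a hard error is surfaced to abort. Composing such checks generically might look like this sketch (the check type and helpers are illustrative, not from this patch):

package main

import (
	"errors"
	"fmt"
)

type check func() (bool, error)

// allReady runs checks in order; the first "not ready" stops the pass
// without error (the caller's poll loop will try again), while a hard
// error is propagated to abort polling.
func allReady(checks ...check) (bool, error) {
	for _, c := range checks {
		b, err := c()
		if err != nil {
			return false, err
		}
		if !b {
			return false, nil
		}
	}
	return true, nil
}

func main() {
	ready := func() (bool, error) { return true, nil }
	notYet := func() (bool, error) { return false, nil }
	fmt.Println(allReady(ready, notYet)) // false <nil>
	broken := func() (bool, error) { return false, errors.New("boom") }
	fmt.Println(allReady(ready, broken)) // false boom
}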
@@ -1161,32 +1161,32 @@ func (oa *operatorActions) CheckScaleInSafely(info *TidbClusterConfig) error {
 	return wait.Poll(oa.pollInterval, DefaultPollTimeout, func() (done bool, err error) {
 		tc, err := oa.cli.PingcapV1alpha1().TidbClusters(info.Namespace).Get(info.ClusterName, metav1.GetOptions{})
 		if err != nil {
-			glog.Infof("failed to get tidbcluster when scale in tidbcluster, error: %v", err)
+			klog.Infof("failed to get tidbcluster when scale in tidbcluster, error: %v", err)
 			return false, nil
 		}
 
 		tikvSetName := controller.TiKVMemberName(info.ClusterName)
 		tikvSet, err := oa.tcStsGetter.StatefulSets(info.Namespace).Get(tikvSetName, metav1.GetOptions{})
 		if err != nil {
-			glog.Infof("failed to get tikvSet statefulset: [%s], error: %v", tikvSetName, err)
+			klog.Infof("failed to get tikvSet statefulset: [%s], error: %v", tikvSetName, err)
 			return false, nil
 		}
 
 		pdClient, cancel, err := oa.getPDClient(tc)
 		if err != nil {
-			glog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
+			klog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
 			return false, nil
 		}
 		defer cancel()
 
 		stores, err := pdClient.GetStores()
 		if err != nil {
-			glog.Infof("pdClient.GetStores failed,error: %v", err)
+			klog.Infof("pdClient.GetStores failed, error: %v", err)
 			return false, nil
 		}
 		if len(stores.Stores) > int(*tikvSet.Spec.Replicas) {
-			glog.Infof("stores.Stores: %v", stores.Stores)
-			glog.Infof("tikvSet.Spec.Replicas: %d", *tikvSet.Spec.Replicas)
+			klog.Infof("stores.Stores: %v", stores.Stores)
+			klog.Infof("tikvSet.Spec.Replicas: %d", *tikvSet.Spec.Replicas)
 			return false, fmt.Errorf("the tikvSet.Spec.Replicas may reduce before tikv complete offline")
 		}
 
@@ -1202,7 +1202,7 @@ func (oa *operatorActions) CheckScaledCorrectly(info *TidbClusterConfig, podUIDs
 	return wait.Poll(oa.pollInterval, DefaultPollTimeout, func() (done bool, err error) {
 		podUIDs, err := oa.GetPodUIDMap(info)
 		if err != nil {
-			glog.Infof("failed to get pd pods's uid, error: %v", err)
+			klog.Infof("failed to get pd pods' uid, error: %v", err)
 			return false, nil
 		}
 
@@ -1224,7 +1224,7 @@ func (oa *operatorActions) setPartitionAnnotation(namespace, tcName, component s
 	// add annotation to pause statefulset upgrade process
 	cmd := fmt.Sprintf("kubectl annotate tc %s -n %s tidb.pingcap.com/%s-partition=%d --overwrite",
 		tcName, namespace, component, ordinal)
-	glog.Infof("%s", cmd)
+	klog.Infof("%s", cmd)
 	output, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
 	if err != nil {
 		return fmt.Errorf("fail to set annotation for [%s/%s], component: %s, partition: %d, err: %v, output: %s", namespace, tcName, component, ordinal, err, string(output))
@@ -1239,7 +1239,7 @@ func (oa *operatorActions) UpgradeTidbCluster(info *TidbClusterConfig) error {
 	if err != nil {
 		return err
 	}
-	glog.Info("[UPGRADE] " + cmd)
+	klog.Info("[UPGRADE] " + cmd)
 	res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
 	if err != nil {
 		return pingcapErrors.Wrapf(err, "failed to upgrade tidb cluster: %s", string(res))
@@ -1290,21 +1290,21 @@ func (oa *operatorActions) CheckUpgrade(ctx context.Context, info *TidbClusterCo
 			scheduler := fmt.Sprintf("evict-leader-scheduler-%s", findStoreFn(tc, podName))
 			pdClient, cancel, err := oa.getPDClient(tc)
 			if err != nil {
-				glog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
+				klog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
 				return false, nil
 			}
 			defer cancel()
 			schedulers, err := pdClient.GetEvictLeaderSchedulers()
 			if err != nil {
-				glog.Errorf("failed to get evict leader schedulers, %v", err)
+				klog.Errorf("failed to get evict leader schedulers, %v", err)
 				return false, nil
 			}
-			glog.V(4).Infof("index:%d,schedulers:%v,error:%v", i, schedulers, err)
+			klog.V(4).Infof("index: %d, schedulers: %v, error: %v", i, schedulers, err)
 			if len(schedulers) > 1 {
-				glog.Errorf("there are too many evict leader schedulers: %v", schedulers)
+				klog.Errorf("there are too many evict leader schedulers: %v", schedulers)
 				for _, s := range schedulers {
 					if s == scheduler {
-						glog.Infof("found scheudler: %s", scheduler)
+						klog.Infof("found scheduler: %s", scheduler)
 						return true, nil
 					}
 				}
@@ -1314,14 +1314,14 @@ func (oa *operatorActions) CheckUpgrade(ctx context.Context, info *TidbClusterCo
 				return false, nil
 			}
 			if schedulers[0] == scheduler {
-				glog.Infof("index: %d,the schedulers: %s = %s", i, schedulers[0], scheduler)
+				klog.Infof("index: %d, the schedulers: %s = %s", i, schedulers[0], scheduler)
 				return true, nil
 			}
-			glog.Errorf("index: %d,the scheduler: %s != %s", i, schedulers[0], scheduler)
+			klog.Errorf("index: %d, the scheduler: %s != %s", i, schedulers[0], scheduler)
 			return false, nil
 		})
 		if err != nil {
-			glog.Errorf("failed to check upgrade %s/%s, %v", ns, tcName, err)
+			klog.Errorf("failed to check upgrade %s/%s, %v", ns, tcName, err)
 			return err
 		}
 	}
@@ -1345,19 +1345,19 @@ func (oa *operatorActions) CheckUpgrade(ctx context.Context, info *TidbClusterCo
 	return wait.PollImmediate(1*time.Second, 6*time.Minute, func() (done bool, err error) {
 		pdClient, cancel, err := oa.getPDClient(tc)
 		if err != nil {
-			glog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
+			klog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
 			return false, nil
 		}
 		defer cancel()
 		schedulers, err := pdClient.GetEvictLeaderSchedulers()
 		if err != nil {
-			glog.Errorf("failed to get evict leader schedulers, %v", err)
+			klog.Errorf("failed to get evict leader schedulers, %v", err)
 			return false, nil
 		}
 		if len(schedulers) == 0 {
 			return true, nil
 		}
-		glog.Errorf("schedulers: %v is not empty", schedulers)
+		klog.Errorf("schedulers: %v is not empty", schedulers)
 		return false, nil
 	})
 }
@@ -1375,7 +1375,7 @@ func (oa *operatorActions) CleanMonitor(info *TidbClusterConfig) error { return
 
 func getMemberContainer(kubeCli kubernetes.Interface, stsGetter typedappsv1.StatefulSetsGetter, namespace, tcName, component string) (*corev1.Container, bool) {
 	sts, err := stsGetter.StatefulSets(namespace).Get(fmt.Sprintf("%s-%s", tcName, component), metav1.GetOptions{})
 	if err != nil {
-		glog.Errorf("failed to get sts for component %s of cluster %s/%s", component, namespace, tcName)
+		klog.Errorf("failed to get sts for component %s of cluster %s/%s", component, namespace, tcName)
 		return nil, false
 	}
 	listOption := metav1.ListOptions{
@@ -1383,16 +1383,16 @@ func getMemberContainer(kubeCli kubernetes.Interface, stsGetter typedappsv1.Stat
 	}
 	podList, err := kubeCli.CoreV1().Pods(namespace).List(listOption)
 	if err != nil {
-		glog.Errorf("fail to get pods for component %s of cluster %s/%s", component, namespace, tcName)
+		klog.Errorf("fail to get pods for component %s of cluster %s/%s", component, namespace, tcName)
 		return nil, false
 	}
 	if len(podList.Items) == 0 {
-		glog.Errorf("no pods found for component %s of cluster %s/%s", component, namespace, tcName)
+		klog.Errorf("no pods found for component %s of cluster %s/%s", component, namespace, tcName)
 		return nil, false
 	}
 	pod := podList.Items[0]
 	if len(pod.Spec.Containers) == 0 {
-		glog.Errorf("no containers found for component %s of cluster %s/%s", component, namespace, tcName)
+		klog.Errorf("no containers found for component %s of cluster %s/%s", component, namespace, tcName)
 		return nil, false
 	}
 
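getMemberContainer above finds a component's first pod by listing pods with the statefulset's own label selector. The selector-building step, which the hunk elides, typically looks like this (a sketch against client-go types; the Set-to-selector conversion is the standard apimachinery helper, not code from this patch):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// MatchLabels as they might appear on a tidb-operator statefulset.
	matchLabels := map[string]string{
		"app.kubernetes.io/instance":  "demo",
		"app.kubernetes.io/component": "pd",
	}
	listOption := metav1.ListOptions{
		LabelSelector: labels.SelectorFromSet(labels.Set(matchLabels)).String(),
	}
	fmt.Println(listOption.LabelSelector)
	// e.g. app.kubernetes.io/component=pd,app.kubernetes.io/instance=demo
}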
@@ -1413,7 +1413,7 @@ func (oa *operatorActions) pdMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, err
 
 	pdSet, err := oa.tcStsGetter.StatefulSets(ns).Get(pdSetName, metav1.GetOptions{})
 	if err != nil {
-		glog.Errorf("failed to get statefulset: %s/%s, %v", ns, pdSetName, err)
+		klog.Errorf("failed to get statefulset: %s/%s, %v", ns, pdSetName, err)
 		return false, nil
 	}
 
@@ -1422,48 +1422,48 @@ func (oa *operatorActions) pdMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, err
 	}
 
 	if tc.Status.PD.StatefulSet == nil {
-		glog.Infof("tidbcluster: %s/%s .status.PD.StatefulSet is nil", ns, tcName)
+		klog.Infof("tidbcluster: %s/%s .status.PD.StatefulSet is nil", ns, tcName)
 		return false, nil
 	}
 	failureCount := len(tc.Status.PD.FailureMembers)
 	replicas := tc.Spec.PD.Replicas + int32(failureCount)
 	if *pdSet.Spec.Replicas != replicas {
-		glog.Infof("statefulset: %s/%s .spec.Replicas(%d) != %d",
+		klog.Infof("statefulset: %s/%s .spec.Replicas(%d) != %d",
 			ns, pdSetName, *pdSet.Spec.Replicas, replicas)
 		return false, nil
 	}
 	if pdSet.Status.ReadyReplicas != tc.Spec.PD.Replicas {
-		glog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != %d",
+		klog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != %d",
 			ns, pdSetName, pdSet.Status.ReadyReplicas, tc.Spec.PD.Replicas)
 		return false, nil
 	}
 	if len(tc.Status.PD.Members) != int(tc.Spec.PD.Replicas) {
-		glog.Infof("tidbcluster: %s/%s .status.PD.Members count(%d) != %d",
+		klog.Infof("tidbcluster: %s/%s .status.PD.Members count(%d) != %d",
 			ns, tcName, len(tc.Status.PD.Members), tc.Spec.PD.Replicas)
 		return false, nil
 	}
 	if pdSet.Status.ReadyReplicas != pdSet.Status.Replicas {
-		glog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != .status.Replicas(%d)",
+		klog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != .status.Replicas(%d)",
 			ns, pdSetName, pdSet.Status.ReadyReplicas, pdSet.Status.Replicas)
 		return false, nil
 	}
 
 	c, found := getMemberContainer(oa.kubeCli, oa.tcStsGetter, ns, tc.Name, label.PDLabelVal)
 	if !found {
-		glog.Infof("statefulset: %s/%s not found containers[name=pd] or pod %s-0",
+		klog.Infof("statefulset: %s/%s not found containers[name=pd] or pod %s-0",
 			ns, pdSetName, pdSetName)
 		return false, nil
 	}
 
 	if tc.PDImage() != c.Image {
-		glog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=pd].image(%s) != %s",
+		klog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=pd].image(%s) != %s",
 			ns, pdSetName, c.Image, tc.PDImage())
 		return false, nil
 	}
 
 	for _, member := range tc.Status.PD.Members {
 		if !member.Health {
-			glog.Infof("tidbcluster: %s/%s pd member(%s/%s) is not health",
+			klog.Infof("tidbcluster: %s/%s pd member(%s/%s) is not healthy",
 				ns, tcName, member.ID, member.Name)
 			return false, nil
 		}
@@ -1472,11 +1472,11 @@ func (oa *operatorActions) pdMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, err
 	pdServiceName := controller.PDMemberName(tcName)
 	pdPeerServiceName := controller.PDPeerMemberName(tcName)
 	if _, err := oa.kubeCli.CoreV1().Services(ns).Get(pdServiceName, metav1.GetOptions{}); err != nil {
-		glog.Errorf("failed to get service: %s/%s", ns, pdServiceName)
+		klog.Errorf("failed to get service: %s/%s", ns, pdServiceName)
 		return false, nil
 	}
 	if _, err := oa.kubeCli.CoreV1().Services(ns).Get(pdPeerServiceName, metav1.GetOptions{}); err != nil {
-		glog.Errorf("failed to get peer service: %s/%s", ns, pdPeerServiceName)
+		klog.Errorf("failed to get peer service: %s/%s", ns, pdPeerServiceName)
 		return false, nil
 	}
 
@@ -1490,7 +1490,7 @@ func (oa *operatorActions) tikvMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, e
 
 	tikvSet, err := oa.tcStsGetter.StatefulSets(ns).Get(tikvSetName, metav1.GetOptions{})
 	if err != nil {
-		glog.Errorf("failed to get statefulset: %s/%s, %v", ns, tikvSetName, err)
+		klog.Errorf("failed to get statefulset: %s/%s, %v", ns, tikvSetName, err)
 		return false, nil
 	}
 
@@ -1499,55 +1499,55 @@ func (oa *operatorActions) tikvMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, e
 	}
 
 	if tc.Status.TiKV.StatefulSet == nil {
-		glog.Infof("tidbcluster: %s/%s .status.TiKV.StatefulSet is nil", ns, tcName)
+		klog.Infof("tidbcluster: %s/%s .status.TiKV.StatefulSet is nil", ns, tcName)
 		return false, nil
 	}
 	failureCount := len(tc.Status.TiKV.FailureStores)
 	replicas := tc.Spec.TiKV.Replicas + int32(failureCount)
 	if *tikvSet.Spec.Replicas != replicas {
-		glog.Infof("statefulset: %s/%s .spec.Replicas(%d) != %d",
+		klog.Infof("statefulset: %s/%s .spec.Replicas(%d) != %d",
 			ns, tikvSetName, *tikvSet.Spec.Replicas, replicas)
 		return false, nil
 	}
 	if tikvSet.Status.ReadyReplicas != replicas {
-		glog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != %d",
+		klog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != %d",
 			ns, tikvSetName, tikvSet.Status.ReadyReplicas, replicas)
 		return false, nil
 	}
 	if len(tc.Status.TiKV.Stores) != int(replicas) {
-		glog.Infof("tidbcluster: %s/%s .status.TiKV.Stores.count(%d) != %d",
+		klog.Infof("tidbcluster: %s/%s .status.TiKV.Stores.count(%d) != %d",
 			ns, tcName, len(tc.Status.TiKV.Stores), replicas)
 		return false, nil
 	}
 	if tikvSet.Status.ReadyReplicas != tikvSet.Status.Replicas {
-		glog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != .status.Replicas(%d)",
+		klog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != .status.Replicas(%d)",
 			ns, tikvSetName, tikvSet.Status.ReadyReplicas, tikvSet.Status.Replicas)
 		return false, nil
 	}
 
 	c, found := getMemberContainer(oa.kubeCli, oa.tcStsGetter, ns, tc.Name, label.TiKVLabelVal)
 	if !found {
-		glog.Infof("statefulset: %s/%s not found containers[name=tikv] or pod %s-0",
+		klog.Infof("statefulset: %s/%s not found containers[name=tikv] or pod %s-0",
 			ns, tikvSetName, tikvSetName)
 		return false, nil
 	}
 
 	if tc.TiKVImage() != c.Image {
-		glog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=tikv].image(%s) != %s",
+		klog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=tikv].image(%s) != %s",
 			ns, tikvSetName, c.Image, tc.TiKVImage())
 		return false, nil
 	}
 
 	for _, store := range tc.Status.TiKV.Stores {
 		if store.State != v1alpha1.TiKVStateUp {
-			glog.Infof("tidbcluster: %s/%s's store(%s) state != %s", ns, tcName, store.ID, v1alpha1.TiKVStateUp)
+			klog.Infof("tidbcluster: %s/%s's store(%s) state != %s", ns, tcName, store.ID, v1alpha1.TiKVStateUp)
 			return false, nil
 		}
 	}
 
 	tikvPeerServiceName := controller.TiKVPeerMemberName(tcName)
 	if _, err := oa.kubeCli.CoreV1().Services(ns).Get(tikvPeerServiceName, metav1.GetOptions{}); err != nil {
-		glog.Errorf("failed to get peer service: %s/%s", ns, tikvPeerServiceName)
+		klog.Errorf("failed to get peer service: %s/%s", ns, tikvPeerServiceName)
 		return false, nil
 	}
 
@@ -1561,7 +1561,7 @@ func (oa *operatorActions) tidbMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, e
 
 	tidbSet, err := oa.tcStsGetter.StatefulSets(ns).Get(tidbSetName, metav1.GetOptions{})
 	if err != nil {
-		glog.Errorf("failed to get statefulset: %s/%s, %v", ns, tidbSetName, err)
+		klog.Errorf("failed to get statefulset: %s/%s, %v", ns, tidbSetName, err)
 		return false, nil
 	}
 
@@ -1570,53 +1570,53 @@ func (oa *operatorActions) tidbMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, e
 	}
 
 	if tc.Status.TiDB.StatefulSet == nil {
-		glog.Infof("tidbcluster: %s/%s .status.TiDB.StatefulSet is nil", ns, tcName)
+		klog.Infof("tidbcluster: %s/%s .status.TiDB.StatefulSet is nil", ns, tcName)
 		return false, nil
 	}
 	failureCount := len(tc.Status.TiDB.FailureMembers)
 	replicas := tc.Spec.TiDB.Replicas + int32(failureCount)
 	if *tidbSet.Spec.Replicas != replicas {
-		glog.Infof("statefulset: %s/%s .spec.Replicas(%d) != %d",
+		klog.Infof("statefulset: %s/%s .spec.Replicas(%d) != %d",
 			ns, tidbSetName, *tidbSet.Spec.Replicas, replicas)
 		return false, nil
 	}
 	if tidbSet.Status.ReadyReplicas != tc.Spec.TiDB.Replicas {
-		glog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != %d",
+		klog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != %d",
 			ns, tidbSetName, tidbSet.Status.ReadyReplicas, tc.Spec.TiDB.Replicas)
 		return false, nil
 	}
 	if len(tc.Status.TiDB.Members) != int(tc.Spec.TiDB.Replicas) {
-		glog.Infof("tidbcluster: %s/%s .status.TiDB.Members count(%d) != %d",
+		klog.Infof("tidbcluster: %s/%s .status.TiDB.Members count(%d) != %d",
 			ns, tcName, len(tc.Status.TiDB.Members), tc.Spec.TiDB.Replicas)
 		return false, nil
 	}
 	if tidbSet.Status.ReadyReplicas != tidbSet.Status.Replicas {
-		glog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != .status.Replicas(%d)",
+		klog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != .status.Replicas(%d)",
 			ns, tidbSetName, tidbSet.Status.ReadyReplicas, tidbSet.Status.Replicas)
 		return false, nil
 	}
 
 	c, found := getMemberContainer(oa.kubeCli, oa.tcStsGetter, ns, tc.Name, label.TiDBLabelVal)
 	if !found {
-		glog.Infof("statefulset: %s/%s not found containers[name=tidb] or pod %s-0",
+		klog.Infof("statefulset: %s/%s not found containers[name=tidb] or pod %s-0",
 			ns, tidbSetName, tidbSetName)
 		return false, nil
 	}
 
 	if tc.TiDBImage() != c.Image {
-		glog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=tidb].image(%s) != %s",
+		klog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=tidb].image(%s) != %s",
 			ns, tidbSetName, c.Image, tc.TiDBImage())
 		return false, nil
 	}
 
 	_, err = oa.kubeCli.CoreV1().Services(ns).Get(tidbSetName, metav1.GetOptions{})
 	if err != nil {
-		glog.Errorf("failed to get service: %s/%s", ns, tidbSetName)
+		klog.Errorf("failed to get service: %s/%s", ns, tidbSetName)
 		return false, nil
 	}
 	_, err = oa.kubeCli.CoreV1().Services(ns).Get(controller.TiDBPeerMemberName(tcName), metav1.GetOptions{})
 	if err != nil {
-		glog.Errorf("failed to get peer service: %s/%s", ns, controller.TiDBPeerMemberName(tcName))
+		klog.Errorf("failed to get peer service: %s/%s", ns, controller.TiDBPeerMemberName(tcName))
 		return false, nil
 	}
 
@@ -1634,17 +1634,17 @@ func (oa *operatorActions) reclaimPolicySyncFn(tc *v1alpha1.TidbCluster) (bool,
 	var pvcList *corev1.PersistentVolumeClaimList
 	var err error
 	if pvcList, err = oa.kubeCli.CoreV1().PersistentVolumeClaims(ns).List(listOptions); err != nil {
-		glog.Errorf("failed to list pvs for tidbcluster %s/%s, %v", ns, tcName, err)
+		klog.Errorf("failed to list pvs for tidbcluster %s/%s, %v", ns, tcName, err)
 		return false, nil
 	}
 
 	for _, pvc := range pvcList.Items {
 		pvName := pvc.Spec.VolumeName
 		if pv, err := oa.kubeCli.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}); err != nil {
-			glog.Errorf("failed to get pv: %s, error: %v", pvName, err)
+			klog.Errorf("failed to get pv: %s, error: %v", pvName, err)
 			return false, nil
 		} else if pv.Spec.PersistentVolumeReclaimPolicy != tc.Spec.PVReclaimPolicy {
-			glog.Errorf("pv: %s's reclaimPolicy is not Retain", pvName)
+			klog.Errorf("pv: %s's reclaimPolicy is not Retain", pvName)
 			return false, nil
 		}
 	}
@@ -1658,13 +1658,13 @@ func (oa *operatorActions) metaSyncFn(tc *v1alpha1.TidbCluster) (bool, error) {
 
 	pdClient, cancel, err := oa.getPDClient(tc)
 	if err != nil {
-		glog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
+		klog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
 		return false, nil
 	}
 	defer cancel()
 	var cluster *metapb.Cluster
 	if cluster, err = pdClient.GetCluster(); err != nil {
-		glog.Errorf("failed to get cluster from pdControl: %s/%s, error: %v", ns, tcName, err)
+		klog.Errorf("failed to get cluster from pdControl: %s/%s, error: %v", ns, tcName, err)
 		return false, nil
 	}
 
@@ -1677,7 +1677,7 @@ func (oa *operatorActions) metaSyncFn(tc *v1alpha1.TidbCluster) (bool, error) {
 
 	var podList *corev1.PodList
 	if podList, err = oa.kubeCli.CoreV1().Pods(ns).List(listOptions); err != nil {
-		glog.Errorf("failed to list pods for tidbcluster %s/%s, %v", ns, tcName, err)
+		klog.Errorf("failed to list pods for tidbcluster %s/%s, %v", ns, tcName, err)
 		return false, nil
 	}
 
@@ -1685,7 +1685,7 @@ outerLoop:
 	for _, pod := range podList.Items {
 		podName := pod.GetName()
 		if pod.Labels[label.ClusterIDLabelKey] != clusterID {
-			glog.Infof("tidbcluster %s/%s's pod %s's label %s not equals %s ",
+			klog.Infof("tidbcluster %s/%s's pod %s's label %s not equals %s ",
 				ns, tcName, podName, label.ClusterIDLabelKey, clusterID)
 			return false, nil
 		}
@@ -1696,7 +1696,7 @@ outerLoop:
 			var memberID string
 			members, err := pdClient.GetMembers()
 			if err != nil {
-				glog.Errorf("failed to get members for tidbcluster %s/%s, %v", ns, tcName, err)
+				klog.Errorf("failed to get members for tidbcluster %s/%s, %v", ns, tcName, err)
 				return false, nil
 			}
 			for _, member := range members.Members {
@@ -1706,7 +1706,7 @@ outerLoop:
 				}
 			}
 			if memberID == "" {
-				glog.Errorf("tidbcluster: %s/%s's pod %s label [%s] is empty",
+				klog.Errorf("tidbcluster: %s/%s's pod %s label [%s] is empty",
 					ns, tcName, podName, label.MemberIDLabelKey)
 				return false, nil
 			}
@@ -1718,7 +1718,7 @@ outerLoop:
 			var storeID string
 			stores, err := pdClient.GetStores()
 			if err != nil {
-				glog.Errorf("failed to get stores for tidbcluster %s/%s, %v", ns, tcName, err)
+				klog.Errorf("failed to get stores for tidbcluster %s/%s, %v", ns, tcName, err)
 				return false, nil
 			}
 			for _, store := range stores.Stores {
@@ -1729,7 +1729,7 @@ outerLoop:
 				}
 			}
 			if storeID == "" {
-				glog.Errorf("tidbcluster: %s/%s's pod %s label [%s] is empty",
+				klog.Errorf("tidbcluster: %s/%s's pod %s label [%s] is empty",
 					tc.GetNamespace(), tc.GetName(), podName, label.StoreIDLabelKey)
 				return false, nil
 			}
@@ -1756,7 +1756,7 @@ outerLoop:
 
 		var pvc *corev1.PersistentVolumeClaim
 		if pvc, err = oa.kubeCli.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{}); err != nil {
-			glog.Errorf("failed to get pvc %s/%s for pod %s/%s", ns, pvcName, ns, podName)
+			klog.Errorf("failed to get pvc %s/%s for pod %s/%s", ns, pvcName, ns, podName)
 			return false, nil
 		}
 		if pvc.Labels[label.ClusterIDLabelKey] != clusterID {
@@ -1783,7 +1783,7 @@ outerLoop:
 		pvName := pvc.Spec.VolumeName
 		var pv *corev1.PersistentVolume
 		if pv, err = oa.kubeCli.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}); err != nil {
-			glog.Errorf("failed to get pv for pvc %s/%s, %v", ns, pvcName, err)
+			klog.Errorf("failed to get pv for pvc %s/%s, %v", ns, pvcName, err)
 			return false, nil
 		}
 		if pv.Labels[label.NamespaceLabelKey] != ns {
@@ -1852,7 +1852,7 @@ func (oa *operatorActions) schedulerHAFn(tc *v1alpha1.TidbCluster) (bool, error)
 	var podList *corev1.PodList
 	var err error
 	if podList, err = oa.kubeCli.CoreV1().Pods(ns).List(listOptions); err != nil {
-		glog.Errorf("failed to list pods for tidbcluster %s/%s, %v", ns, tcName, err)
+		klog.Errorf("failed to list pods for tidbcluster %s/%s, %v", ns, tcName, err)
 		return false, nil
 	}
 
@@ -1894,7 +1894,7 @@ func (oa *operatorActions) podsScheduleAnnHaveDeleted(tc *v1alpha1.TidbCluster)
 
 	pvcList, err := oa.kubeCli.CoreV1().PersistentVolumeClaims(ns).List(listOptions)
 	if err != nil {
-		glog.Errorf("failed to list pvcs for tidb cluster %s/%s, err: %v", ns, tcName, err)
+		klog.Errorf("failed to list pvcs for tidb cluster %s/%s, err: %v", ns, tcName, err)
 		return false, nil
 	}
 
@@ -1906,7 +1906,7 @@ func (oa *operatorActions) podsScheduleAnnHaveDeleted(tc *v1alpha1.TidbCluster)
 		}
 
 		if _, exist := pvc.Annotations[label.AnnPVCPodScheduling]; exist {
-			glog.Errorf("tidb cluster %s/%s pvc %s has pod scheduling annotation", ns, tcName, pvcName)
+			klog.Errorf("tidb cluster %s/%s pvc %s has pod scheduling annotation", ns, tcName, pvcName)
 			return false, nil
 		}
 	}
@@ -1917,13 +1917,13 @@ func (oa *operatorActions) podsScheduleAnnHaveDeleted(tc *v1alpha1.TidbCluster)
 
 func (oa *operatorActions) checkReclaimPVSuccess(tc *v1alpha1.TidbCluster) (bool, error) {
 	// check pv reclaim for pd
 	if err := oa.checkComponentReclaimPVSuccess(tc, label.PDLabelVal); err != nil {
-		glog.Errorf(err.Error())
+		klog.Errorf(err.Error())
 		return false, nil
 	}
 
 	// check pv reclaim for tikv
 	if err := oa.checkComponentReclaimPVSuccess(tc, label.TiKVLabelVal); err != nil {
-		glog.Errorf(err.Error())
+		klog.Errorf(err.Error())
 		return false, nil
 	}
 
 	return true, nil
@@ -2001,7 +2001,7 @@ func (oa *operatorActions) getComponentPVList(tc *v1alpha1.TidbCluster, componen
 
 func (oa *operatorActions) storeLabelsIsSet(tc *v1alpha1.TidbCluster, topologyKey string) (bool, error) {
 	pdClient, cancel, err := oa.getPDClient(tc)
 	if err != nil {
-		glog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
+		klog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
 		return false, nil
 	}
 	defer cancel()
@@ -2034,28 +2034,28 @@ func (oa *operatorActions) passwordIsSet(clusterInfo *TidbClusterConfig) (bool,
 	var job *batchv1.Job
 	var err error
 	if job, err = oa.kubeCli.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}); err != nil {
-		glog.Errorf("failed to get job %s/%s, %v", ns, jobName, err)
+		klog.Errorf("failed to get job %s/%s, %v", ns, jobName, err)
 		return false, nil
 	}
 	if job.Status.Succeeded < 1 {
-		glog.Errorf("tidbcluster: %s/%s password setter job not finished", ns, tcName)
+		klog.Errorf("tidbcluster: %s/%s password setter job not finished", ns, tcName)
 		return false, nil
 	}
 
 	var db *sql.DB
 	dsn, cancel, err := oa.getTiDBDSN(ns, tcName, "test", clusterInfo.Password)
 	if err != nil {
-		glog.Errorf("failed to get TiDB DSN: %v", err)
+		klog.Errorf("failed to get TiDB DSN: %v", err)
 		return false, nil
 	}
 	defer cancel()
 	if db, err = sql.Open("mysql", dsn); err != nil {
-		glog.Errorf("can't open connection to mysql: %s, %v", dsn, err)
+		klog.Errorf("can't open connection to mysql: %s, %v", dsn, err)
 		return false, nil
 	}
 	defer db.Close()
 	if err := db.Ping(); err != nil {
-		glog.Errorf("can't connect to mysql: %s with password %s, %v", dsn, clusterInfo.Password, err)
+		klog.Errorf("can't connect to mysql: %s with password %s, %v", dsn, clusterInfo.Password, err)
 		return false, nil
 	}
 
@@ -2068,20 +2068,20 @@ func (oa *operatorActions) monitorNormal(clusterInfo *TidbClusterConfig) (bool,
 	monitorDeploymentName := fmt.Sprintf("%s-monitor", tcName)
 	monitorDeployment, err := oa.kubeCli.AppsV1().Deployments(ns).Get(monitorDeploymentName, metav1.GetOptions{})
 	if err != nil {
-		glog.Errorf("get monitor deployment: [%s/%s] failed", ns, monitorDeploymentName)
+		klog.Errorf("get monitor deployment: [%s/%s] failed", ns, monitorDeploymentName)
 		return false, nil
 	}
 	if monitorDeployment.Status.ReadyReplicas < 1 {
-		glog.Infof("monitor ready replicas %d < 1", monitorDeployment.Status.ReadyReplicas)
+		klog.Infof("monitor ready replicas %d < 1", monitorDeployment.Status.ReadyReplicas)
 		return false, nil
 	}
 	if err := oa.checkPrometheus(clusterInfo); err != nil {
-		glog.Infof("check [%s/%s]'s prometheus data failed: %v", ns, monitorDeploymentName, err)
+		klog.Infof("check [%s/%s]'s prometheus data failed: %v", ns, monitorDeploymentName, err)
 		return false, nil
 	}
 
 	if err := oa.checkGrafanaData(clusterInfo); err != nil {
-		glog.Infof("check [%s/%s]'s grafana data failed: %v", ns, monitorDeploymentName, err)
+		klog.Infof("check [%s/%s]'s grafana data failed: %v", ns, monitorDeploymentName, err)
 		return false, nil
 	}
 	return true, nil
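passwordIsSet above verifies the password job actually took effect by opening a throwaway MySQL connection to TiDB and pinging it; sql.Open alone does not dial, so the Ping is what exercises the credentials. In isolation (the DSN is illustrative; TiDB speaks the MySQL protocol, conventionally on port 4000):

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql" // driver registers itself as "mysql"
)

func canConnect(dsn string) error {
	db, err := sql.Open("mysql", dsn) // validates the DSN, does not dial yet
	if err != nil {
		return err
	}
	defer db.Close()
	return db.Ping() // actually connects and authenticates
}

func main() {
	// user:password@tcp(host:port)/database
	err := canConnect("root:secret@tcp(demo-tidb.default:4000)/test")
	fmt.Println(err)
}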
@@ -2103,17 +2103,17 @@ func (oa *operatorActions) checkTidbClusterConfigUpdated(tc *v1alpha1.TidbCluste
 
 func (oa *operatorActions) checkPdConfigUpdated(tc *v1alpha1.TidbCluster, clusterInfo *TidbClusterConfig) bool {
 	pdClient, cancel, err := oa.getPDClient(tc)
 	if err != nil {
-		glog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
+		klog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
 		return false
 	}
 	defer cancel()
 	config, err := pdClient.GetConfig()
 	if err != nil {
-		glog.Errorf("failed to get PD configuraion from tidb cluster [%s/%s]", tc.Namespace, tc.Name)
+		klog.Errorf("failed to get PD configuration from tidb cluster [%s/%s]", tc.Namespace, tc.Name)
 		return false
 	}
 	if len(clusterInfo.PDLogLevel) > 0 && clusterInfo.PDLogLevel != config.Log.Level {
-		glog.Errorf("check [%s/%s] PD logLevel configuration updated failed: desired [%s], actual [%s] not equal",
+		klog.Errorf("check [%s/%s] PD logLevel configuration updated failed: desired [%s], actual [%s] not equal",
 			tc.Namespace,
 			tc.Name,
 			clusterInfo.PDLogLevel,
@@ -2122,7 +2122,7 @@ func (oa *operatorActions) checkPdConfigUpdated(tc *v1alpha1.TidbCluster, cluste
 	}
 	// TODO: fix #487 PD configuration update for persisted configurations
 	//if clusterInfo.PDMaxReplicas > 0 && config.Replication.MaxReplicas != uint64(clusterInfo.PDMaxReplicas) {
-	//	glog.Errorf("check [%s/%s] PD maxReplicas configuration updated failed: desired [%d], actual [%d] not equal",
+	//	klog.Errorf("check [%s/%s] PD maxReplicas configuration updated failed: desired [%d], actual [%d] not equal",
 	//		tc.Namespace,
 	//		tc.Name,
 	//		clusterInfo.PDMaxReplicas,
@@ -2135,17 +2135,17 @@ func (oa *operatorActions) checkTiDBConfigUpdated(tc *v1alpha1.TidbCluster, clus
 	ordinals, err := util.GetPodOrdinals(tc, v1alpha1.TiDBMemberType)
 	if err != nil {
-		glog.Errorf("failed to get pod ordinals for tidb cluster %s/%s (member: %v)", tc.Namespace, tc.Name, v1alpha1.TiDBMemberType)
+		klog.Errorf("failed to get pod ordinals for tidb cluster %s/%s (member: %v)", tc.Namespace, tc.Name, v1alpha1.TiDBMemberType)
 		return false
 	}
 	for i := range ordinals {
 		config, err := oa.tidbControl.GetSettings(tc, int32(i))
 		if err != nil {
-			glog.Errorf("failed to get TiDB configuration from cluster [%s/%s], ordinal: %d, error: %v", tc.Namespace, tc.Name, i, err)
+			klog.Errorf("failed to get TiDB configuration from cluster [%s/%s], ordinal: %d, error: %v", tc.Namespace, tc.Name, i, err)
 			return false
 		}
 		if clusterInfo.TiDBTokenLimit > 0 && uint(clusterInfo.TiDBTokenLimit) != config.TokenLimit {
-			glog.Errorf("check [%s/%s] TiDB instance [%d] configuration updated failed: desired [%d], actual [%d] not equal",
+			klog.Errorf("check [%s/%s] TiDB instance [%d] configuration updated failed: desired [%d], actual [%d] not equal",
 				tc.Namespace, tc.Name, i, clusterInfo.TiDBTokenLimit, config.TokenLimit)
 			return false
 		}
@@ -2193,7 +2193,7 @@ func getDatasourceID(addr string) (int, error) {
 	defer func() {
 		err := resp.Body.Close()
 		if err != nil {
-			glog.Warningf("close response failed, err: %v", err)
+			klog.Warningf("close response failed, err: %v", err)
 		}
 	}()
 
@@ -2234,7 +2234,7 @@ func notFound(res string) bool {
 
 func (oa *operatorActions) cloneOperatorRepo() error {
 	cmd := fmt.Sprintf("git clone %s %s", oa.cfg.OperatorRepoUrl, oa.cfg.OperatorRepoDir)
-	glog.Info(cmd)
+	klog.Info(cmd)
 	res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
 	if err != nil && !strings.Contains(string(res), "already exists") {
 		return fmt.Errorf("failed to clone tidb-operator repository: %v, %s", err, string(res))
string) error { if tagName != "v1.0.0" { cmd = cmd + fmt.Sprintf(" && cp -rf charts/tidb-drainer %s", oa.drainerChartPath(tagName)) } - glog.Info(cmd) + klog.Info(cmd) res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput() if err != nil { return fmt.Errorf("failed to check tag: %s, %v, %s", tagName, err, string(res)) @@ -2266,7 +2266,7 @@ func (oa *operatorActions) checkoutTag(tagName string) error { func (oa *operatorActions) DeployAdHocBackup(info *TidbClusterConfig) error { oa.EmitEvent(info, "DeployAdHocBackup") - glog.Infof("begin to deploy adhoc backup cluster[%s] namespace[%s]", info.ClusterName, info.Namespace) + klog.Infof("begin to deploy adhoc backup cluster[%s] namespace[%s]", info.ClusterName, info.Namespace) var tsStr string getTSFn := func() (bool, error) { @@ -2275,7 +2275,7 @@ func (oa *operatorActions) DeployAdHocBackup(info *TidbClusterConfig) error { if oa.fw != nil { localHost, localPort, cancel, err := portforward.ForwardOnePort(oa.fw, info.Namespace, fmt.Sprintf("svc/%s-tidb", info.ClusterName), 4000) if err != nil { - glog.Errorf("failed to forward port %d for %s/%s", 4000, info.Namespace, info.ClusterName) + klog.Errorf("failed to forward port %d for %s/%s", 4000, info.Namespace, info.ClusterName) return false, nil } defer cancel() @@ -2295,11 +2295,11 @@ func (oa *operatorActions) DeployAdHocBackup(info *TidbClusterConfig) error { mysqlHost, mysqlPort, ) - glog.Info(getTSCmd) + klog.Info(getTSCmd) res, err := exec.Command("/bin/bash", "-c", getTSCmd).CombinedOutput() if err != nil { - glog.Errorf("failed to get ts %v, %s", err, string(res)) + klog.Errorf("failed to get ts %v, %s", err, string(res)) return false, nil } tsStr = string(res) @@ -2325,7 +2325,7 @@ func (oa *operatorActions) DeployAdHocBackup(info *TidbClusterConfig) error { fullbackupName := fmt.Sprintf("%s-backup", info.ClusterName) cmd := fmt.Sprintf("helm install -n %s --namespace %s %s --set-string %s", fullbackupName, info.Namespace, oa.backupChartPath(info.OperatorTag), setString) - glog.Infof("install adhoc deployment [%s]", cmd) + klog.Infof("install adhoc deployment [%s]", cmd) res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput() if err != nil { return fmt.Errorf("failed to launch adhoc backup job: %v, %s", err, string(res)) @@ -2335,7 +2335,7 @@ func (oa *operatorActions) DeployAdHocBackup(info *TidbClusterConfig) error { } func (oa *operatorActions) CheckAdHocBackup(info *TidbClusterConfig) (string, error) { - glog.Infof("checking adhoc backup cluster[%s] namespace[%s]", info.ClusterName, info.Namespace) + klog.Infof("checking adhoc backup cluster[%s] namespace[%s]", info.ClusterName, info.Namespace) ns := info.Namespace var ts string @@ -2343,11 +2343,11 @@ func (oa *operatorActions) CheckAdHocBackup(info *TidbClusterConfig) (string, er fn := func() (bool, error) { job, err := oa.kubeCli.BatchV1().Jobs(info.Namespace).Get(jobName, metav1.GetOptions{}) if err != nil { - glog.Errorf("failed to get jobs %s ,%v", jobName, err) + klog.Errorf("failed to get jobs %s ,%v", jobName, err) return false, nil } if job.Status.Succeeded == 0 { - glog.Errorf("cluster [%s] back up job is not completed, please wait! ", info.ClusterName) + klog.Errorf("cluster [%s] back up job is not completed, please wait! 
", info.ClusterName) return false, nil } @@ -2356,7 +2356,7 @@ func (oa *operatorActions) CheckAdHocBackup(info *TidbClusterConfig) (string, er } podList, err := oa.kubeCli.CoreV1().Pods(ns).List(listOptions) if err != nil { - glog.Errorf("failed to list pods: %v", err) + klog.Errorf("failed to list pods: %v", err) return false, nil } @@ -2369,23 +2369,23 @@ func (oa *operatorActions) CheckAdHocBackup(info *TidbClusterConfig) (string, er } } if podName == "" { - glog.Errorf("failed to find the ad-hoc backup: %s podName", jobName) + klog.Errorf("failed to find the ad-hoc backup: %s podName", jobName) return false, nil } getTsCmd := fmt.Sprintf("kubectl logs -n %s %s | grep 'commitTS = ' | cut -d '=' -f2 | sed 's/ *//g'", ns, podName) tsData, err := exec.Command("/bin/sh", "-c", getTsCmd).CombinedOutput() if err != nil { - glog.Errorf("failed to get ts of pod %s, %v", podName, err) + klog.Errorf("failed to get ts of pod %s, %v", podName, err) return false, nil } if string(tsData) == "" { - glog.Errorf("ts is empty pod %s", podName) + klog.Errorf("ts is empty pod %s", podName) return false, nil } ts = strings.TrimSpace(string(tsData)) - glog.Infof("ad-hoc backup ts: %s", ts) + klog.Infof("ad-hoc backup ts: %s", ts) return true, nil } @@ -2401,7 +2401,7 @@ func (oa *operatorActions) CheckAdHocBackup(info *TidbClusterConfig) (string, er func (oa *operatorActions) Restore(from *TidbClusterConfig, to *TidbClusterConfig) error { oa.EmitEvent(from, fmt.Sprintf("RestoreBackup: target: %s", to.ClusterName)) oa.EmitEvent(to, fmt.Sprintf("RestoreBackup: source: %s", from.ClusterName)) - glog.Infof("deploying restore, the data is from cluster[%s/%s] to cluster[%s/%s]", + klog.Infof("deploying restore, the data is from cluster[%s/%s] to cluster[%s/%s]", from.Namespace, from.ClusterName, to.Namespace, to.ClusterName) sets := map[string]string{ @@ -2417,7 +2417,7 @@ func (oa *operatorActions) Restore(from *TidbClusterConfig, to *TidbClusterConfi restoreName := fmt.Sprintf("%s-restore", to.ClusterName) cmd := fmt.Sprintf("helm install -n %s --namespace %s %s --set-string %s", restoreName, to.Namespace, oa.backupChartPath(to.OperatorTag), setString) - glog.Infof("install restore [%s]", cmd) + klog.Infof("install restore [%s]", cmd) res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput() if err != nil { return fmt.Errorf("failed to launch restore job: %v, %s", err, string(res)) @@ -2427,23 +2427,23 @@ func (oa *operatorActions) Restore(from *TidbClusterConfig, to *TidbClusterConfi } func (oa *operatorActions) CheckRestore(from *TidbClusterConfig, to *TidbClusterConfig) error { - glog.Infof("begin to check restore backup cluster[%s] namespace[%s]", from.ClusterName, from.Namespace) + klog.Infof("begin to check restore backup cluster[%s] namespace[%s]", from.ClusterName, from.Namespace) jobName := fmt.Sprintf("%s-restore-%s", to.ClusterName, from.BackupName) fn := func() (bool, error) { job, err := oa.kubeCli.BatchV1().Jobs(to.Namespace).Get(jobName, metav1.GetOptions{}) if err != nil { - glog.Errorf("failed to get jobs %s ,%v", jobName, err) + klog.Errorf("failed to get jobs %s ,%v", jobName, err) return false, nil } if job.Status.Succeeded == 0 { - glog.Errorf("cluster [%s] restore job is not completed, please wait! ", to.ClusterName) + klog.Errorf("cluster [%s] restore job is not completed, please wait! 
", to.ClusterName) return false, nil } _, err = oa.DataIsTheSameAs(to, from) if err != nil { // ad-hoc restore don't check the data really, just logging - glog.Infof("check restore: %v", err) + klog.Infof("check restore: %v", err) } return true, nil @@ -2527,7 +2527,7 @@ func (oa *operatorActions) DataIsTheSameAs(tc, otherInfo *TidbClusterConfig) (bo otherInfo.Namespace, otherInfo.ClusterName, tableName, otherCnt) return false, err } - glog.Infof("cluster %s/%s's table %s count(*) = %d and cluster %s/%s's table %s count(*) = %d", + klog.Infof("cluster %s/%s's table %s count(*) = %d and cluster %s/%s's table %s count(*) = %d", tc.Namespace, tc.ClusterName, tableName, cnt, otherInfo.Namespace, otherInfo.ClusterName, tableName, otherCnt) } @@ -2578,7 +2578,7 @@ func releaseIsExist(err error) bool { func (oa *operatorActions) DeployScheduledBackup(info *TidbClusterConfig) error { oa.EmitEvent(info, "DeploySchedulerBackup") - glog.Infof("begin to deploy scheduled backup") + klog.Infof("begin to deploy scheduled backup") cron := fmt.Sprintf("'*/1 * * * *'") sets := map[string]string{ @@ -2596,7 +2596,7 @@ func (oa *operatorActions) DeployScheduledBackup(info *TidbClusterConfig) error return err } - glog.Infof("scheduled-backup deploy [%s]", cmd) + klog.Infof("scheduled-backup deploy [%s]", cmd) res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput() if err != nil { return fmt.Errorf("failed to launch scheduler backup job: %v, %s", err, string(res)) @@ -2605,7 +2605,7 @@ func (oa *operatorActions) DeployScheduledBackup(info *TidbClusterConfig) error } func (oa *operatorActions) disableScheduledBackup(info *TidbClusterConfig) error { - glog.Infof("disabling scheduled backup") + klog.Infof("disabling scheduled backup") sets := map[string]string{ "clusterName": info.ClusterName, @@ -2617,7 +2617,7 @@ func (oa *operatorActions) disableScheduledBackup(info *TidbClusterConfig) error return err } - glog.Infof("scheduled-backup disable [%s]", cmd) + klog.Infof("scheduled-backup disable [%s]", cmd) res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput() if err != nil { return fmt.Errorf("failed to disable scheduler backup job: %v, %s", err, string(res)) @@ -2626,19 +2626,19 @@ func (oa *operatorActions) disableScheduledBackup(info *TidbClusterConfig) error } func (oa *operatorActions) CheckScheduledBackup(info *TidbClusterConfig) error { - glog.Infof("checking scheduler backup for tidb cluster[%s/%s]", info.Namespace, info.ClusterName) + klog.Infof("checking scheduler backup for tidb cluster[%s/%s]", info.Namespace, info.ClusterName) jobName := fmt.Sprintf("%s-scheduled-backup", info.ClusterName) fn := func() (bool, error) { job, err := oa.kubeCli.BatchV1beta1().CronJobs(info.Namespace).Get(jobName, metav1.GetOptions{}) if err != nil { - glog.Errorf("failed to get cronjobs %s ,%v", jobName, err) + klog.Errorf("failed to get cronjobs %s ,%v", jobName, err) return false, nil } jobs, err := oa.kubeCli.BatchV1().Jobs(info.Namespace).List(metav1.ListOptions{}) if err != nil { - glog.Errorf("failed to list jobs %s ,%v", info.Namespace, err) + klog.Errorf("failed to list jobs %s ,%v", info.Namespace, err) return false, nil } @@ -2650,7 +2650,7 @@ func (oa *operatorActions) CheckScheduledBackup(info *TidbClusterConfig) error { } if len(backupJobs) == 0 { - glog.Errorf("cluster [%s] scheduler jobs is creating, please wait!", info.ClusterName) + klog.Errorf("cluster [%s] scheduler jobs is creating, please wait!", info.ClusterName) return false, nil } @@ -2666,12 +2666,12 @@ func (oa 
*operatorActions) CheckScheduledBackup(info *TidbClusterConfig) error { } if succededJobCount >= 3 { - glog.Infof("cluster [%s/%s] scheduled back up job completed count: %d", + klog.Infof("cluster [%s/%s] scheduled back up job completed count: %d", info.Namespace, info.ClusterName, succededJobCount) return true, nil } - glog.Infof("cluster [%s/%s] scheduled back up job is not completed, please wait! ", + klog.Infof("cluster [%s/%s] scheduled back up job is not completed, please wait! ", info.Namespace, info.ClusterName) return false, nil } @@ -2704,7 +2704,7 @@ func getParentUIDFromJob(j batchv1.Job) (types.UID, bool) { } if controllerRef.Kind != "CronJob" { - glog.Infof("Job with non-CronJob parent, name %s namespace %s", j.Name, j.Namespace) + klog.Infof("Job with non-CronJob parent, name %s namespace %s", j.Name, j.Namespace) return types.UID(""), false } @@ -2762,7 +2762,7 @@ func (oa *operatorActions) getBackupDir(info *TidbClusterConfig) ([]string, erro _, err = oa.kubeCli.CoreV1().Pods(info.Namespace).Create(pod) if err != nil && !errors.IsAlreadyExists(err) { - glog.Errorf("cluster: [%s/%s] create get backup dir pod failed, error :%v", info.Namespace, info.ClusterName, err) + klog.Errorf("cluster: [%s/%s] create get backup dir pod failed, error :%v", info.Namespace, info.ClusterName, err) return nil, err } @@ -2785,12 +2785,12 @@ func (oa *operatorActions) getBackupDir(info *TidbClusterConfig) ([]string, erro cmd := fmt.Sprintf("kubectl exec %s -n %s ls /data", backupDirPodName, info.Namespace) res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput() if err != nil { - glog.Errorf("cluster:[%s/%s] exec :%s failed,error:%v,result:%s", info.Namespace, info.ClusterName, cmd, err, string(res)) + klog.Errorf("cluster:[%s/%s] exec :%s failed,error:%v,result:%s", info.Namespace, info.ClusterName, cmd, err, string(res)) return nil, err } dirs := strings.Split(string(res), "\n") - glog.Infof("dirs in pod info name [%s] dir name [%s]", scheduledPvcName, strings.Join(dirs, ",")) + klog.Infof("dirs in pod info name [%s] dir name [%s]", scheduledPvcName, strings.Join(dirs, ",")) return dirs, nil } @@ -2805,11 +2805,11 @@ func (oa *operatorActions) DeployIncrementalBackup(from *TidbClusterConfig, to * } if withDrainer { oa.EmitEvent(from, fmt.Sprintf("DeployIncrementalBackup: slave: %s", to.ClusterName)) - glog.Infof("begin to deploy incremental backup, source cluster[%s/%s], target cluster [%s/%s]", + klog.Infof("begin to deploy incremental backup, source cluster[%s/%s], target cluster [%s/%s]", from.Namespace, from.ClusterName, to.Namespace, to.ClusterName) } else { oa.EmitEvent(from, "Enable pump cluster") - glog.Infof("begin to enable pump for cluster[%s/%s]", + klog.Infof("begin to enable pump for cluster[%s/%s]", from.Namespace, from.ClusterName) } @@ -2860,7 +2860,7 @@ func (oa *operatorActions) DeployIncrementalBackup(from *TidbClusterConfig, to * if err != nil { return err } - glog.Infof(cmd) + klog.Infof(cmd) res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput() if err != nil { return fmt.Errorf("failed to launch incremental backup job: %v, %s", err, string(res)) @@ -2869,17 +2869,17 @@ func (oa *operatorActions) DeployIncrementalBackup(from *TidbClusterConfig, to * } func (oa *operatorActions) CheckIncrementalBackup(info *TidbClusterConfig, withDrainer bool) error { - glog.Infof("begin to check incremental backup cluster[%s] namespace[%s]", info.ClusterName, info.Namespace) + klog.Infof("begin to check incremental backup cluster[%s] namespace[%s]", info.ClusterName, 
info.Namespace) pumpStatefulSetName := fmt.Sprintf("%s-pump", info.ClusterName) fn := func() (bool, error) { pumpStatefulSet, err := oa.kubeCli.AppsV1().StatefulSets(info.Namespace).Get(pumpStatefulSetName, metav1.GetOptions{}) if err != nil { - glog.Errorf("failed to get jobs %s ,%v", pumpStatefulSetName, err) + klog.Errorf("failed to get pump statefulset %s, %v", pumpStatefulSetName, err) return false, nil } if pumpStatefulSet.Status.Replicas != pumpStatefulSet.Status.ReadyReplicas { - glog.Errorf("pump replicas is not ready, please wait ! %s ", pumpStatefulSetName) + klog.Errorf("pump replicas are not ready, please wait! %s", pumpStatefulSetName) return false, nil } @@ -2895,7 +2895,7 @@ func (oa *operatorActions) CheckIncrementalBackup(info *TidbClusterConfig, withD pods, err := oa.kubeCli.CoreV1().Pods(info.Namespace).List(listOps) if err != nil { - glog.Errorf("failed to get pods via pump labels %s ,%v", pumpStatefulSetName, err) + klog.Errorf("failed to get pods via pump labels %s, %v", pumpStatefulSetName, err) return false, nil } @@ -2905,7 +2905,7 @@ func (oa *operatorActions) CheckIncrementalBackup(info *TidbClusterConfig, withD for _, pod := range pods.Items { if !oa.pumpHealth(info, pod.Name) { - glog.Errorf("some pods is not health %s", pumpStatefulSetName) + klog.Errorf("some pods are not healthy, %s", pumpStatefulSetName) return false, nil } @@ -2913,11 +2913,11 @@ func (oa *operatorActions) CheckIncrementalBackup(info *TidbClusterConfig, withD continue } - glog.Info(pod.Spec.Affinity) + klog.Info(pod.Spec.Affinity) if pod.Spec.Affinity == nil || pod.Spec.Affinity.PodAntiAffinity == nil || len(pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) != 1 { return true, fmt.Errorf("pump pod %s/%s should have affinity set", pod.Namespace, pod.Name) } - glog.Info(pod.Spec.Tolerations) + klog.Info(pod.Spec.Tolerations) foundKey := false for _, tor := range pod.Spec.Tolerations { if tor.Key == "node-role" { @@ -2937,11 +2937,11 @@ func (oa *operatorActions) CheckIncrementalBackup(info *TidbClusterConfig, withD drainerStatefulSetName := fmt.Sprintf("%s-drainer", info.ClusterName) drainerStatefulSet, err := oa.kubeCli.AppsV1().StatefulSets(info.Namespace).Get(drainerStatefulSetName, metav1.GetOptions{}) if err != nil { - glog.Errorf("failed to get jobs %s ,%v", pumpStatefulSetName, err) + klog.Errorf("failed to get drainer statefulset %s, %v", drainerStatefulSetName, err) return false, nil } if drainerStatefulSet.Status.Replicas != drainerStatefulSet.Status.ReadyReplicas { - glog.Errorf("drainer replicas is not ready, please wait ! %s ", pumpStatefulSetName) + klog.Errorf("drainer replicas are not ready, please wait! %s", drainerStatefulSetName)
return false, nil } @@ -2961,7 +2961,7 @@ func (oa *operatorActions) CheckIncrementalBackup(info *TidbClusterConfig, withD } for _, pod := range pods.Items { if !oa.drainerHealth(info, pod.Name) { - glog.Errorf("some pods is not health %s", drainerStatefulSetName) + klog.Errorf("some pods are not healthy, %s", drainerStatefulSetName) return false, nil } @@ -2969,11 +2969,11 @@ func (oa *operatorActions) CheckIncrementalBackup(info *TidbClusterConfig, withD continue } - glog.Info(pod.Spec.Affinity) + klog.Info(pod.Spec.Affinity) if pod.Spec.Affinity == nil || pod.Spec.Affinity.PodAntiAffinity == nil || len(pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) != 1 { return true, fmt.Errorf("drainer pod %s/%s should have spec.affinity set", pod.Namespace, pod.Name) } - glog.Info(pod.Spec.Tolerations) + klog.Info(pod.Spec.Tolerations) foundKey := false for _, tor := range pod.Spec.Tolerations { if tor.Key == "node-role" { @@ -3007,7 +3007,7 @@ func (oa *operatorActions) RegisterWebHookAndServiceOrDie(configName, namespace, func (oa *operatorActions) RegisterWebHookAndService(configName, namespace, service string, context *apimachinery.CertContext) error { client := oa.kubeCli - glog.Infof("Registering the webhook via the AdmissionRegistration API") + klog.Infof("Registering the webhook via the AdmissionRegistration API") failurePolicy := admissionV1beta1.Fail @@ -3040,7 +3040,7 @@ func (oa *operatorActions) RegisterWebHookAndService(configName, namespace, serv }) if err != nil { - glog.Errorf("registering webhook config %s with namespace %s error %v", configName, namespace, err) + klog.Errorf("registering webhook config %s with namespace %s error %v", configName, namespace, err) return err } @@ -3079,7 +3079,7 @@ func (oa *operatorActions) pumpHealth(info *TidbClusterConfig, podName string) b if oa.fw != nil { localHost, localPort, cancel, err := portforward.ForwardOnePort(oa.fw, info.Namespace, fmt.Sprintf("pod/%s", podName), 8250) if err != nil { - glog.Errorf("failed to forward port %d for %s/%s", 8250, info.Namespace, podName) + klog.Errorf("failed to forward port %d for %s/%s", 8250, info.Namespace, podName) return false } defer cancel() @@ -3090,27 +3090,27 @@ func (oa *operatorActions) pumpHealth(info *TidbClusterConfig, podName string) b pumpHealthURL := fmt.Sprintf("http://%s/status", addr) res, err := http.Get(pumpHealthURL) if err != nil { - glog.Errorf("cluster:[%s] call %s failed,error:%v", info.ClusterName, pumpHealthURL, err) + klog.Errorf("cluster:[%s] call %s failed, error: %v", info.ClusterName, pumpHealthURL, err) return false } if res.StatusCode >= 400 { - glog.Errorf("Error response %v", res.StatusCode) + klog.Errorf("Error response %v", res.StatusCode) return false } body, err := ioutil.ReadAll(res.Body) if err != nil { - glog.Errorf("cluster:[%s] read response body failed,error:%v", info.ClusterName, err) + klog.Errorf("cluster:[%s] read response body failed, error: %v", info.ClusterName, err) return false } healths := pumpStatus{} err = json.Unmarshal(body, &healths) if err != nil { - glog.Errorf("cluster:[%s] unmarshal failed,error:%v", info.ClusterName, err) + klog.Errorf("cluster:[%s] unmarshal failed, error: %v", info.ClusterName, err) return false } for _, status := range healths.StatusMap { if status.State != "online" { - glog.Errorf("cluster:[%s] pump's state is not online", info.ClusterName) + klog.Errorf("cluster:[%s] pump's state is not online", info.ClusterName) return false } } @@ -3145,30 +3145,30 @@
func (oa *operatorActions) drainerHealth(info *TidbClusterConfig, podName string PreserveWhitespace: false, }) if err != nil { - glog.Errorf("failed to run command '%s' in pod %s/%q", cmd, info.Namespace, podName) + klog.Errorf("failed to run command '%s' in pod %s/%q", cmd, info.Namespace, podName) return false } body = []byte(stdout) } else { res, err := http.Get(drainerHealthURL) if err != nil { - glog.Errorf("cluster:[%s] call %s failed,error:%v", info.ClusterName, drainerHealthURL, err) + klog.Errorf("cluster:[%s] call %s failed, error: %v", info.ClusterName, drainerHealthURL, err) return false } if res.StatusCode >= 400 { - glog.Errorf("Error response %v", res.StatusCode) + klog.Errorf("Error response %v", res.StatusCode) return false } body, err = ioutil.ReadAll(res.Body) if err != nil { - glog.Errorf("cluster:[%s] read response body failed,error:%v", info.ClusterName, err) + klog.Errorf("cluster:[%s] read response body failed, error: %v", info.ClusterName, err) return false } } healths := drainerStatus{} err = json.Unmarshal(body, &healths) if err != nil { - glog.Errorf("cluster:[%s] unmarshal failed,error:%v", info.ClusterName, err) + klog.Errorf("cluster:[%s] unmarshal failed, error: %v", info.ClusterName, err) return false } return len(healths.PumpPos) > 0 @@ -3178,7 +3178,7 @@ func (oa *operatorActions) EmitEvent(info *TidbClusterConfig, message string) { oa.lock.Lock() defer oa.lock.Unlock() - glog.Infof("Event: %s", message) + klog.Infof("Event: %s", message) if !oa.eventWorkerRunning { return @@ -3215,7 +3215,7 @@ func (oa *operatorActions) RunEventWorker() { oa.lock.Lock() oa.eventWorkerRunning = true oa.lock.Unlock() - glog.Infof("Event worker started") + klog.Infof("Event worker started") wait.Forever(oa.eventWorker, 10*time.Second) } @@ -3245,12 +3245,12 @@ func (oa *operatorActions) eventWorker() { }, } if err := client.AddAnnotation(anno); err != nil { - glog.V(4).Infof("cluster:[%s/%s] error recording event: %s, reason: %v", + klog.V(4).Infof("cluster:[%s/%s] error recording event: %s, reason: %v", ns, clusterName, ev.message, err) retryEvents = append(retryEvents, ev) continue } - glog.Infof("cluster: [%s/%s] recoding event: %s", ns, clusterName, ev.message) + klog.Infof("cluster: [%s/%s] recording event: %s", ns, clusterName, ev.message) } ce := oa.clusterEvents[key] @@ -3279,7 +3279,7 @@ func (oa *operatorActions) checkManualPauseComponent(info *TidbClusterConfig, co fn := func() (bool, error) { if tc, err = oa.cli.PingcapV1alpha1().TidbClusters(ns).Get(info.ClusterName, metav1.GetOptions{}); err != nil { - glog.Infof("failed to get tidbcluster: [%s/%s], %v", ns, info.ClusterName, err) + klog.Infof("failed to get tidbcluster: [%s/%s], %v", ns, info.ClusterName, err) return false, nil } @@ -3289,19 +3289,19 @@ func (oa *operatorActions) checkManualPauseComponent(info *TidbClusterConfig, co setName = controller.TiDBMemberName(info.ClusterName) tidbPod, err := oa.kubeCli.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { - glog.Infof("fail to get pod in CheckManualPauseCompoent tidb [%s/%s]", ns, podName) + klog.Infof("failed to get pod in checkManualPauseComponent tidb [%s/%s]", ns, podName) return false, nil } if tidbPod.Labels[v1.ControllerRevisionHashLabelKey] == tc.Status.TiDB.StatefulSet.UpdateRevision && tc.Status.TiDB.Phase == v1alpha1.UpgradePhase { if member, ok := tc.Status.TiDB.Members[tidbPod.Name]; !ok || !member.Health { - glog.Infof("wait for tidb pod [%s/%s] ready member health %t ok %t", ns, podName, member.Health, ok) + klog.Infof("wait for tidb pod [%s/%s] ready, member health: %t, ok: %t", ns, podName, member.Health, ok)
} else { return true, nil } } else { - glog.Infof("tidbset is not in upgrade phase or pod is not upgrade done [%s/%s]", ns, podName) + klog.Infof("tidb statefulset is not in upgrade phase or pod upgrade is not done [%s/%s]", ns, podName) } return false, nil @@ -3310,7 +3310,7 @@ func (oa *operatorActions) checkManualPauseComponent(info *TidbClusterConfig, co setName = controller.TiKVMemberName(info.ClusterName) tikvPod, err := oa.kubeCli.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { - glog.Infof("fail to get pod in CheckManualPauseCompoent tikv [%s/%s]", ns, podName) + klog.Infof("failed to get pod in checkManualPauseComponent tikv [%s/%s]", ns, podName) return false, nil } @@ -3324,12 +3324,12 @@ func (oa *operatorActions) checkManualPauseComponent(info *TidbClusterConfig, co } } if tikvStore == nil || tikvStore.State != v1alpha1.TiKVStateUp { - glog.Infof("wait for tikv pod [%s/%s] ready store state %s", ns, podName, tikvStore.State) + klog.Infof("wait for tikv pod [%s/%s] ready, store state: %s", ns, podName, tikvStore.State) } else { return true, nil } } else { - glog.Infof("tikvset is not in upgrade phase or pod is not upgrade done [%s/%s]", ns, podName) + klog.Infof("tikv statefulset is not in upgrade phase or pod upgrade is not done [%s/%s]", ns, podName) } return false, nil @@ -3362,24 +3362,24 @@ func (oa *operatorActions) CheckUpgradeComplete(info *TidbClusterConfig) error { if err := wait.PollImmediate(15*time.Second, 30*time.Minute, func() (done bool, err error) { tc, err := oa.cli.PingcapV1alpha1().TidbClusters(ns).Get(tcName, metav1.GetOptions{}) if err != nil { - glog.Errorf("checkUpgradeComplete, [%s/%s] cannot get tidbcluster, %v", ns, tcName, err) + klog.Errorf("checkUpgradeComplete, [%s/%s] cannot get tidbcluster, %v", ns, tcName, err) return false, nil } if tc.Status.PD.Phase == v1alpha1.UpgradePhase { - glog.Errorf("checkUpgradeComplete, [%s/%s] PD is still upgrading", ns, tcName) + klog.Errorf("checkUpgradeComplete, [%s/%s] PD is still upgrading", ns, tcName) return false, nil } if tc.Status.TiKV.Phase == v1alpha1.UpgradePhase { - glog.Errorf("checkUpgradeComplete, [%s/%s] TiKV is still upgrading", ns, tcName) + klog.Errorf("checkUpgradeComplete, [%s/%s] TiKV is still upgrading", ns, tcName) return false, nil } if tc.Status.TiDB.Phase == v1alpha1.UpgradePhase { - glog.Errorf("checkUpgradeComplete, [%s/%s] TiDB is still upgrading", ns, tcName) + klog.Errorf("checkUpgradeComplete, [%s/%s] TiDB is still upgrading", ns, tcName) return false, nil } return true, nil }); err != nil { - glog.Errorf("failed to wait upgrade complete [%s/%s], %v", ns, tcName, err) + klog.Errorf("failed to wait for upgrade to complete [%s/%s], %v", ns, tcName, err) return err } return nil @@ -3407,7 +3407,7 @@ func (oa *operatorActions) CheckInitSQL(info *TidbClusterConfig) error { return true, nil }); err != nil { - glog.Errorf("failed to check init sql complete [%s/%s], %v", ns, tcName, err) + klog.Errorf("failed to check init sql complete [%s/%s], %v", ns, tcName, err) return err } return nil @@ -3427,7 +3427,7 @@ func (oa *operatorActions) WaitForTidbClusterReady(tc *v1alpha1.TidbCluster, tim var local *v1alpha1.TidbCluster var err error if local, err = oa.cli.PingcapV1alpha1().TidbClusters(tc.Namespace).Get(tc.Name, metav1.GetOptions{}); err != nil { - glog.Errorf("failed to get tidbcluster: %s/%s, %v", tc.Namespace, tc.Name, err) + klog.Errorf("failed to get tidbcluster: %s/%s, %v", tc.Namespace, tc.Name, err) return false, nil
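// Nearly every check in this file follows the same polling idiom seen above:
// the condition func returns (false, nil) on transient failures so that
// wait.PollImmediate keeps retrying, (true, nil) on success, and a non-nil
// error only to abort the wait early. A minimal sketch of the pattern, under
// the assumption that only k8s.io/apimachinery/pkg/util/wait and k8s.io/klog
// are available; the isReady callback is hypothetical:
package polling

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog"
)

// WaitUntilReady polls isReady every 15s for up to 30m, mirroring the
// interval and timeout used by CheckUpgradeComplete above.
func WaitUntilReady(isReady func() (bool, error)) error {
	return wait.PollImmediate(15*time.Second, 30*time.Minute, func() (bool, error) {
		ok, err := isReady()
		if err != nil {
			// Log and swallow the error so polling continues; returning it
			// would abort the whole wait immediately.
			klog.Errorf("check not passed yet, will retry: %v", err)
			return false, nil
		}
		return ok, nil
	})
}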
} @@ -3484,7 +3484,7 @@ func StartValidatingAdmissionWebhookServerOrDie(context *apimachinery.CertContex if err := server.ListenAndServeTLS("", ""); err != nil { sendErr := slack.SendErrMsg(err.Error()) if sendErr != nil { - glog.Error(sendErr) + klog.Error(sendErr) } panic(fmt.Sprintf("failed to start webhook server %v", err)) } diff --git a/tests/backup.go b/tests/backup.go index c13e0a7d1d..3e4ea7cff9 100644 --- a/tests/backup.go +++ b/tests/backup.go @@ -27,7 +27,7 @@ import ( "golang.org/x/sync/errgroup" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -70,13 +70,13 @@ func (oa *operatorActions) BackupAndRestoreToMultipleClusters(source *TidbCluste err = oa.DeployAdHocBackup(source) if err != nil { - glog.Errorf("cluster:[%s] deploy happen error: %v", source.ClusterName, err) + klog.Errorf("cluster:[%s] failed to deploy ad-hoc backup, error: %v", source.ClusterName, err) return err } ts, err := oa.CheckAdHocBackup(source) if err != nil { - glog.Errorf("cluster:[%s] deploy happen error: %v", source.ClusterName, err) + klog.Errorf("cluster:[%s] failed to check ad-hoc backup, error: %v", source.ClusterName, err) return err } @@ -86,19 +86,19 @@ func (oa *operatorActions) BackupAndRestoreToMultipleClusters(source *TidbCluste prepareIncremental := func(source *TidbClusterConfig, target BackupTarget) error { err = oa.CheckTidbClusterStatus(target.TargetCluster) if err != nil { - glog.Errorf("cluster:[%s] deploy faild error: %v", target.TargetCluster.ClusterName, err) + klog.Errorf("cluster:[%s] deploy failed, error: %v", target.TargetCluster.ClusterName, err) return err } err = oa.Restore(source, target.TargetCluster) if err != nil { - glog.Errorf("from cluster:[%s] to cluster [%s] restore happen error: %v", + klog.Errorf("from cluster:[%s] to cluster [%s] restore failed, error: %v", source.ClusterName, target.TargetCluster.ClusterName, err) return err } err = oa.CheckRestore(source, target.TargetCluster) if err != nil { - glog.Errorf("from cluster:[%s] to cluster [%s] restore failed error: %v", + klog.Errorf("from cluster:[%s] to cluster [%s] restore check failed, error: %v", source.ClusterName, target.TargetCluster.ClusterName, err) return err } @@ -158,13 +158,13 @@ func (oa *operatorActions) BackupAndRestoreToMultipleClusters(source *TidbCluste if err != nil { return err } - glog.Infof("waiting 30 seconds to insert into more records") + klog.Infof("waiting 30 seconds to insert more records") time.Sleep(30 * time.Second) - glog.Infof("cluster[%s] stop insert data", source.ClusterName) + klog.Infof("cluster[%s] stop inserting data", source.ClusterName) oa.StopInsertDataTo(source) - glog.Infof("wait on-going inserts to be drained for 60 seconds") + klog.Infof("waiting 60 seconds for on-going inserts to be drained") time.Sleep(60 * time.Second) dsn, cancel, err := oa.getTiDBDSN(source.Namespace, source.ClusterName, "test", source.Password) @@ -190,7 +190,7 @@ func (oa *operatorActions) BackupAndRestoreToMultipleClusters(source *TidbCluste oa.BeginInsertDataToOrDie(source) err = oa.DeployScheduledBackup(source) if err != nil { - glog.Errorf("cluster:[%s] scheduler happen error: %v", source.ClusterName, err) + klog.Errorf("cluster:[%s] failed to deploy scheduled backup, error: %v", source.ClusterName, err) return err } @@ -240,7 +240,7 @@ func (oa *operatorActions) CheckDataConsistency(from, to *TidbClusterConfig, tim fn := func() (bool, error) { b, err := oa.DataIsTheSameAs(to, from) if err != nil { - glog.Error(err) + klog.Error(err) return false, nil } if b { @@ -254,7
+254,7 @@ func (oa *operatorActions) CheckDataConsistency(from, to *TidbClusterConfig, tim func (oa *operatorActions) DeployDrainer(info *DrainerConfig, source *TidbClusterConfig) error { oa.EmitEvent(source, "DeployDrainer") - glog.Infof("begin to deploy drainer [%s] namespace[%s], source cluster [%s]", info.DrainerName, + klog.Infof("begin to deploy drainer [%s] namespace[%s], source cluster [%s]", info.DrainerName, source.Namespace, source.ClusterName) valuesPath, err := info.BuildSubValues(oa.drainerChartPath(source.OperatorTag)) @@ -269,7 +269,7 @@ func (oa *operatorActions) DeployDrainer(info *DrainerConfig, source *TidbCluste cmd := fmt.Sprintf("helm install %s --name %s --namespace %s --set-string %s -f %s", oa.drainerChartPath(source.OperatorTag), info.DrainerName, source.Namespace, info.DrainerHelmString(override, source), valuesPath) - glog.Info(cmd) + klog.Info(cmd) if res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput(); err != nil { return fmt.Errorf("failed to deploy drainer [%s/%s], %v, %s", @@ -286,23 +286,23 @@ func (oa *operatorActions) DeployDrainerOrDie(info *DrainerConfig, source *TidbC } func (oa *operatorActions) CheckDrainer(info *DrainerConfig, source *TidbClusterConfig) error { - glog.Infof("checking drainer [%s/%s]", info.DrainerName, source.Namespace) + klog.Infof("checking drainer [%s/%s]", info.DrainerName, source.Namespace) ns := source.Namespace stsName := fmt.Sprintf("%s-%s-drainer", source.ClusterName, info.DrainerName) fn := func() (bool, error) { sts, err := oa.kubeCli.AppsV1().StatefulSets(source.Namespace).Get(stsName, v1.GetOptions{}) if err != nil { - glog.Errorf("failed to get drainer StatefulSet %s ,%v", sts, err) + klog.Errorf("failed to get drainer StatefulSet %s, %v", stsName, err) return false, nil } if *sts.Spec.Replicas != DrainerReplicas { - glog.Infof("StatefulSet: %s/%s .spec.Replicas(%d) != %d", + klog.Infof("StatefulSet: %s/%s .spec.Replicas(%d) != %d", ns, sts.Name, *sts.Spec.Replicas, DrainerReplicas) return false, nil } if sts.Status.ReadyReplicas != DrainerReplicas { - glog.Infof("StatefulSet: %s/%s .state.ReadyReplicas(%d) != %d", + klog.Infof("StatefulSet: %s/%s .status.ReadyReplicas(%d) != %d", ns, sts.Name, sts.Status.ReadyReplicas, DrainerReplicas) } return true, nil @@ -317,7 +317,7 @@ func (oa *operatorActions) CheckDrainer(info *DrainerConfig, source *TidbCluster } func (oa *operatorActions) RestoreIncrementalFiles(from *DrainerConfig, to *TidbClusterConfig, stopTSO int64) error { - glog.Infof("restoring incremental data from drainer [%s/%s] to TiDB cluster [%s/%s]", + klog.Infof("restoring incremental data from drainer [%s/%s] to TiDB cluster [%s/%s]", from.Namespace, from.DrainerName, to.Namespace, to.ClusterName) // TODO: better incremental files restore solution @@ -354,7 +354,7 @@ func (oa *operatorActions) RestoreIncrementalFiles(from *DrainerConfig, to *Tidb } cmd := buff.String() - glog.Infof("Restore incremental data, command: \n%s", cmd) + klog.Infof("Restore incremental data, command: \n%s", cmd) if res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput(); err != nil { return fmt.Errorf("failed to restore incremental files from dainer [%s/%s] to TiDB cluster [%s/%s], %v, %s", diff --git a/tests/cluster_info.go b/tests/cluster_info.go index 009aeb2847..2b6a613615 100644 --- a/tests/cluster_info.go +++ b/tests/cluster_info.go @@ -18,7 +18,7 @@ import ( "os" "strconv" - glog "k8s.io/klog" + "k8s.io/klog" ) func (tc *TidbClusterConfig) set(name string, value string) (string, bool) { @@ -161,6 +161,6 @@
func (tc *TidbClusterConfig) BuildSubValues(path string) (string, error) { if err != nil { return "", err } - glog.V(4).Infof("subValues:\n %s", subValues) + klog.V(4).Infof("subValues:\n %s", subValues) return subVaulesPath, nil } diff --git a/tests/cmd/fault-trigger/main.go b/tests/cmd/fault-trigger/main.go index 1e36dc4cb8..c5a1d1e9bf 100644 --- a/tests/cmd/fault-trigger/main.go +++ b/tests/cmd/fault-trigger/main.go @@ -24,7 +24,7 @@ import ( "github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/manager" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/component-base/logs" - glog "k8s.io/klog" + "k8s.io/klog" ) var ( @@ -52,5 +52,5 @@ func main() { server.StartServer() }, 5*time.Second) - glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", pprofPort), nil)) + klog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", pprofPort), nil)) } diff --git a/tests/cmd/stability/main.go b/tests/cmd/stability/main.go index 567e193b3c..fca200e173 100644 --- a/tests/cmd/stability/main.go +++ b/tests/cmd/stability/main.go @@ -31,7 +31,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/component-base/logs" - glog "k8s.io/klog" + "k8s.io/klog" ) var cfg *tests.Config @@ -46,7 +46,7 @@ func main() { logs.InitLogs() defer logs.FlushLogs() go func() { - glog.Info(http.ListenAndServe(":6060", nil)) + klog.Info(http.ListenAndServe(":6060", nil)) }() metrics.StartServer() cfg = tests.ParseConfigOrDie() @@ -381,7 +381,7 @@ func run() { } slack.SuccessCount++ - glog.Infof("################## Stability test finished at: %v\n\n\n\n", time.Now().Format(time.RFC3339)) + klog.Infof("################## Stability test finished at: %v\n\n\n\n", time.Now().Format(time.RFC3339)) } func newOperatorConfig() *tests.OperatorConfig { diff --git a/tests/config.go b/tests/config.go index 38fb686bc8..8b704e6395 100644 --- a/tests/config.go +++ b/tests/config.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/tidb-operator/tests/pkg/blockwriter" "gopkg.in/yaml.v2" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -158,7 +158,7 @@ func ParseConfigOrDie() *Config { slack.NotifyAndPanic(err) } - glog.Infof("using config: %+v", cfg) + klog.Infof("using config: %+v", cfg) return cfg } diff --git a/tests/drainer_info.go b/tests/drainer_info.go index 8ad4f87c5e..7b072f2be7 100644 --- a/tests/drainer_info.go +++ b/tests/drainer_info.go @@ -18,7 +18,7 @@ import ( "io/ioutil" "strings" - glog "k8s.io/klog" + "k8s.io/klog" ) type DbType string @@ -72,6 +72,6 @@ func (d *DrainerConfig) BuildSubValues(dir string) (string, error) { if err := ioutil.WriteFile(path, []byte(values), 0644); err != nil { return "", err } - glog.Infof("Values of drainer %s:\n %s", d.DrainerName, values) + klog.Infof("Values of drainer %s:\n %s", d.DrainerName, values) return path, nil } diff --git a/tests/dt.go b/tests/dt.go index 39eb51701a..5ecf247cad 100644 --- a/tests/dt.go +++ b/tests/dt.go @@ -25,7 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -66,14 +66,14 @@ func (oa *operatorActions) LabelNodes() error { err := wait.PollImmediate(3*time.Second, time.Minute, func() (bool, error) { n, err := oa.kubeCli.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) if err != nil { - glog.Errorf("get node:[%s] failed! error: %v", node.Name, err) + klog.Errorf("get node:[%s] failed! 
error: %v", node.Name, err) return false, nil } index := i % RackNum n.Labels[RackLabel] = fmt.Sprintf("rack%d", index) _, err = oa.kubeCli.CoreV1().Nodes().Update(n) if err != nil { - glog.Errorf("label node:[%s] failed! error: %v", node.Name, err) + klog.Errorf("label node:[%s] failed! error: %v", node.Name, err) return false, nil } return true, nil diff --git a/tests/failover.go b/tests/failover.go index c61981d33c..271370a9cf 100644 --- a/tests/failover.go +++ b/tests/failover.go @@ -35,7 +35,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" - glog "k8s.io/klog" + "k8s.io/klog" ) func (oa *operatorActions) DeletePDDataThenCheckFailover(info *TidbClusterConfig, pdFailoverPeriod time.Duration) error { @@ -50,7 +50,7 @@ func (oa *operatorActions) DeletePDDataThenCheckFailover(info *TidbClusterConfig deletePDDataCmd := fmt.Sprintf("kubectl exec -n %s %s -- rm -rf /var/lib/pd/member", ns, podName) result, err = exec.Command("/bin/sh", "-c", deletePDDataCmd).CombinedOutput() if err != nil { - glog.Error(err) + klog.Error(err) return false, nil } return true, nil @@ -58,17 +58,17 @@ func (oa *operatorActions) DeletePDDataThenCheckFailover(info *TidbClusterConfig if err != nil { return fmt.Errorf("failed to delete pod %s/%s data, %s", ns, podName, string(result)) } - glog.Infof("delete pod %s/%s data successfully", ns, podName) + klog.Infof("delete pod %s/%s data successfully", ns, podName) err = wait.Poll(10*time.Second, failoverTimeout+pdFailoverPeriod, func() (bool, error) { tc, err := oa.cli.PingcapV1alpha1().TidbClusters(ns).Get(tcName, metav1.GetOptions{}) if err != nil { - glog.Error(err) + klog.Error(err) return false, nil } if len(tc.Status.PD.FailureMembers) == 1 { - glog.Infof("%#v", tc.Status.PD.FailureMembers) + klog.Infof("%#v", tc.Status.PD.FailureMembers) return true, nil } return false, nil @@ -76,7 +76,7 @@ func (oa *operatorActions) DeletePDDataThenCheckFailover(info *TidbClusterConfig if err != nil { return fmt.Errorf("failed to check pd %s/%s failover", ns, podName) } - glog.Infof("check pd %s/%s failover successfully", ns, podName) + klog.Infof("check pd %s/%s failover successfully", ns, podName) tc, err := oa.cli.PingcapV1alpha1().TidbClusters(ns).Get(tcName, metav1.GetOptions{}) if err != nil { @@ -92,7 +92,7 @@ func (oa *operatorActions) DeletePDDataThenCheckFailover(info *TidbClusterConfig return err } - glog.Infof("recover %s/%s successfully", ns, podName) + klog.Infof("recover %s/%s successfully", ns, podName) return nil } @@ -111,21 +111,21 @@ func (oa *operatorActions) TruncateSSTFileThenCheckFailover(info *TidbClusterCon // checkout latest tidb cluster tc, err := cli.PingcapV1alpha1().TidbClusters(info.Namespace).Get(info.ClusterName, metav1.GetOptions{}) if err != nil { - glog.Errorf("failed to get the cluster: ns=%s tc=%s err=%s", info.Namespace, info.ClusterName, err.Error()) + klog.Errorf("failed to get the cluster: ns=%s tc=%s err=%s", info.Namespace, info.ClusterName, err.Error()) return err } // checkout pd config pdCfg, err := oa.pdControl.GetPDClient(pdapi.Namespace(tc.GetNamespace()), tc.GetName(), tc.IsTLSClusterEnabled()).GetConfig() if err != nil { - glog.Errorf("failed to get the pd config: tc=%s err=%s", info.ClusterName, err.Error()) + klog.Errorf("failed to get the pd config: tc=%s err=%s", info.ClusterName, err.Error()) return err } maxStoreDownTime, err := time.ParseDuration(pdCfg.Schedule.MaxStoreDownTime) if err != nil { return err } - glog.Infof("truncate sst file failover 
config: maxStoreDownTime=%v tikvFailoverPeriod=%v", maxStoreDownTime, tikvFailoverPeriod) + klog.Infof("truncate sst file failover config: maxStoreDownTime=%v tikvFailoverPeriod=%v", maxStoreDownTime, tikvFailoverPeriod) // find an up store var store v1alpha1.TiKVStore @@ -139,16 +139,16 @@ func (oa *operatorActions) TruncateSSTFileThenCheckFailover(info *TidbClusterCon break } if len(store.ID) == 0 { - glog.Errorf("failed to find an up store") + klog.Errorf("failed to find an up store") return errors.New("no up store for truncating sst file") } - glog.Infof("truncate sst file target store: id=%s pod=%s", store.ID, store.PodName) + klog.Infof("truncate sst file target store: id=%s pod=%s", store.ID, store.PodName) oa.EmitEvent(info, fmt.Sprintf("TruncateSSTFile: tikv: %s", store.PodName)) - glog.Infof("deleting pod: [%s/%s] and wait 1 minute for the pod to terminate", info.Namespace, store.PodName) + klog.Infof("deleting pod: [%s/%s] and waiting 1 minute for the pod to terminate", info.Namespace, store.PodName) err = cli.CoreV1().Pods(info.Namespace).Delete(store.PodName, nil) if err != nil { - glog.Errorf("failed to get delete the pod: ns=%s tc=%s pod=%s err=%s", + klog.Errorf("failed to delete the pod: ns=%s tc=%s pod=%s err=%s", info.Namespace, info.ClusterName, store.PodName, err.Error()) return err } @@ -162,14 +162,14 @@ func (oa *operatorActions) TruncateSSTFileThenCheckFailover(info *TidbClusterCon Store: store.ID, }) if err != nil { - glog.Errorf("failed to truncate the sst file: ns=%s tc=%s store=%s err=%s", + klog.Errorf("failed to truncate the sst file: ns=%s tc=%s store=%s err=%s", info.Namespace, info.ClusterName, store.ID, err.Error()) return err } oa.EmitEvent(info, fmt.Sprintf("TruncateSSTFile: tikv: %s/%s", info.Namespace, store.PodName)) // delete tikv pod - glog.Infof("deleting pod: [%s/%s] again", info.Namespace, store.PodName) + klog.Infof("deleting pod: [%s/%s] again", info.Namespace, store.PodName) wait.Poll(10*time.Second, time.Minute, func() (bool, error) { err = oa.kubeCli.CoreV1().Pods(info.Namespace).Delete(store.PodName, &metav1.DeleteOptions{}) if err != nil { @@ -183,7 +183,7 @@ func (oa *operatorActions) TruncateSSTFileThenCheckFailover(info *TidbClusterCon err = tikvOps.PollTiDBCluster(info.Namespace, info.ClusterName, func(tc *v1alpha1.TidbCluster, err error) (bool, error) { _, ok := tc.Status.TiKV.FailureStores[store.ID] - glog.Infof("cluster: [%s/%s] check if target store failed: %t", + klog.Infof("cluster: [%s/%s] check if target store failed: %t", info.Namespace, info.ClusterName, ok) if !ok { return false, nil @@ -191,13 +191,13 @@ func (oa *operatorActions) TruncateSSTFileThenCheckFailover(info *TidbClusterCon return true, nil }) if err != nil { - glog.Errorf("failed to check truncate sst file: %v", err) + klog.Errorf("failed to check truncate sst file: %v", err) return err } if err := wait.Poll(1*time.Minute, 30*time.Minute, func() (bool, error) { if err := tikvOps.RecoverSSTFile(info.Namespace, podName); err != nil { - glog.Errorf("failed to recovery sst file %s/%s, %v", info.Namespace, podName, err) + klog.Errorf("failed to recover sst file %s/%s, %v", info.Namespace, podName, err) return false, nil } @@ -206,7 +206,7 @@ func (oa *operatorActions) TruncateSSTFileThenCheckFailover(info *TidbClusterCon return err } - glog.Infof("deleting pod: [%s/%s] again", info.Namespace, store.PodName) + klog.Infof("deleting pod: [%s/%s] again", info.Namespace, store.PodName) return wait.Poll(10*time.Second, time.Minute, func() (bool, error) { err =
oa.kubeCli.CoreV1().Pods(info.Namespace).Delete(store.PodName, &metav1.DeleteOptions{}) if err != nil { @@ -225,14 +225,14 @@ func (oa *operatorActions) TruncateSSTFileThenCheckFailoverOrDie(info *TidbClust func (oa *operatorActions) CheckFailoverPending(info *TidbClusterConfig, node string, faultPoint *time.Time) (bool, error) { affectedPods, err := oa.getPodsByNode(info, node) if err != nil { - glog.Infof("cluster:[%s] query pods failed,error:%v", info.FullName(), err) + klog.Infof("cluster:[%s] query pods failed,error:%v", info.FullName(), err) return false, nil } tc, err := oa.cli.PingcapV1alpha1().TidbClusters(info.Namespace).Get(info.ClusterName, metav1.GetOptions{}) if err != nil { - glog.Infof("pending failover,failed to get tidbcluster:[%s], error: %v", info.FullName(), err) + klog.Infof("pending failover,failed to get tidbcluster:[%s], error: %v", info.FullName(), err) if strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") { - glog.Info("create new client") + klog.Info("create new client") newCli, _, _, _, _ := client.NewCliOrDie() oa.cli = newCli } @@ -244,7 +244,7 @@ func (oa *operatorActions) CheckFailoverPending(info *TidbClusterConfig, node st for _, failureMember := range tc.Status.PD.FailureMembers { if _, exist := affectedPods[failureMember.PodName]; exist { err := fmt.Errorf("cluster: [%s] the pd member[%s] should be mark failure after %s", info.FullName(), failureMember.PodName, deadline.Format(time.RFC3339)) - glog.Errorf(err.Error()) + klog.Errorf(err.Error()) return false, err } } @@ -253,7 +253,7 @@ func (oa *operatorActions) CheckFailoverPending(info *TidbClusterConfig, node st for _, failureStore := range tc.Status.TiKV.FailureStores { if _, exist := affectedPods[failureStore.PodName]; exist { err := fmt.Errorf("cluster: [%s] the tikv store[%s] should be mark failure after %s", info.FullName(), failureStore.PodName, deadline.Format(time.RFC3339)) - glog.Errorf(err.Error()) + klog.Errorf(err.Error()) // There may have been a failover before return false, nil } @@ -264,13 +264,13 @@ func (oa *operatorActions) CheckFailoverPending(info *TidbClusterConfig, node st for _, failureMember := range tc.Status.TiDB.FailureMembers { if _, exist := affectedPods[failureMember.PodName]; exist { err := fmt.Errorf("cluster: [%s] the tidb member[%s] should be mark failure after %s", info.FullName(), failureMember.PodName, deadline.Format(time.RFC3339)) - glog.Errorf(err.Error()) + klog.Errorf(err.Error()) return false, err } } } - glog.Infof("cluster: [%s] operator's failover feature is pending", info.FullName()) + klog.Infof("cluster: [%s] operator's failover feature is pending", info.FullName()) return false, nil } return true, nil @@ -300,18 +300,18 @@ func (oa *operatorActions) CheckFailoverPendingOrDie(clusters []*TidbClusterConf func (oa *operatorActions) CheckFailover(info *TidbClusterConfig, node string) (bool, error) { affectedPods, err := oa.getPodsByNode(info, node) if err != nil { - glog.Infof("cluster:[%s] query pods failed,error:%v", info.FullName(), err) + klog.Infof("cluster:[%s] query pods failed,error:%v", info.FullName(), err) return false, nil } if len(affectedPods) == 0 { - glog.Infof("the cluster:[%s] can not be affected by node:[%s]", info.FullName(), node) + klog.Infof("the cluster:[%s] can not be affected by node:[%s]", info.FullName(), node) return true, nil } tc, err := oa.cli.PingcapV1alpha1().TidbClusters(info.Namespace).Get(info.ClusterName, metav1.GetOptions{}) if err != nil { - glog.Errorf("query tidbcluster: [%s] failed, 
error: %v", info.FullName(), err) + klog.Errorf("query tidbcluster: [%s] failed, error: %v", info.FullName(), err) return false, nil } @@ -332,19 +332,19 @@ func (oa *operatorActions) CheckFailover(info *TidbClusterConfig, node string) ( } } - glog.Infof("cluster: [%s]'s failover feature has complete", info.FullName()) + klog.Infof("cluster: [%s]'s failover feature has complete", info.FullName()) return true, nil } func (oa *operatorActions) getPodsByNode(info *TidbClusterConfig, node string) (map[string]*corev1.Pod, error) { selector, err := label.New().Instance(info.ClusterName).Selector() if err != nil { - glog.Errorf("cluster:[%s] create selector failed, error:%v", info.FullName(), err) + klog.Errorf("cluster:[%s] create selector failed, error:%v", info.FullName(), err) return nil, err } pods, err := oa.kubeCli.CoreV1().Pods(info.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { - glog.Errorf("cluster:[%s] query pods failed, error:%v", info.FullName(), err) + klog.Errorf("cluster:[%s] query pods failed, error:%v", info.FullName(), err) return nil, err } podsOfNode := map[string]*corev1.Pod{} @@ -385,12 +385,12 @@ func (oa *operatorActions) CheckRecover(cluster *TidbClusterConfig) (bool, error } if tc.Status.PD.FailureMembers != nil && len(tc.Status.PD.FailureMembers) > 0 { - glog.Infof("cluster: [%s]'s pd FailureMembers is not nil, continue to wait", cluster.FullName()) + klog.Infof("cluster: [%s]'s pd FailureMembers is not nil, continue to wait", cluster.FullName()) return false, nil } if tc.Status.TiDB.FailureMembers != nil && len(tc.Status.TiDB.FailureMembers) > 0 { - glog.Infof("cluster: [%s]'s tidb FailureMembers is not nil, continue to wait", cluster.FullName()) + klog.Infof("cluster: [%s]'s tidb FailureMembers is not nil, continue to wait", cluster.FullName()) return false, nil } @@ -399,7 +399,7 @@ func (oa *operatorActions) CheckRecover(cluster *TidbClusterConfig) (bool, error tc.Status.TiKV.FailureStores = nil tc, err = oa.cli.PingcapV1alpha1().TidbClusters(cluster.Namespace).Update(tc) if err != nil { - glog.Errorf("failed to set status.tikv.failureStore to nil, %v", err) + klog.Errorf("failed to set status.tikv.failureStore to nil, %v", err) return false, nil } } @@ -437,13 +437,13 @@ func (oa *operatorActions) pdFailover(pod *corev1.Pod, tc *v1alpha1.TidbCluster) } } if !failure { - glog.Infof("tidbCluster:[%s/%s]'s member:[%s] have not become failuremember", tc.Namespace, tc.Name, pod.Name) + klog.Infof("tidbCluster:[%s/%s]'s member:[%s] have not become failuremember", tc.Namespace, tc.Name, pod.Name) return false } for _, member := range tc.Status.PD.Members { if member.Name == pod.GetName() { - glog.Infof("tidbCluster:[%s/%s]'s status.members still have pd member:[%s]", tc.Namespace, tc.Name, pod.Name) + klog.Infof("tidbCluster:[%s/%s]'s status.members still have pd member:[%s]", tc.Namespace, tc.Name, pod.Name) return false } } @@ -452,7 +452,7 @@ func (oa *operatorActions) pdFailover(pod *corev1.Pod, tc *v1alpha1.TidbCluster) return true } - glog.Infof("cluster: [%s/%s] pd:[%s] failover still not complete", tc.Namespace, tc.Name, pod.GetName()) + klog.Infof("cluster: [%s/%s] pd:[%s] failover still not complete", tc.Namespace, tc.Name, pod.GetName()) return false } @@ -469,7 +469,7 @@ func (oa *operatorActions) tikvFailover(pod *corev1.Pod, tc *v1alpha1.TidbCluste } } if !failure { - glog.Infof("tidbCluster:[%s/%s]'s store pod:[%s] have not become failuremember", tc.Namespace, tc.Name, pod.Name) + klog.Infof("tidbCluster:[%s/%s]'s 
store pod:[%s] have not become failuremember", tc.Namespace, tc.Name, pod.Name) return false } @@ -483,7 +483,7 @@ func (oa *operatorActions) tikvFailover(pod *corev1.Pod, tc *v1alpha1.TidbCluste return true } - glog.Infof("cluster: [%s/%s] tikv:[%s] failover still not complete", tc.Namespace, tc.Name, pod.GetName()) + klog.Infof("cluster: [%s/%s] tikv:[%s] failover still not complete", tc.Namespace, tc.Name, pod.GetName()) return false } @@ -491,7 +491,7 @@ func (oa *operatorActions) tidbFailover(pod *corev1.Pod, tc *v1alpha1.TidbCluste failure := false for _, failureMember := range tc.Status.TiDB.FailureMembers { if failureMember.PodName == pod.GetName() { - glog.Infof("tidbCluster:[%s/%s]'s store pod:[%s] have become failuremember", tc.Namespace, tc.Name, pod.Name) + klog.Infof("tidbCluster:[%s/%s]'s store pod:[%s] have become failuremember", tc.Namespace, tc.Name, pod.Name) failure = true break } @@ -510,7 +510,7 @@ func (oa *operatorActions) tidbFailover(pod *corev1.Pod, tc *v1alpha1.TidbCluste if healthCount == int(tc.Spec.TiDB.Replicas) { return true } - glog.Infof("cluster: [%s/%s] tidb:[%s] failover still not complete", tc.Namespace, tc.Name, pod.GetName()) + klog.Infof("cluster: [%s/%s] tidb:[%s] failover still not complete", tc.Namespace, tc.Name, pod.GetName()) return false } @@ -555,30 +555,30 @@ func (oa *operatorActions) GetNodeMap(info *TidbClusterConfig, component string) } func (oa *operatorActions) CheckKubeletDownOrDie(operatorConfig *OperatorConfig, clusters []*TidbClusterConfig, faultNode string) { - glog.Infof("check k8s/operator/tidbCluster status when kubelet down") + klog.Infof("check k8s/operator/tidbCluster status when kubelet down") time.Sleep(10 * time.Minute) KeepOrDie(3*time.Second, 10*time.Minute, func() error { err := oa.CheckK8sAvailable(nil, nil) if err != nil { return err } - glog.V(4).Infof("k8s cluster is available.") + klog.V(4).Infof("k8s cluster is available.") err = oa.CheckOperatorAvailable(operatorConfig) if err != nil { return err } - glog.V(4).Infof("tidb operator is available.") + klog.V(4).Infof("tidb operator is available.") err = oa.CheckTidbClustersAvailable(clusters) if err != nil { return err } - glog.V(4).Infof("all clusters are available") + klog.V(4).Infof("all clusters are available") return nil }) } func (oa *operatorActions) CheckEtcdDownOrDie(operatorConfig *OperatorConfig, clusters []*TidbClusterConfig, faultNode string) { - glog.Infof("check k8s/operator/tidbCluster status when etcd down") + klog.Infof("check k8s/operator/tidbCluster status when etcd down") // kube-apiserver may block 15 min time.Sleep(20 * time.Minute) KeepOrDie(3*time.Second, 10*time.Minute, func() error { @@ -586,23 +586,23 @@ func (oa *operatorActions) CheckEtcdDownOrDie(operatorConfig *OperatorConfig, cl if err != nil { return err } - glog.V(4).Infof("k8s cluster is available.") + klog.V(4).Infof("k8s cluster is available.") err = oa.CheckOperatorAvailable(operatorConfig) if err != nil { return err } - glog.V(4).Infof("tidb operator is available.") + klog.V(4).Infof("tidb operator is available.") err = oa.CheckTidbClustersAvailable(clusters) if err != nil { return err } - glog.V(4).Infof("all clusters are available") + klog.V(4).Infof("all clusters are available") return nil }) } func (oa *operatorActions) CheckKubeProxyDownOrDie(operatorConfig *OperatorConfig, clusters []*TidbClusterConfig) { - glog.Infof("checking k8s/tidbCluster status when kube-proxy down") + klog.Infof("checking k8s/tidbCluster status when kube-proxy down") KeepOrDie(3*time.Second, 
10*time.Minute, func() error { err := oa.CheckK8sAvailable(nil, nil) @@ -610,75 +610,75 @@ func (oa *operatorActions) CheckKubeProxyDownOrDie(operatorConfig *OperatorConfi return err } - glog.V(4).Infof("k8s cluster is available.") + klog.V(4).Infof("k8s cluster is available.") err = oa.CheckOperatorAvailable(operatorConfig) if err != nil { return err } - glog.V(4).Infof("tidb operator is available.") + klog.V(4).Infof("tidb operator is available.") err = oa.CheckTidbClustersAvailable(clusters) if err != nil { return err } - glog.V(4).Infof("all clusters are available.") + klog.V(4).Infof("all clusters are available.") return nil }) } func (oa *operatorActions) CheckKubeSchedulerDownOrDie(operatorConfig *OperatorConfig, clusters []*TidbClusterConfig) { - glog.Infof("verify kube-scheduler is not avaiavble") + klog.Infof("verify kube-scheduler is not avaiavble") if err := waitForComponentStatus(oa.kubeCli, "scheduler", corev1.ComponentHealthy, corev1.ConditionFalse); err != nil { slack.NotifyAndPanic(fmt.Errorf("failed to stop kube-scheduler: %v", err)) } - glog.Infof("checking operator/tidbCluster status when kube-scheduler is not available") + klog.Infof("checking operator/tidbCluster status when kube-scheduler is not available") KeepOrDie(3*time.Second, 10*time.Minute, func() error { err := oa.CheckOperatorAvailable(operatorConfig) if err != nil { return err } - glog.V(4).Infof("tidb operator is available.") + klog.V(4).Infof("tidb operator is available.") err = oa.CheckTidbClustersAvailable(clusters) if err != nil { return err } - glog.V(4).Infof("all clusters are available.") + klog.V(4).Infof("all clusters are available.") return nil }) } func (oa *operatorActions) CheckKubeControllerManagerDownOrDie(operatorConfig *OperatorConfig, clusters []*TidbClusterConfig) { - glog.Infof("verify kube-controller-manager is not avaiavble") + klog.Infof("verify kube-controller-manager is not avaiavble") if err := waitForComponentStatus(oa.kubeCli, "controller-manager", corev1.ComponentHealthy, corev1.ConditionFalse); err != nil { slack.NotifyAndPanic(fmt.Errorf("failed to stop kube-controller-manager: %v", err)) } - glog.Infof("checking operator/tidbCluster status when kube-controller-manager is not available") + klog.Infof("checking operator/tidbCluster status when kube-controller-manager is not available") KeepOrDie(3*time.Second, 10*time.Minute, func() error { err := oa.CheckOperatorAvailable(operatorConfig) if err != nil { return err } - glog.V(4).Infof("tidb operator is available.") + klog.V(4).Infof("tidb operator is available.") err = oa.CheckTidbClustersAvailable(clusters) if err != nil { return err } - glog.V(4).Infof("all clusters are available.") + klog.V(4).Infof("all clusters are available.") return nil }) } func (oa *operatorActions) CheckOneApiserverDownOrDie(operatorConfig *OperatorConfig, clusters []*TidbClusterConfig, faultNode string) { - glog.Infof("check k8s/operator/tidbCluster status when one apiserver down") + klog.Infof("check k8s/operator/tidbCluster status when one apiserver down") affectedPods := map[string]*corev1.Pod{} apiserverPod, err := GetKubeApiserverPod(oa.kubeCli, faultNode) if err != nil { @@ -724,17 +724,17 @@ func (oa *operatorActions) CheckOneApiserverDownOrDie(operatorConfig *OperatorCo if err != nil { return err } - glog.V(4).Infof("k8s cluster is available.") + klog.V(4).Infof("k8s cluster is available.") err = oa.CheckOperatorAvailable(operatorConfig) if err != nil { return err } - glog.V(4).Infof("tidb operator is available.") + klog.V(4).Infof("tidb 
operator is available.") err = oa.CheckTidbClustersAvailable(clusters) if err != nil { return err } - glog.V(4).Infof("all clusters is available") + klog.V(4).Infof("all clusters are available") return nil }) } @@ -745,13 +745,13 @@ func (oa *operatorActions) CheckAllApiserverDownOrCo if err != nil { return err } - glog.V(4).Infof("all clusters is available") + klog.V(4).Infof("all clusters are available") return nil }) } func (oa *operatorActions) CheckOperatorDownOrDie(clusters []*TidbClusterConfig) { - glog.Infof("checking k8s/tidbCluster status when operator down") + klog.Infof("checking k8s/tidbCluster status when operator down") KeepOrDie(3*time.Second, 10*time.Minute, func() error { err := oa.CheckK8sAvailable(nil, nil) @@ -773,7 +773,7 @@ func (oa *operatorActions) CheckK8sAvailable(excludeNodes map[string]string, exc return wait.Poll(3*time.Second, time.Minute, func() (bool, error) { nodes, err := oa.kubeCli.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { - glog.Errorf("failed to list nodes,error:%v", err) + klog.Errorf("failed to list nodes, error: %v", err) return false, nil } for _, node := range nodes.Items { @@ -788,7 +788,7 @@ func (oa *operatorActions) CheckK8sAvailable(excludeNodes map[string]string, exc } systemPods, err := oa.kubeCli.CoreV1().Pods("kube-system").List(metav1.ListOptions{}) if err != nil { - glog.Errorf("failed to list kube-system pods,error:%v", err) + klog.Errorf("failed to list kube-system pods, error: %v", err) return false, nil } for _, pod := range systemPods.Items { @@ -813,23 +813,23 @@ func (oa *operatorActions) CheckOperatorAvailable(operatorConfig } controllerDeployment, err := oa.kubeCli.AppsV1().Deployments(operatorConfig.Namespace).Get(tidbControllerName, metav1.GetOptions{}) if err != nil { - glog.Errorf("failed to get deployment:%s failed,error:%v", tidbControllerName, err) + klog.Errorf("failed to get deployment %s, error: %v", tidbControllerName, err) return false, nil } if controllerDeployment.Status.AvailableReplicas != *controllerDeployment.Spec.Replicas { e = fmt.Errorf("the %s is not available", tidbControllerName) - glog.Error(e) + klog.Error(e) errCount++ return false, nil } schedulerDeployment, err := oa.kubeCli.AppsV1().Deployments(operatorConfig.Namespace).Get(tidbSchedulerName, metav1.GetOptions{}) if err != nil { - glog.Errorf("failed to get deployment:%s failed,error:%v", tidbSchedulerName, err) + klog.Errorf("failed to get deployment %s, error: %v", tidbSchedulerName, err) return false, nil } if schedulerDeployment.Status.AvailableReplicas != *schedulerDeployment.Spec.Replicas { e = fmt.Errorf("the %s is not available", tidbSchedulerName) - glog.Error(e) + klog.Error(e) errCount++ return false, nil } @@ -864,26 +864,26 @@ var testTableName = "testTable" func (oa *operatorActions) addDataToCluster(info *TidbClusterConfig) (bool, error) { dsn, cancel, err := oa.getTiDBDSN(info.Namespace, info.ClusterName, "test", info.Password) if err != nil { - glog.Errorf("failed to get TiDB DSN: %v", err) + klog.Errorf("failed to get TiDB DSN: %v", err) return false, nil } defer cancel() db, err := sql.Open("mysql", dsn) if err != nil { - glog.Errorf("cluster:[%s] can't open connection to mysql: %v", info.FullName(), err) + klog.Errorf("cluster:[%s] can't open connection to mysql: %v", info.FullName(), err) return false, nil } defer db.Close() _, err = db.Exec(fmt.Sprintf("CREATE TABLE %s (name VARCHAR(64))", testTableName)) if err != nil && !tableAlreadyExist(err) { -
glog.Errorf("cluster:[%s] can't create table to mysql: %v", info.FullName(), err) + klog.Errorf("cluster:[%s] can't create table to mysql: %v", info.FullName(), err) return false, nil } _, err = db.Exec(fmt.Sprintf("INSERT INTO %s VALUES (?)", testTableName), "testValue") if err != nil { - glog.Errorf("cluster:[%s] can't insert data to mysql: %v", info.FullName(), err) + klog.Errorf("cluster:[%s] can't insert data to mysql: %v", info.FullName(), err) return false, nil } diff --git a/tests/fault.go b/tests/fault.go index 33298ad436..611053fa5a 100644 --- a/tests/fault.go +++ b/tests/fault.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -95,7 +95,7 @@ type faultTriggerActions struct { } func (fa *faultTriggerActions) CheckAndRecoverEnv() error { - glog.Infof("ensure all nodes are running") + klog.Infof("ensure all nodes are running") for _, physicalNode := range fa.cfg.Nodes { for _, vNode := range physicalNode.Nodes { err := fa.StartNode(physicalNode.PhysicalNode, vNode.IP) @@ -104,21 +104,21 @@ func (fa *faultTriggerActions) CheckAndRecoverEnv() error { } } } - glog.Infof("ensure all etcds are running") + klog.Infof("ensure all etcds are running") err := fa.StartETCD() if err != nil { return err } allK8sNodes := getAllK8sNodes(fa.cfg) - glog.Infof("ensure all kubelets are running") + klog.Infof("ensure all kubelets are running") for _, node := range allK8sNodes { err := fa.StartKubelet(node) if err != nil { return err } } - glog.Infof("ensure all static pods are running") + klog.Infof("ensure all static pods are running") for _, physicalNode := range fa.cfg.APIServers { for _, vNode := range physicalNode.Nodes { err := fa.StartKubeAPIServer(vNode.IP) @@ -135,7 +135,7 @@ func (fa *faultTriggerActions) CheckAndRecoverEnv() error { } } } - glog.Infof("ensure all kube-proxy are running") + klog.Infof("ensure all kube-proxy are running") err = fa.StartKubeProxy() if err != nil { return err @@ -146,7 +146,7 @@ func (fa *faultTriggerActions) CheckAndRecoverEnv() error { func (fa *faultTriggerActions) CheckAndRecoverEnvOrDie() { if err := fa.CheckAndRecoverEnv(); err != nil { - glog.Fatal(err) + klog.Fatal(err) } } @@ -156,7 +156,7 @@ func (fa *faultTriggerActions) StopNode() (string, string, time.Time, error) { if err != nil { return "", "", now, err } - glog.Infof("selecting %s as the node to failover", node) + klog.Infof("selecting %s as the node to failover", node) physicalNode := getPhysicalNode(node, fa.cfg) @@ -176,11 +176,11 @@ func (fa *faultTriggerActions) StopNode() (string, string, time.Time, error) { if err := faultCli.StopVM(&manager.VM{ Name: name, }); err != nil { - glog.Errorf("failed to stop node %s on physical node: %s: %v", node, physicalNode, err) + klog.Errorf("failed to stop node %s on physical node: %s: %v", node, physicalNode, err) return "", "", now, err } - glog.Infof("node %s on physical node %s is stopped", node, physicalNode) + klog.Infof("node %s on physical node %s is stopped", node, physicalNode) return physicalNode, node, now, nil } @@ -216,11 +216,11 @@ func (fa *faultTriggerActions) StartNode(physicalNode string, node string) error if err := faultCli.StartVM(&manager.VM{ Name: name, }); err != nil { - glog.Errorf("failed to start node %s on physical node %s: %v", node, physicalNode, err) + klog.Errorf("failed to start node %s on physical node %s: %v", node, physicalNode, err) return err } - glog.Infof("node %s on physical node %s is 
started", node, physicalNode) + klog.Infof("node %s on physical node %s is started", node, physicalNode) return nil } @@ -244,7 +244,7 @@ func (fa *faultTriggerActions) getAllKubeProxyPods() ([]v1.Pod, error) { // StopKubeProxy stops the kube-proxy service. func (fa *faultTriggerActions) StopKubeProxy() error { - glog.Infof("stopping all kube-proxy pods") + klog.Infof("stopping all kube-proxy pods") nodes := getAllK8sNodes(fa.cfg) pods, err := fa.getAllKubeProxyPods() if err != nil { @@ -277,13 +277,13 @@ func (fa *faultTriggerActions) StopKubeProxy() error { return err } for _, pod := range pods { - glog.Infof("waiting for kube-proxy pod %s/%s to be terminated", pod.Namespace, pod.Name) + klog.Infof("waiting for kube-proxy pod %s/%s to be terminated", pod.Namespace, pod.Name) err = waitForPodNotFoundInNamespace(fa.kubeCli, pod.Name, pod.Namespace, PodTimeout) if err != nil { return err } } - glog.Infof("kube-proxy on vm nodes %v are stopped", nodes) + klog.Infof("kube-proxy on vm nodes %v are stopped", nodes) return nil } @@ -295,7 +295,7 @@ func (fa *faultTriggerActions) StopKubeProxyOrDie() { // StartKubeProxy starts the kube-proxy service. func (fa *faultTriggerActions) StartKubeProxy() error { - glog.Infof("starting all kube-proxy pods") + klog.Infof("starting all kube-proxy pods") nodes := getAllK8sNodes(fa.cfg) ds, err := fa.kubeCli.AppsV1().DaemonSets(metav1.NamespaceSystem).Get("kube-proxy", metav1.GetOptions{}) if err != nil { @@ -327,7 +327,7 @@ func (fa *faultTriggerActions) StartKubeProxy() error { if err != nil { return err } - glog.Infof("kube-proxy on vm nodes %v are started", nodes) + klog.Infof("kube-proxy on vm nodes %v are started", nodes) return nil } @@ -356,7 +356,7 @@ func (fa *faultTriggerActions) StopETCD(nodes ...string) error { } func (fa *faultTriggerActions) StopETCDOrDie(nodes ...string) { - glog.Infof("stopping %v etcds", nodes) + klog.Infof("stopping %v etcds", nodes) if err := fa.StopETCD(nodes...); err != nil { slack.NotifyAndPanic(err) } @@ -380,7 +380,7 @@ func (fa *faultTriggerActions) StopKubelet(nodes ...string) error { } func (fa *faultTriggerActions) StopKubeletOrDie(nodes ...string) { - glog.Infof("stopping %v kubelets", nodes) + klog.Infof("stopping %v kubelets", nodes) if err := fa.StopKubelet(nodes...); err != nil { slack.NotifyAndPanic(err) } @@ -553,11 +553,11 @@ func (fa *faultTriggerActions) serviceAction(node string, serverName string, act } if err != nil { - glog.Errorf("failed to %s %s %s: %v", action, serverName, node, err) + klog.Errorf("failed to %s %s %s: %v", action, serverName, node, err) return err } - glog.Infof("%s %s %s successfully", action, serverName, node) + klog.Infof("%s %s %s successfully", action, serverName, node) return nil } @@ -576,7 +576,7 @@ func getFaultNode(kubeCli kubernetes.Interface) (string, error) { err = wait.Poll(2*time.Second, 10*time.Second, func() (bool, error) { nodes, err = kubeCli.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { - glog.Errorf("trigger node stop failed when get all nodes, error: %v", err) + klog.Errorf("trigger node stop failed when get all nodes, error: %v", err) return false, nil } @@ -584,7 +584,7 @@ func getFaultNode(kubeCli kubernetes.Interface) (string, error) { }) if err != nil { - glog.Errorf("failed to list nodes: %v", err) + klog.Errorf("failed to list nodes: %v", err) return "", err } @@ -608,7 +608,7 @@ func getFaultNode(kubeCli kubernetes.Interface) (string, error) { if faultNode == myNode { err := fmt.Errorf("there are at least two nodes with the name %s", 
myNode) - glog.Error(err.Error()) + klog.Error(err.Error()) return "", err } diff --git a/tests/pkg/apimachinery/certs.go b/tests/pkg/apimachinery/certs.go index 5061b5d836..605c59cd3f 100644 --- a/tests/pkg/apimachinery/certs.go +++ b/tests/pkg/apimachinery/certs.go @@ -20,7 +20,7 @@ import ( "k8s.io/client-go/util/cert" "k8s.io/client-go/util/keyutil" - glog "k8s.io/klog" + "k8s.io/klog" "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" ) @@ -35,32 +35,32 @@ type CertContext struct { } func SetupServerCert(namespaceName, serviceName string) (*CertContext, error) { certDir, err := ioutil.TempDir("", "test-e2e-server-cert") if err != nil { - glog.Errorf("Failed to create a temp dir for cert generation %v", err) + klog.Errorf("Failed to create a temp dir for cert generation %v", err) return nil, err } defer os.RemoveAll(certDir) signingKey, err := pkiutil.NewPrivateKey() if err != nil { - glog.Errorf("Failed to create CA private key %v", err) + klog.Errorf("Failed to create CA private key %v", err) return nil, err } signingCert, err := cert.NewSelfSignedCACert(cert.Config{CommonName: "e2e-server-cert-ca"}, signingKey) if err != nil { - glog.Errorf("Failed to create CA cert for apiserver %v", err) + klog.Errorf("Failed to create CA cert for apiserver %v", err) return nil, err } caCertFile, err := ioutil.TempFile(certDir, "ca.crt") if err != nil { - glog.Errorf("Failed to create a temp file for ca cert generation %v", err) + klog.Errorf("Failed to create a temp file for ca cert generation %v", err) return nil, err } if err := ioutil.WriteFile(caCertFile.Name(), pkiutil.EncodeCertPEM(signingCert), 0644); err != nil { - glog.Errorf("Failed to write CA cert %v", err) + klog.Errorf("Failed to write CA cert %v", err) return nil, err } key, err := pkiutil.NewPrivateKey() if err != nil { - glog.Errorf("Failed to create private key for %v", err) + klog.Errorf("Failed to create private key: %v", err) return nil, err } signedCert, err := pkiutil.NewSignedCert( @@ -71,21 +71,21 @@ func SetupServerCert(namespaceName, serviceName string) (*CertContext, error) { key, signingCert, signingKey, ) if err != nil { - glog.Errorf("Failed to create cert%v", err) + klog.Errorf("Failed to create cert: %v", err) return nil, err } certFile, err := ioutil.TempFile(certDir, "server.crt") if err != nil { - glog.Errorf("Failed to create a temp file for cert generation %v", err) + klog.Errorf("Failed to create a temp file for cert generation %v", err) return nil, err } keyFile, err := ioutil.TempFile(certDir, "server.key") if err != nil { - glog.Errorf("Failed to create a temp file for key generation %v", err) + klog.Errorf("Failed to create a temp file for key generation %v", err) return nil, err } if err = ioutil.WriteFile(certFile.Name(), pkiutil.EncodeCertPEM(signedCert), 0600); err != nil { - glog.Errorf("Failed to write cert file %v", err) + klog.Errorf("Failed to write cert file %v", err) return nil, err } keyPEM, err := keyutil.MarshalPrivateKeyToPEM(key) @@ -93,7 +93,7 @@ func SetupServerCert(namespaceName, serviceName string) (*CertContext, error) { return nil, err } if err = ioutil.WriteFile(keyFile.Name(), keyPEM, 0644); err != nil { - glog.Errorf("Failed to write key file %v", err) + klog.Errorf("Failed to write key file %v", err) return nil, err } return &CertContext{ diff --git a/tests/pkg/blockwriter/blockwriter.go b/tests/pkg/blockwriter/blockwriter.go index 00eba3e284..a19c96133e 100644 --- a/tests/pkg/blockwriter/blockwriter.go +++ b/tests/pkg/blockwriter/blockwriter.go @@ -25,7 +25,7 @@ import (
"github.com/pingcap/tidb-operator/tests/pkg/util" "k8s.io/apimachinery/pkg/util/wait" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -96,7 +96,7 @@ func (c *BlockWriterCase) newBlockWriter() *blockWriter { func (c *BlockWriterCase) generateQuery(ctx context.Context, queryChan chan []string, wg *sync.WaitGroup) { defer func() { - glog.Infof("[%s] [%s] [action: generate Query] stopped", c, c.ClusterName) + klog.Infof("[%s] [%s] [action: generate Query] stopped", c, c.ClusterName) wg.Done() }() @@ -126,7 +126,7 @@ func (c *BlockWriterCase) generateQuery(ctx context.Context, queryChan chan []st case queryChan <- querys: continue default: - glog.V(4).Infof("[%s] [%s] [action: generate Query] query channel is full, sleep 10 seconds", c, c.ClusterName) + klog.V(4).Infof("[%s] [%s] [action: generate Query] query channel is full, sleep 10 seconds", c, c.ClusterName) util.Sleep(ctx, 10*time.Second) } } @@ -135,7 +135,7 @@ func (c *BlockWriterCase) generateQuery(ctx context.Context, queryChan chan []st func (bw *blockWriter) batchExecute(db *sql.DB, query string) error { _, err := db.Exec(query) if err != nil { - glog.V(4).Infof("exec sql [%s] failed, err: %v", query, err) + klog.V(4).Infof("exec sql [%s] failed, err: %v", query, err) return err } @@ -143,7 +143,7 @@ func (bw *blockWriter) batchExecute(db *sql.DB, query string) error { } func (bw *blockWriter) run(ctx context.Context, db *sql.DB, queryChan chan []string) { - defer glog.Infof("run stopped") + defer klog.Infof("run stopped") for { select { case <-ctx.Done(): @@ -163,7 +163,7 @@ func (bw *blockWriter) run(ctx context.Context, db *sql.DB, queryChan chan []str return default: if err := bw.batchExecute(db, query); err != nil { - glog.V(4).Info(err) + klog.V(4).Info(err) time.Sleep(5 * time.Second) continue } @@ -174,10 +174,10 @@ func (bw *blockWriter) run(ctx context.Context, db *sql.DB, queryChan chan []str // Initialize inits case func (c *BlockWriterCase) initialize(db *sql.DB) error { - glog.Infof("[%s] [%s] start to init...", c, c.ClusterName) + klog.Infof("[%s] [%s] start to init...", c, c.ClusterName) defer func() { atomic.StoreUint32(&c.isInit, 1) - glog.Infof("[%s] [%s] init end...", c, c.ClusterName) + klog.Infof("[%s] [%s] init end...", c, c.ClusterName) }() for i := 0; i < c.cfg.TableNum; i++ { @@ -196,7 +196,7 @@ func (c *BlockWriterCase) initialize(db *sql.DB) error { err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) { _, err := db.Exec(tmt) if err != nil { - glog.Warningf("[%s] exec sql [%s] failed, err: %v, retry...", c, tmt, err) + klog.Warningf("[%s] exec sql [%s] failed, err: %v, retry...", c, tmt, err) return false, nil } @@ -204,7 +204,7 @@ func (c *BlockWriterCase) initialize(db *sql.DB) error { }) if err != nil { - glog.Errorf("[%s] exec sql [%s] failed, err: %v", c, tmt, err) + klog.Errorf("[%s] exec sql [%s] failed, err: %v", c, tmt, err) return err } } @@ -216,13 +216,13 @@ func (c *BlockWriterCase) initialize(db *sql.DB) error { func (c *BlockWriterCase) Start(db *sql.DB) error { if !atomic.CompareAndSwapUint32(&c.isRunning, 0, 1) { err := fmt.Errorf("[%s] [%s] is running, you can't start it again", c, c.ClusterName) - glog.Error(err) + klog.Error(err) return nil } defer func() { c.RLock() - glog.Infof("[%s] [%s] stopped", c, c.ClusterName) + klog.Infof("[%s] [%s] stopped", c, c.ClusterName) atomic.SwapUint32(&c.isRunning, 0) }() @@ -232,7 +232,7 @@ func (c *BlockWriterCase) Start(db *sql.DB) error { } } - glog.Infof("[%s] [%s] start to execute case...", c, c.ClusterName) 
+ klog.Infof("[%s] [%s] start to execute case...", c, c.ClusterName) var wg sync.WaitGroup @@ -255,7 +255,7 @@ loop: for { select { case <-c.stopChan: - glog.Infof("[%s] stoping...", c) + klog.Infof("[%s] stoping...", c) cancel() break loop default: diff --git a/tests/pkg/client/client_test.go b/tests/pkg/client/client_test.go index 7ac4b78e3c..905d2a77a2 100644 --- a/tests/pkg/client/client_test.go +++ b/tests/pkg/client/client_test.go @@ -18,7 +18,7 @@ import ( fclient "github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/client" "github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/manager" - glog "k8s.io/klog" + "k8s.io/klog" ) func TestClientConn(t *testing.T) { @@ -29,6 +29,6 @@ func TestClientConn(t *testing.T) { if err := faultCli.StopVM(&manager.VM{ Name: "105", }); err != nil { - glog.Errorf("failed to start node on physical node %v", err) + klog.Errorf("failed to start node on physical node %v", err) } } diff --git a/tests/pkg/fault-trigger/api/response.go b/tests/pkg/fault-trigger/api/response.go index a542246661..da76b3a614 100644 --- a/tests/pkg/fault-trigger/api/response.go +++ b/tests/pkg/fault-trigger/api/response.go @@ -31,7 +31,7 @@ import ( "net/http" "github.com/juju/errors" - glog "k8s.io/klog" + "k8s.io/klog" ) // Response defines a new response struct for http @@ -71,7 +71,7 @@ func ExtractResponse(data []byte) ([]byte, error) { if respData.StatusCode != http.StatusOK { d, err := json.Marshal(respData.Payload) if err != nil { - glog.Errorf("marshal data failed %v", d) + klog.Errorf("marshal data failed %v", d) } return d, errors.New(respData.Message) diff --git a/tests/pkg/fault-trigger/api/server.go b/tests/pkg/fault-trigger/api/server.go index 48b3cfb078..cb39aa0f32 100644 --- a/tests/pkg/fault-trigger/api/server.go +++ b/tests/pkg/fault-trigger/api/server.go @@ -30,7 +30,7 @@ import ( restful "github.com/emicklei/go-restful" "github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/manager" - glog "k8s.io/klog" + "k8s.io/klog" ) // Server is a web service to control fault trigger @@ -54,8 +54,8 @@ func (s *Server) StartServer() { restful.Add(ws) - glog.Infof("starting fault-trigger server, listening on 0.0.0.0:%d", s.port) - glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", s.port), nil)) + klog.Infof("starting fault-trigger server, listening on 0.0.0.0:%d", s.port) + klog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", s.port), nil)) } func (s *Server) listVMs(req *restful.Request, resp *restful.Response) { @@ -64,7 +64,7 @@ func (s *Server) listVMs(req *restful.Request, resp *restful.Response) { if err != nil { res.message(err.Error()).statusCode(http.StatusInternalServerError) if err = resp.WriteEntity(res); err != nil { - glog.Errorf("failed to response, methods: listVMs, error: %v", err) + klog.Errorf("failed to response, methods: listVMs, error: %v", err) } return } @@ -72,7 +72,7 @@ func (s *Server) listVMs(req *restful.Request, resp *restful.Response) { res.payload(vms).statusCode(http.StatusOK) if err = resp.WriteEntity(res); err != nil { - glog.Errorf("failed to response, method: listVMs, error: %v", err) + klog.Errorf("failed to response, method: listVMs, error: %v", err) } } @@ -85,7 +85,7 @@ func (s *Server) startVM(req *restful.Request, resp *restful.Response) { res.message(fmt.Sprintf("failed to get vm %s, error: %v", name, err)). 
statusCode(http.StatusInternalServerError) if err = resp.WriteEntity(res); err != nil { - glog.Errorf("failed to response, methods: startVM, error: %v", err) + klog.Errorf("failed to write response, method: startVM, error: %v", err) } return } @@ -93,7 +93,7 @@ func (s *Server) startVM(req *restful.Request, resp *restful.Response) { if targetVM == nil { res.message(fmt.Sprintf("vm %s not found", name)).statusCode(http.StatusNotFound) if err = resp.WriteEntity(res); err != nil { - glog.Errorf("failed to response, methods: startVM, error: %v", err) + klog.Errorf("failed to write response, method: startVM, error: %v", err) } return } @@ -110,7 +110,7 @@ func (s *Server) stopVM(req *restful.Request, resp *restful.Response) { res.message(fmt.Sprintf("failed to get vm %s, error: %v", name, err)). statusCode(http.StatusInternalServerError) if err = resp.WriteEntity(res); err != nil { - glog.Errorf("failed to response, methods: stopVM, error: %v", err) + klog.Errorf("failed to write response, method: stopVM, error: %v", err) } return } @@ -118,7 +118,7 @@ func (s *Server) stopVM(req *restful.Request, resp *restful.Response) { if targetVM == nil { res.message(fmt.Sprintf("vm %s not found", name)).statusCode(http.StatusNotFound) if err = resp.WriteEntity(res); err != nil { - glog.Errorf("failed to response, methods: stopVM, error: %v", err) + klog.Errorf("failed to write response, method: stopVM, error: %v", err) } return } @@ -178,7 +178,7 @@ func (s *Server) action( res.message(fmt.Sprintf("failed to %s, error: %v", method, err)). statusCode(http.StatusInternalServerError) if err = resp.WriteEntity(res); err != nil { - glog.Errorf("failed to response, methods: %s, error: %v", method, err) + klog.Errorf("failed to write response, method: %s, error: %v", method, err) } return } @@ -186,7 +186,7 @@ func (s *Server) action( res.message("OK").statusCode(http.StatusOK) if err := resp.WriteEntity(res); err != nil { - glog.Errorf("failed to response, method: %s, error: %v", method, err) + klog.Errorf("failed to write response, method: %s, error: %v", method, err) } } @@ -202,7 +202,7 @@ func (s *Server) vmAction( res.message(fmt.Sprintf("failed to %s vm: %s, error: %v", method, targetVM.Name, err)). statusCode(http.StatusInternalServerError) if err = resp.WriteEntity(res); err != nil { - glog.Errorf("failed to response, methods: %s, error: %v", method, err) + klog.Errorf("failed to write response, method: %s, error: %v", method, err) } return } @@ -210,7 +210,7 @@ func (s *Server) vmAction( res.message("OK").statusCode(http.StatusOK) if err := resp.WriteEntity(res); err != nil { - glog.Errorf("failed to response, method: %s, error: %v", method, err) + klog.Errorf("failed to write response, method: %s, error: %v", method, err) } } @@ -226,7 +226,7 @@ func (s *Server) kubeProxyAction( res.message(fmt.Sprintf("failed to invoke %s, nodeName: %s, error: %v", method, nodeName, err)).
statusCode(http.StatusInternalServerError) if err = resp.WriteEntity(res); err != nil { - glog.Errorf("failed to response, methods: %s, error: %v", method, err) + klog.Errorf("failed to write response, method: %s, error: %v", method, err) } return } @@ -234,7 +234,7 @@ func (s *Server) kubeProxyAction( res.message("OK").statusCode(http.StatusOK) if err := resp.WriteEntity(res); err != nil { - glog.Errorf("failed to response, method: %s, error: %v", method, err) + klog.Errorf("failed to write response, method: %s, error: %v", method, err) } } diff --git a/tests/pkg/fault-trigger/client/client.go b/tests/pkg/fault-trigger/client/client.go index 2a8bac3a1b..2a6d151d16 100644 --- a/tests/pkg/fault-trigger/client/client.go +++ b/tests/pkg/fault-trigger/client/client.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/api" "github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/manager" "github.com/pingcap/tidb-operator/tests/pkg/util" - glog "k8s.io/klog" + "k8s.io/klog" ) // Client is a fault-trigger client @@ -153,7 +153,7 @@ func (c *client) ListVMs() ([]*manager.VM, error) { url := util.GenURL(fmt.Sprintf("%s%s/vms", c.cfg.Addr, api.APIPrefix)) data, err := c.get(url) if err != nil { - glog.Errorf("failed to get %s: %v", url, err) + klog.Errorf("failed to get %s: %v", url, err) return nil, err } @@ -174,7 +174,7 @@ func (c *client) StartVM(vm *manager.VM) error { url := util.GenURL(fmt.Sprintf("%s%s/vm/%s/start", c.cfg.Addr, api.APIPrefix, vmName)) if _, err := c.post(url, nil); err != nil { - glog.Errorf("faled to post %s: %v", url, err) + klog.Errorf("failed to post %s: %v", url, err) return err } @@ -190,7 +190,7 @@ func (c *client) StopVM(vm *manager.VM) error { url := util.GenURL(fmt.Sprintf("%s%s/vm/%s/stop", c.cfg.Addr, api.APIPrefix, vmName)) if _, err := c.post(url, nil); err != nil { - glog.Errorf("faled to post %s: %v", url, err) + klog.Errorf("failed to post %s: %v", url, err) return err } @@ -240,7 +240,7 @@ func (c *client) StopKubeControllerManager() error { func (c *client) startService(serviceName string) error { url := util.GenURL(fmt.Sprintf("%s%s/%s/start", c.cfg.Addr, api.APIPrefix, serviceName)) if _, err := c.post(url, nil); err != nil { - glog.Errorf("failed to post %s: %v", url, err) + klog.Errorf("failed to post %s: %v", url, err) return err } @@ -250,7 +250,7 @@ func (c *client) stopService(serviceName string) error { url := util.GenURL(fmt.Sprintf("%s%s/%s/stop", c.cfg.Addr, api.APIPrefix, serviceName)) if _, err := c.post(url, nil); err != nil { - glog.Errorf("failed to post %s: %v", url, err) + klog.Errorf("failed to post %s: %v", url, err) return err } diff --git a/tests/pkg/fault-trigger/manager/static_pod_service.go b/tests/pkg/fault-trigger/manager/static_pod_service.go index e07df0e5c4..49d0a09cd7 100644 --- a/tests/pkg/fault-trigger/manager/static_pod_service.go +++ b/tests/pkg/fault-trigger/manager/static_pod_service.go @@ -18,7 +18,7 @@ import ( "os" "os/exec" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -66,7 +66,7 @@ func (m *Manager) StopKubeControllerManager() error { func (m *Manager) stopStaticPodService(serviceName string, fileName string) error { manifest := fmt.Sprintf("%s/%s", staticPodPath, fileName) if _, err := os.Stat(manifest); os.IsNotExist(err) { - glog.Infof("%s had been stopped before", serviceName) + klog.Infof("%s has already been stopped", serviceName) return nil } shell := fmt.Sprintf("mkdir -p %s && mv %s %s",
staticPodTmpPath, manifest, staticPodTmpPath) @@ -74,11 +74,11 @@ func (m *Manager) stopStaticPodService(serviceName string, fileName string) erro cmd := exec.Command("/bin/sh", "-c", shell) output, err := cmd.CombinedOutput() if err != nil { - glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) + klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) return err } - glog.Infof("%s is stopped", serviceName) + klog.Infof("%s is stopped", serviceName) return nil } @@ -86,7 +86,7 @@ func (m *Manager) stopStaticPodService(serviceName string, fileName string) erro func (m *Manager) startStaticPodService(serviceName string, fileName string) error { manifest := fmt.Sprintf("%s/%s", staticPodTmpPath, fileName) if _, err := os.Stat(manifest); os.IsNotExist(err) { - glog.Infof("%s had been started before", serviceName) + klog.Infof("%s has already been started", serviceName) return nil } shell := fmt.Sprintf("mv %s %s", manifest, staticPodPath) @@ -94,11 +94,11 @@ func (m *Manager) startStaticPodService(serviceName string, fileName string) err cmd := exec.Command("/bin/sh", "-c", shell) output, err := cmd.CombinedOutput() if err != nil { - glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) + klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) return err } - glog.Infof("%s is started", serviceName) + klog.Infof("%s is started", serviceName) return nil } diff --git a/tests/pkg/fault-trigger/manager/systemctl_service.go b/tests/pkg/fault-trigger/manager/systemctl_service.go index b38a046701..52b8f2122e 100644 --- a/tests/pkg/fault-trigger/manager/systemctl_service.go +++ b/tests/pkg/fault-trigger/manager/systemctl_service.go @@ -17,7 +17,7 @@ import ( "fmt" "os/exec" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -50,11 +50,11 @@ func (m *Manager) systemctlStartService(serviceName string) error { cmd := exec.Command("/bin/sh", "-c", shell) output, err := cmd.CombinedOutput() if err != nil { - glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) + klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) return err } - glog.Infof("%s is started", serviceName) + klog.Infof("%s is started", serviceName) return nil } @@ -64,11 +64,11 @@ func (m *Manager) systemctlStopService(serviceName string) error { cmd := exec.Command("/bin/sh", "-c", shell) output, err := cmd.CombinedOutput() if err != nil { - glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) + klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) return err } - glog.Infof("%s is stopped", serviceName) + klog.Infof("%s is stopped", serviceName) return nil } diff --git a/tests/pkg/fault-trigger/manager/vm_qm.go b/tests/pkg/fault-trigger/manager/vm_qm.go index 91411c89d7..c5f822b42c 100644 --- a/tests/pkg/fault-trigger/manager/vm_qm.go +++ b/tests/pkg/fault-trigger/manager/vm_qm.go @@ -18,7 +18,7 @@ import ( "os/exec" "strings" - glog "k8s.io/klog" + "k8s.io/klog" ) type QMVMManager struct { @@ -33,7 +33,7 @@ func (qm *QMVMManager) ListVMs() ([]*VM, error) { cmd := exec.Command("/bin/sh", "-c", shell) output, err := cmd.CombinedOutput() if err != nil { - glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) + klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) return nil, err } vms := qm.parserVMs(string(output)) @@ -45,11 +45,11 @@ func (qm
*QMVMManager) StartVM(vm *VM) error { cmd := exec.Command("/bin/sh", "-c", shell) output, err := cmd.CombinedOutput() if err != nil { - glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) + klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) return err } - glog.Infof("virtual machine %s is started", vm.Name) + klog.Infof("virtual machine %s is started", vm.Name) return nil } @@ -59,11 +59,11 @@ func (qm *QMVMManager) StopVM(vm *VM) error { cmd := exec.Command("/bin/sh", "-c", shell) output, err := cmd.CombinedOutput() if err != nil { - glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) + klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) return err } - glog.Infof("virtual machine %s is stopped", vm.Name) + klog.Infof("virtual machine %s is stopped", vm.Name) return nil } diff --git a/tests/pkg/fault-trigger/manager/vm_virsh.go b/tests/pkg/fault-trigger/manager/vm_virsh.go index 8272b4cba2..dd482373f4 100644 --- a/tests/pkg/fault-trigger/manager/vm_virsh.go +++ b/tests/pkg/fault-trigger/manager/vm_virsh.go @@ -18,7 +18,7 @@ import ( "os/exec" "strings" - glog "k8s.io/klog" + "k8s.io/klog" ) type VirshVMManager struct { @@ -34,7 +34,7 @@ func (m *VirshVMManager) ListVMs() ([]*VM, error) { cmd := exec.Command("/bin/sh", "-c", shell) output, err := cmd.CombinedOutput() if err != nil { - glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) + klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) return nil, err } vms := m.parserVMs(string(output)) @@ -47,11 +47,11 @@ func (m *VirshVMManager) StopVM(v *VM) error { cmd := exec.Command("/bin/sh", "-c", shell) output, err := cmd.CombinedOutput() if err != nil { - glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) + klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) return err } - glog.Infof("virtual machine %s is stopped", v.Name) + klog.Infof("virtual machine %s is stopped", v.Name) return nil } @@ -62,11 +62,11 @@ func (m *VirshVMManager) StartVM(v *VM) error { cmd := exec.Command("/bin/sh", "-c", shell) output, err := cmd.CombinedOutput() if err != nil { - glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) + klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err) return err } - glog.Infof("virtual machine %s is started", v.Name) + klog.Infof("virtual machine %s is started", v.Name) return nil } diff --git a/tests/pkg/ops/exec.go b/tests/pkg/ops/exec.go index 297feafd30..896503a065 100644 --- a/tests/pkg/ops/exec.go +++ b/tests/pkg/ops/exec.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" - glog "k8s.io/klog" + "k8s.io/klog" ) // ExecOptions passed to ExecWithOptions @@ -47,7 +47,7 @@ type ExecOptions struct { // returning stdout, stderr and error. `options` allowed for // additional parameters to be passed. 
func (cli *ClientOps) ExecWithOptions(options ExecOptions) (string, string, error) { - glog.Infof("ExecWithOptions %+v", options) + klog.Infof("ExecWithOptions %+v", options) config, err := client.LoadConfig() if err != nil { diff --git a/tests/pkg/ops/tikv.go b/tests/pkg/ops/tikv.go index 43ed9a7c78..de7922b6ad 100644 --- a/tests/pkg/ops/tikv.go +++ b/tests/pkg/ops/tikv.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -70,7 +70,7 @@ func (ops *TiKVOps) TruncateSSTFile(opts TruncateOptions) error { } stdout, stderr, err := exec("find", "/var/lib/tikv/db", "-name", "*.sst", "-o", "-name", "*.save") if err != nil { - glog.Warningf(logHdr+"list sst files: stderr=%s err=%s", stderr, err.Error()) + klog.Warningf(logHdr+"list sst files: stderr=%s err=%s", stderr, err.Error()) continue } @@ -93,7 +93,7 @@ func (ops *TiKVOps) TruncateSSTFile(opts TruncateOptions) error { } } if len(ssts) == 0 { - glog.Warning(logHdr + "cannot find a sst file") + klog.Warning(logHdr + "cannot find an sst file") continue } @@ -102,17 +102,17 @@ func (ops *TiKVOps) TruncateSSTFile(opts TruncateOptions) error { _, stderr, err = exec("sh", "-c", fmt.Sprintf("cp %s %s.save && truncate -s 0 %s", sst, sst, sst)) if err != nil { - glog.Warningf(logHdr+"truncate sst file: sst=%s stderr=%s err=%s", sst, stderr, err.Error()) + klog.Warningf(logHdr+"truncate sst file: sst=%s stderr=%s err=%s", sst, stderr, err.Error()) continue } truncated++ } if truncated == 0 { - glog.Warningf(logHdr + "no sst file has been truncated") + klog.Warning(logHdr + "no sst file has been truncated") continue } - glog.Infof(logHdr+"%d sst files got truncated", truncated) + klog.Infof(logHdr+"%d sst files got truncated", truncated) break } @@ -125,14 +125,14 @@ func (ops *TiKVOps) TruncateSSTFile(opts TruncateOptions) error { func (ops *TiKVOps) RecoverSSTFile(ns, podName string) error { annotateCmd := fmt.Sprintf("kubectl annotate pod %s -n %s runmode=debug --overwrite", podName, ns) - glog.Info(annotateCmd) + klog.Info(annotateCmd) res, err := exec.Command("/bin/sh", "-c", annotateCmd).CombinedOutput() if err != nil { return fmt.Errorf("failed to annotation pod: %s/%s, %v, %s", ns, podName, err, string(res)) } findCmd := fmt.Sprintf("kubectl exec -n %s %s -- find /var/lib/tikv/db -name '*.sst.save'", ns, podName) - glog.Info(findCmd) + klog.Info(findCmd) findData, err := exec.Command("/bin/sh", "-c", findCmd).CombinedOutput() if err != nil { return fmt.Errorf("failed to find .save files: %s/%s, %v, %s", ns, podName, err, string(findData)) @@ -145,7 +145,7 @@ func (ops *TiKVOps) RecoverSSTFile(ns, podName string) error { } sstFile := strings.TrimSuffix(saveFile, ".save") mvCmd := fmt.Sprintf("kubectl exec -n %s %s -- mv %s %s", ns, podName, saveFile, sstFile) - glog.Info(mvCmd) + klog.Info(mvCmd) res, err := exec.Command("/bin/sh", "-c", mvCmd).CombinedOutput() if err != nil { return fmt.Errorf("failed to recovery .sst files: %s/%s, %s, %s, %v, %s", diff --git a/tests/pkg/util/db.go b/tests/pkg/util/db.go index 4f94d6d14a..ace69cdbe1 100644 --- a/tests/pkg/util/db.go +++ b/tests/pkg/util/db.go @@ -18,7 +18,7 @@ import ( "fmt" "strings" - glog "k8s.io/klog" + "k8s.io/klog" ) // OpenDB opens db @@ -29,7 +29,7 @@ func OpenDB(dsn string, maxIdleConns int) (*sql.DB, error) { } db.SetMaxIdleConns(maxIdleConns) - glog.V(4).Info("DB opens successfully") + klog.V(4).Info("DB opened successfully") return db, nil } diff --git
a/tests/pkg/util/utils.go b/tests/pkg/util/utils.go index 8f18244e03..87e8e8741b 100644 --- a/tests/pkg/util/utils.go +++ b/tests/pkg/util/utils.go @@ -19,7 +19,7 @@ import ( "os/exec" "strings" - glog "k8s.io/klog" + "k8s.io/klog" ) const ( @@ -70,7 +70,7 @@ func ListK8sNodes(kubectlPath, labels string) ([]string, error) { if len(nodes) == 0 { return nil, fmt.Errorf("get k8s nodes is empty") } - glog.Infof("get k8s nodes success: %s, labels: %s", nodes, labels) + klog.Infof("listed k8s nodes successfully: %s, labels: %s", nodes, labels) return nodes, nil } diff --git a/tests/pkg/webhook/pods.go b/tests/pkg/webhook/pods.go index 799c5e8c87..057b0d35ec 100644 --- a/tests/pkg/webhook/pods.go +++ b/tests/pkg/webhook/pods.go @@ -27,18 +27,18 @@ import ( "github.com/pingcap/tidb-operator/tests/pkg/client" "k8s.io/api/admission/v1beta1" - glog "k8s.io/klog" + "k8s.io/klog" ) // only allow pods to be delete when it is not ddlowner of tidb, not leader of pd and not // master of tikv. func (wh *webhook) admitPods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { - glog.V(4).Infof("admitting pods") + klog.V(4).Infof("admitting pods") podResource := metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} if ar.Request.Resource != podResource { err := fmt.Errorf("expect resource to be %s", podResource) - glog.Errorf("%v", err) + klog.Errorf("%v", err) return toAdmissionResponse(err) } @@ -51,22 +51,22 @@ func (wh *webhook) admitPods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionRespo reviewResponse.Allowed = false if !wh.namespaces.Has(namespace) { - glog.V(4).Infof("%q is not in our namespaces %v, skip", namespace, wh.namespaces.List()) + klog.V(4).Infof("%q is not in our namespaces %v, skip", namespace, wh.namespaces.List()) reviewResponse.Allowed = true return &reviewResponse } pod, err := kubeCli.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) if err != nil { - glog.Infof("api server send wrong pod info namespace %s name %s err %v", namespace, name, err) + klog.Infof("api server sent wrong pod info namespace %s name %s err %v", namespace, name, err) return &reviewResponse } - glog.V(4).Infof("delete %s pod [%s]", pod.Labels[label.ComponentLabelKey], pod.GetName()) + klog.V(4).Infof("delete %s pod [%s]", pod.Labels[label.ComponentLabelKey], pod.GetName()) tc, err := versionCli.PingcapV1alpha1().TidbClusters(namespace).Get(pod.Labels[label.InstanceLabelKey], metav1.GetOptions{}) if err != nil { - glog.Infof("fail to fetch tidbcluster info namespace %s clustername(instance) %s err %v", namespace, pod.Labels[label.InstanceLabelKey], err) + klog.Infof("failed to fetch tidbcluster info namespace %s clustername(instance) %s err %v", namespace, pod.Labels[label.InstanceLabelKey], err) return &reviewResponse } @@ -74,7 +74,7 @@ func (wh *webhook) admitPods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionRespo // if pod is already deleting, return Allowed if pod.DeletionTimestamp != nil { - glog.V(4).Infof("pod:[%s/%s] status is timestamp %s", namespace, name, pod.DeletionTimestamp) + klog.V(4).Infof("pod:[%s/%s] deletion timestamp is %s", namespace, name, pod.DeletionTimestamp) reviewResponse.Allowed = true return &reviewResponse } @@ -83,22 +83,22 @@ func (wh *webhook) admitPods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionRespo leader, err := pdClient.GetPDLeader() if err != nil { - glog.Errorf("fail to get pd leader %v", err) + klog.Errorf("failed to get pd leader: %v", err) return &reviewResponse } if leader.Name == name && tc.Status.PD.StatefulSet.Replicas > 1 { time.Sleep(10 *
time.Second) err := fmt.Errorf("pd is leader, can't be deleted namespace %s name %s", namespace, name) - glog.Error(err) + klog.Error(err) sendErr := slack.SendErrMsg(err.Error()) if sendErr != nil { - glog.Error(sendErr) + klog.Error(sendErr) } // TODO use context instead os.Exit(3) } - glog.Infof("savely delete pod namespace %s name %s leader name %s", namespace, name, leader.Name) + klog.Infof("safely delete pod namespace %s name %s leader name %s", namespace, name, leader.Name) } reviewResponse.Allowed = true return &reviewResponse diff --git a/tests/pkg/webhook/route.go b/tests/pkg/webhook/route.go index 22b32aecdc..32ab3ef332 100644 --- a/tests/pkg/webhook/route.go +++ b/tests/pkg/webhook/route.go @@ -21,7 +21,7 @@ import ( "k8s.io/api/admission/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - glog "k8s.io/klog" + "k8s.io/klog" ) // toAdmissionResponse is a helper function to create an AdmissionResponse @@ -83,10 +83,10 @@ func serve(w http.ResponseWriter, r *http.Request, admit admitFunc) { returnData: respBytes, err := json.Marshal(responseAdmissionReview) if err != nil { - glog.Error(err) + klog.Error(err) } if _, err := w.Write(respBytes); err != nil { - glog.Error(err) + klog.Error(err) } } diff --git a/tests/slack/slack.go b/tests/slack/slack.go index 6c0d948abb..fc465834cb 100644 --- a/tests/slack/slack.go +++ b/tests/slack/slack.go @@ -20,7 +20,7 @@ import ( "net/http" "time" - glog "k8s.io/klog" + "k8s.io/klog" ) var ( @@ -163,7 +163,7 @@ func SendWarnMsg(msg string) error { func NotifyAndPanic(err error) { sendErr := SendErrMsg(fmt.Sprintf("Succeed %d times, then failed: %s", SuccessCount, err.Error())) if sendErr != nil { - glog.Warningf("failed to notify slack[%s] the massage: %v,error: %v", WebhookURL, err, sendErr) + klog.Warningf("failed to notify slack[%s] the message: %v, error: %v", WebhookURL, err, sendErr) } time.Sleep(3 * time.Second) panic(err) @@ -173,7 +173,7 @@ func NotifyAndCompletedf(format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) sendErr := SendGoodMsg(msg) if sendErr != nil { - glog.Warningf("failed to notify slack[%s] the massage: %s,error: %v", WebhookURL, msg, sendErr) + klog.Warningf("failed to notify slack[%s] the message: %s, error: %v", WebhookURL, msg, sendErr) } - glog.Infof(msg) + klog.Info(msg) }
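Editor's note, not part of the patch above: every hunk in this migration makes the same move, dropping the redundant glog alias and importing k8s.io/klog under its own name. A minimal, hypothetical sketch of the setup a binary using the unaliased import ends up with is shown below; the package name and log messages are illustrative only, while klog.InitFlags, klog.Flush, klog.Info/Infof, and klog.V(...).Info are the real klog API used throughout the diff.

// sketch.go: illustrative klog setup under the unaliased import style.
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog.InitFlags registers klog's flags (-v, -logtostderr, ...);
	// passing nil attaches them to the default flag.CommandLine set.
	klog.InitFlags(nil)
	flag.Parse()
	// Flush any buffered log lines before the process exits.
	defer klog.Flush()

	klog.Info("plain info message")
	klog.Infof("formatted message: %d", 42)
	// V(4) lines only appear when run with -v=4 or higher, matching the
	// klog.V(4) call sites in the test code above.
	klog.V(4).Info("verbose diagnostic message")
}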