diff --git a/go.mod b/go.mod index bef073147e..f68556cd59 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22.0 require ( cloud.google.com/go/storage v1.40.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 diff --git a/go.sum b/go.sum index 184f015c32..f454beeef8 100644 --- a/go.sum +++ b/go.sum @@ -50,8 +50,8 @@ cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2u dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE= diff --git 
a/pkg/cmd/cli/nodeagent/server.go b/pkg/cmd/cli/nodeagent/server.go index 20a4654fb6..006030c4e2 100644 --- a/pkg/cmd/cli/nodeagent/server.go +++ b/pkg/cmd/cli/nodeagent/server.go @@ -53,6 +53,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/buildinfo" "github.com/vmware-tanzu/velero/pkg/client" "github.com/vmware-tanzu/velero/pkg/cmd" + "github.com/vmware-tanzu/velero/pkg/cmd/server/config" "github.com/vmware-tanzu/velero/pkg/cmd/util/signals" "github.com/vmware-tanzu/velero/pkg/controller" "github.com/vmware-tanzu/velero/pkg/datapath" @@ -285,7 +286,7 @@ func (s *nodeAgentServer) run() { credentialGetter, s.nodeName, s.mgr.GetScheme(), s.metrics, s.logger) if err := pvbReconciler.SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.PodVolumeBackup) + s.logger.Fatal(err, "unable to create controller", "controller", config.ControllerPodVolumeBackup) } if err = controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.dataPathMgr, repoEnsurer, credentialGetter, s.logger).SetupWithManager(s.mgr); err != nil { diff --git a/pkg/cmd/server/config/config.go b/pkg/cmd/server/config/config.go new file mode 100644 index 0000000000..96143085db --- /dev/null +++ b/pkg/cmd/server/config/config.go @@ -0,0 +1,263 @@ +package config + +import ( + "fmt" + "strings" + "time" + + "github.com/sirupsen/logrus" + "github.com/spf13/pflag" + + "github.com/vmware-tanzu/velero/pkg/cmd/util/flag" + "github.com/vmware-tanzu/velero/pkg/podvolume" + "github.com/vmware-tanzu/velero/pkg/repository" + "github.com/vmware-tanzu/velero/pkg/uploader" + "github.com/vmware-tanzu/velero/pkg/util/logging" +) + +const ( + // the port where prometheus metrics are exposed + defaultMetricsAddress = ":8085" + + defaultBackupSyncPeriod = time.Minute + defaultStoreValidationFrequency = time.Minute + 
defaultPodVolumeOperationTimeout = 240 * time.Minute + defaultResourceTerminatingTimeout = 10 * time.Minute + + // server's client default qps and burst + defaultClientQPS float32 = 100.0 + defaultClientBurst int = 100 + defaultClientPageSize int = 500 + + defaultProfilerAddress = "localhost:6060" + + // the default TTL for a backup + defaultBackupTTL = 30 * 24 * time.Hour + + defaultCSISnapshotTimeout = 10 * time.Minute + defaultItemOperationTimeout = 4 * time.Hour + + resourceTimeout = 10 * time.Minute + + defaultMaxConcurrentK8SConnections = 30 + defaultDisableInformerCache = false + + // defaultCredentialsDirectory is the path on disk where credential + // files will be written to + defaultCredentialsDirectory = "/tmp/credentials" + + ControllerBackup = "backup" + ControllerBackupOperations = "backup-operations" + ControllerBackupDeletion = "backup-deletion" + ControllerBackupFinalizer = "backup-finalizer" + ControllerBackupRepo = "backup-repo" + ControllerBackupStorageLocation = "backup-storage-location" + ControllerBackupSync = "backup-sync" + ControllerDownloadRequest = "download-request" + ControllerGarbageCollection = "gc" + ControllerPodVolumeBackup = "pod-volume-backup" + ControllerPodVolumeRestore = "pod-volume-restore" + ControllerRestore = "restore" + ControllerRestoreOperations = "restore-operations" + ControllerSchedule = "schedule" + ControllerServerStatusRequest = "server-status-request" + ControllerRestoreFinalizer = "restore-finalizer" +) + +var ( + // DisableableControllers is a list of controllers that can be disabled + DisableableControllers = []string{ + ControllerBackup, + ControllerBackupOperations, + ControllerBackupDeletion, + ControllerBackupFinalizer, + ControllerBackupSync, + ControllerDownloadRequest, + ControllerGarbageCollection, + ControllerBackupRepo, + ControllerRestore, + ControllerRestoreOperations, + ControllerSchedule, + ControllerServerStatusRequest, + ControllerRestoreFinalizer, + } + + /* + High priorities: + - Custom 
Resource Definitions come before Custom Resources so that they can be + restored with their corresponding CRD. + - Namespaces go second because all namespaced resources depend on them. + - Storage Classes are needed to create PVs and PVCs correctly. + - VolumeSnapshotClasses are needed to provision volumes using volumesnapshots + - VolumeSnapshotContents are needed as they contain the handle to the volume snapshot in the + storage provider + - VolumeSnapshots are needed to create PVCs using the VolumeSnapshot as their data source. + - DataUploads need to be restored before PVCs for Snapshot DataMover to work, because PVC needs the DataUploadResults to create DataDownloads. + - PVs go before PVCs because PVCs depend on them. + - PVCs go before pods or controllers so they can be mounted as volumes. + - Service accounts go before secrets so service account token secrets can be filled automatically. + - Secrets and ConfigMaps go before pods or controllers so they can be mounted + as volumes. + - Limit ranges go before pods or controllers so pods can use them. + - Pods go before controllers so they can be explicitly restored and potentially + have pod volume restores run before controllers adopt the pods. + - Replica sets go before deployments/other controllers so they can be explicitly + restored and be adopted by controllers. + - CAPI ClusterClasses go before Clusters. + - Endpoints go before Services so no new Endpoints will be created + - Services go before Clusters so they can be adopted by AKO-operator and no new Services will be created + for the same clusters + + Low priorities: + - Tanzu ClusterBootstraps go last as they can reference any other kind of resources. + - ClusterBootstraps go before CAPI Clusters otherwise a new default ClusterBootstrap object is created for the cluster + - CAPI Clusters come before ClusterResourceSets because failing to do so means the CAPI controller-manager will panic. 
+ Both Clusters and ClusterResourceSets need to come before ClusterResourceSetBinding in order to properly restore workload clusters. + See https://github.com/kubernetes-sigs/cluster-api/issues/4105 + */ + defaultRestorePriorities = Priorities{ + HighPriorities: []string{ + "customresourcedefinitions", + "namespaces", + "storageclasses", + "volumesnapshotclass.snapshot.storage.k8s.io", + "volumesnapshotcontents.snapshot.storage.k8s.io", + "volumesnapshots.snapshot.storage.k8s.io", + "datauploads.velero.io", + "persistentvolumes", + "persistentvolumeclaims", + "serviceaccounts", + "secrets", + "configmaps", + "limitranges", + "pods", + // we fully qualify replicasets.apps because prior to Kubernetes 1.16, replicasets also + // existed in the extensions API group, but we back up replicasets from "apps" so we want + // to ensure that we prioritize restoring from "apps" too, since this is how they're stored + // in the backup. + "replicasets.apps", + "clusterclasses.cluster.x-k8s.io", + "endpoints", + "services", + }, + LowPriorities: []string{ + "clusterbootstraps.run.tanzu.vmware.com", + "clusters.cluster.x-k8s.io", + "clusterresourcesets.addons.cluster.x-k8s.io", + }, + } +) + +type Config struct { + PluginDir string + MetricsAddress string + DefaultBackupLocation string // TODO(2.0) Deprecate defaultBackupLocation + BackupSyncPeriod time.Duration + PodVolumeOperationTimeout time.Duration + ResourceTerminatingTimeout time.Duration + DefaultBackupTTL time.Duration + StoreValidationFrequency time.Duration + DefaultCSISnapshotTimeout time.Duration + DefaultItemOperationTimeout time.Duration + ResourceTimeout time.Duration + RestoreResourcePriorities Priorities + DefaultVolumeSnapshotLocations flag.Map + RestoreOnly bool + DisabledControllers []string + ClientQPS float32 + ClientBurst int + ClientPageSize int + ProfilerAddress string + LogLevel *logging.LevelFlag + LogFormat *logging.FormatFlag + RepoMaintenanceFrequency time.Duration + 
GarbageCollectionFrequency time.Duration + ItemOperationSyncFrequency time.Duration + DefaultVolumesToFsBackup bool + UploaderType string + MaxConcurrentK8SConnections int + DefaultSnapshotMoveData bool + DisableInformerCache bool + ScheduleSkipImmediately bool + MaintenanceCfg repository.MaintenanceConfig + BackukpRepoConfig string + CredentialsDirectory string +} + +func GetDefaultConfig() *Config { + config := &Config{ + PluginDir: "/plugins", + MetricsAddress: defaultMetricsAddress, + DefaultBackupLocation: "default", + DefaultVolumeSnapshotLocations: flag.NewMap().WithKeyValueDelimiter(':'), + BackupSyncPeriod: defaultBackupSyncPeriod, + DefaultBackupTTL: defaultBackupTTL, + DefaultCSISnapshotTimeout: defaultCSISnapshotTimeout, + DefaultItemOperationTimeout: defaultItemOperationTimeout, + ResourceTimeout: resourceTimeout, + StoreValidationFrequency: defaultStoreValidationFrequency, + PodVolumeOperationTimeout: defaultPodVolumeOperationTimeout, + RestoreResourcePriorities: defaultRestorePriorities, + ClientQPS: defaultClientQPS, + ClientBurst: defaultClientBurst, + ClientPageSize: defaultClientPageSize, + ProfilerAddress: defaultProfilerAddress, + ResourceTerminatingTimeout: defaultResourceTerminatingTimeout, + LogLevel: logging.LogLevelFlag(logrus.InfoLevel), + LogFormat: logging.NewFormatFlag(), + DefaultVolumesToFsBackup: podvolume.DefaultVolumesToFsBackup, + UploaderType: uploader.ResticType, + MaxConcurrentK8SConnections: defaultMaxConcurrentK8SConnections, + DefaultSnapshotMoveData: false, + DisableInformerCache: defaultDisableInformerCache, + ScheduleSkipImmediately: false, + CredentialsDirectory: defaultCredentialsDirectory, + } + + config.MaintenanceCfg = repository.MaintenanceConfig{ + KeepLatestMaitenanceJobs: repository.DefaultKeepLatestMaitenanceJobs, + // maintenance job log setting inherited from velero server + FormatFlag: config.LogFormat, + LogLevelFlag: config.LogLevel, + } + + return config +} + +func (c *Config) BindFlags(flags 
*pflag.FlagSet) { + flags.Var(c.LogLevel, "log-level", fmt.Sprintf("The level at which to log. Valid values are %s.", strings.Join(c.LogLevel.AllowedValues(), ", "))) + flags.Var(c.LogFormat, "log-format", fmt.Sprintf("The format for log output. Valid values are %s.", strings.Join(c.LogFormat.AllowedValues(), ", "))) + flags.StringVar(&c.PluginDir, "plugin-dir", c.PluginDir, "Directory containing Velero plugins") + flags.StringVar(&c.MetricsAddress, "metrics-address", c.MetricsAddress, "The address to expose prometheus metrics") + flags.DurationVar(&c.BackupSyncPeriod, "backup-sync-period", c.BackupSyncPeriod, "How often to ensure all Velero backups in object storage exist as Backup API objects in the cluster. This is the default sync period if none is explicitly specified for a backup storage location.") + flags.DurationVar(&c.PodVolumeOperationTimeout, "fs-backup-timeout", c.PodVolumeOperationTimeout, "How long pod volume file system backups/restores should be allowed to run before timing out.") + flags.BoolVar(&c.RestoreOnly, "restore-only", c.RestoreOnly, "Run in a mode where only restores are allowed; backups, schedules, and garbage-collection are all disabled. DEPRECATED: this flag will be removed in v2.0. Use read-only backup storage locations instead.") + flags.StringSliceVar(&c.DisabledControllers, "disable-controllers", c.DisabledControllers, fmt.Sprintf("List of controllers to disable on startup. Valid values are %s", strings.Join(DisableableControllers, ","))) + flags.Var(&c.RestoreResourcePriorities, "restore-resource-priorities", "Desired order of resource restores, the priority list contains two parts which are split by \"-\" element. 
The resources before \"-\" element are restored first as high priorities, the resources after \"-\" element are restored last as low priorities, and any resource not in the list will be restored alphabetically between the high and low priorities.") + flags.StringVar(&c.DefaultBackupLocation, "default-backup-storage-location", c.DefaultBackupLocation, "Name of the default backup storage location. DEPRECATED: this flag will be removed in v2.0. Use \"velero backup-location set --default\" instead.") + flags.DurationVar(&c.StoreValidationFrequency, "store-validation-frequency", c.StoreValidationFrequency, "How often to verify if the storage is valid. Optional. Set this to `0s` to disable sync. Default 1 minute.") + flags.Float32Var(&c.ClientQPS, "client-qps", c.ClientQPS, "Maximum number of requests per second by the server to the Kubernetes API once the burst limit has been reached.") + flags.IntVar(&c.ClientBurst, "client-burst", c.ClientBurst, "Maximum number of requests by the server to the Kubernetes API in a short period of time.") + flags.IntVar(&c.ClientPageSize, "client-page-size", c.ClientPageSize, "Page size of requests by the server to the Kubernetes API when listing objects during a backup. 
Set to 0 to disable paging.") + flags.StringVar(&c.ProfilerAddress, "profiler-address", c.ProfilerAddress, "The address to expose the pprof profiler.") + flags.DurationVar(&c.ResourceTerminatingTimeout, "terminating-resource-timeout", c.ResourceTerminatingTimeout, "How long to wait on persistent volumes and namespaces to terminate during a restore before timing out.") + flags.DurationVar(&c.DefaultBackupTTL, "default-backup-ttl", c.DefaultBackupTTL, "How long to wait by default before backups can be garbage collected.") + flags.DurationVar(&c.RepoMaintenanceFrequency, "default-repo-maintain-frequency", c.RepoMaintenanceFrequency, "How often 'maintain' is run for backup repositories by default.") + flags.DurationVar(&c.GarbageCollectionFrequency, "garbage-collection-frequency", c.GarbageCollectionFrequency, "How often garbage collection is run for expired backups.") + flags.DurationVar(&c.ItemOperationSyncFrequency, "item-operation-sync-frequency", c.ItemOperationSyncFrequency, "How often to check status on backup/restore operations after backup/restore processing. Default is 10 seconds") + flags.BoolVar(&c.DefaultVolumesToFsBackup, "default-volumes-to-fs-backup", c.DefaultVolumesToFsBackup, "Backup all volumes with pod volume file system backup by default.") + flags.StringVar(&c.UploaderType, "uploader-type", c.UploaderType, "Type of uploader to handle the transfer of data of pod volumes") + flags.DurationVar(&c.DefaultItemOperationTimeout, "default-item-operation-timeout", c.DefaultItemOperationTimeout, "How long to wait on asynchronous BackupItemActions and RestoreItemActions to complete before timing out. Default is 4 hours") + flags.DurationVar(&c.ResourceTimeout, "resource-timeout", c.ResourceTimeout, "How long to wait for resource processes which are not covered by other specific timeout parameters. 
Default is 10 minutes.") + flags.IntVar(&c.MaxConcurrentK8SConnections, "max-concurrent-k8s-connections", c.MaxConcurrentK8SConnections, "Max concurrent connections number that Velero can create with kube-apiserver. Default is 30.") + flags.BoolVar(&c.DefaultSnapshotMoveData, "default-snapshot-move-data", c.DefaultSnapshotMoveData, "Move data by default for all snapshots supporting data movement.") + flags.BoolVar(&c.DisableInformerCache, "disable-informer-cache", c.DisableInformerCache, "Disable informer cache for Get calls on restore. With this enabled, it will speed up restore in cases where there are backup resources which already exist in the cluster, but for very large clusters this will increase velero memory usage. Default is false (don't disable).") + flags.BoolVar(&c.ScheduleSkipImmediately, "schedule-skip-immediately", c.ScheduleSkipImmediately, "Skip the first scheduled backup immediately after creating a schedule. Default is false (don't skip).") + flags.IntVar(&c.MaintenanceCfg.KeepLatestMaitenanceJobs, "keep-latest-maintenance-jobs", c.MaintenanceCfg.KeepLatestMaitenanceJobs, "Number of latest maintenance jobs to keep each repository. Optional.") + flags.StringVar(&c.MaintenanceCfg.CPURequest, "maintenance-job-cpu-request", c.MaintenanceCfg.CPURequest, "CPU request for maintenance job. Default is no limit.") + flags.StringVar(&c.MaintenanceCfg.MemRequest, "maintenance-job-mem-request", c.MaintenanceCfg.MemRequest, "Memory request for maintenance job. Default is no limit.") + flags.StringVar(&c.MaintenanceCfg.CPULimit, "maintenance-job-cpu-limit", c.MaintenanceCfg.CPULimit, "CPU limit for maintenance job. Default is no limit.") + flags.StringVar(&c.MaintenanceCfg.MemLimit, "maintenance-job-mem-limit", c.MaintenanceCfg.MemLimit, "Memory limit for maintenance job. 
Default is no limit.") + flags.StringVar(&c.BackukpRepoConfig, "backup-repository-config", c.BackukpRepoConfig, "The name of configMap containing backup repository configurations.") + flags.Var(&c.DefaultVolumeSnapshotLocations, "default-volume-snapshot-locations", "List of unique volume providers and default volume snapshot location (provider1:location-01,provider2:location-02,...)") +} diff --git a/pkg/restore/priority.go b/pkg/cmd/server/config/priority.go similarity index 99% rename from pkg/restore/priority.go rename to pkg/cmd/server/config/priority.go index 5897c3af7b..a072429b4a 100644 --- a/pkg/restore/priority.go +++ b/pkg/cmd/server/config/priority.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restore +package config import ( "fmt" diff --git a/pkg/restore/priority_test.go b/pkg/cmd/server/config/priority_test.go similarity index 99% rename from pkg/restore/priority_test.go rename to pkg/cmd/server/config/priority_test.go index 9472cebd8d..ced2feb438 100644 --- a/pkg/restore/priority_test.go +++ b/pkg/cmd/server/config/priority_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restore +package config import ( "testing" diff --git a/pkg/cmd/server/plugin/plugin.go b/pkg/cmd/server/plugin/plugin.go index 265d526075..076be10d4d 100644 --- a/pkg/cmd/server/plugin/plugin.go +++ b/pkg/cmd/server/plugin/plugin.go @@ -45,6 +45,9 @@ func NewCommand(f client.Factory) *cobra.Command { Hidden: true, Short: "INTERNAL COMMAND ONLY - not intended to be run directly by users", Run: func(c *cobra.Command, args []string) { + config := pluginServer.GetConfig() + f.SetClientQPS(config.ClientQPS) + f.SetClientBurst(config.ClientBurst) pluginServer = pluginServer. 
RegisterBackupItemAction( "velero.io/pv", @@ -197,6 +200,9 @@ func NewCommand(f client.Factory) *cobra.Command { } pluginServer.Serve() }, + FParseErrWhitelist: cobra.FParseErrWhitelist{ + UnknownFlags: true, + }, } pluginServer.BindFlags(c.Flags()) return c diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index 3a2b070243..45520ed4c9 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -62,7 +62,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/buildinfo" "github.com/vmware-tanzu/velero/pkg/client" "github.com/vmware-tanzu/velero/pkg/cmd" - "github.com/vmware-tanzu/velero/pkg/cmd/util/flag" + "github.com/vmware-tanzu/velero/pkg/cmd/server/config" "github.com/vmware-tanzu/velero/pkg/cmd/util/signals" "github.com/vmware-tanzu/velero/pkg/controller" velerodiscovery "github.com/vmware-tanzu/velero/pkg/discovery" @@ -83,100 +83,8 @@ import ( "github.com/vmware-tanzu/velero/pkg/util/logging" ) -const ( - // the port where prometheus metrics are exposed - defaultMetricsAddress = ":8085" - - defaultBackupSyncPeriod = time.Minute - defaultStoreValidationFrequency = time.Minute - defaultPodVolumeOperationTimeout = 240 * time.Minute - defaultResourceTerminatingTimeout = 10 * time.Minute - - // server's client default qps and burst - defaultClientQPS float32 = 100.0 - defaultClientBurst int = 100 - defaultClientPageSize int = 500 - - defaultProfilerAddress = "localhost:6060" - - // the default TTL for a backup - defaultBackupTTL = 30 * 24 * time.Hour - - defaultCSISnapshotTimeout = 10 * time.Minute - defaultItemOperationTimeout = 4 * time.Hour - - resourceTimeout = 10 * time.Minute - - // defaultCredentialsDirectory is the path on disk where credential - // files will be written to - defaultCredentialsDirectory = "/tmp/credentials" - - defaultMaxConcurrentK8SConnections = 30 - defaultDisableInformerCache = false -) - -type serverConfig struct { - // TODO(2.0) 
Deprecate defaultBackupLocation - pluginDir, metricsAddress, defaultBackupLocation string - backupSyncPeriod, podVolumeOperationTimeout, resourceTerminatingTimeout time.Duration - defaultBackupTTL, storeValidationFrequency, defaultCSISnapshotTimeout time.Duration - defaultItemOperationTimeout, resourceTimeout time.Duration - restoreResourcePriorities restore.Priorities - defaultVolumeSnapshotLocations map[string]string - restoreOnly bool - disabledControllers []string - clientQPS float32 - clientBurst int - clientPageSize int - profilerAddress string - formatFlag *logging.FormatFlag - repoMaintenanceFrequency time.Duration - garbageCollectionFrequency time.Duration - itemOperationSyncFrequency time.Duration - defaultVolumesToFsBackup bool - uploaderType string - maxConcurrentK8SConnections int - defaultSnapshotMoveData bool - disableInformerCache bool - scheduleSkipImmediately bool - maintenanceCfg repository.MaintenanceConfig - backukpRepoConfig string -} - func NewCommand(f client.Factory) *cobra.Command { - var ( - volumeSnapshotLocations = flag.NewMap().WithKeyValueDelimiter(':') - logLevelFlag = logging.LogLevelFlag(logrus.InfoLevel) - config = serverConfig{ - pluginDir: "/plugins", - metricsAddress: defaultMetricsAddress, - defaultBackupLocation: "default", - defaultVolumeSnapshotLocations: make(map[string]string), - backupSyncPeriod: defaultBackupSyncPeriod, - defaultBackupTTL: defaultBackupTTL, - defaultCSISnapshotTimeout: defaultCSISnapshotTimeout, - defaultItemOperationTimeout: defaultItemOperationTimeout, - resourceTimeout: resourceTimeout, - storeValidationFrequency: defaultStoreValidationFrequency, - podVolumeOperationTimeout: defaultPodVolumeOperationTimeout, - restoreResourcePriorities: defaultRestorePriorities, - clientQPS: defaultClientQPS, - clientBurst: defaultClientBurst, - clientPageSize: defaultClientPageSize, - profilerAddress: defaultProfilerAddress, - resourceTerminatingTimeout: defaultResourceTerminatingTimeout, - formatFlag: 
logging.NewFormatFlag(), - defaultVolumesToFsBackup: podvolume.DefaultVolumesToFsBackup, - uploaderType: uploader.ResticType, - maxConcurrentK8SConnections: defaultMaxConcurrentK8SConnections, - defaultSnapshotMoveData: false, - disableInformerCache: defaultDisableInformerCache, - scheduleSkipImmediately: false, - maintenanceCfg: repository.MaintenanceConfig{ - KeepLatestMaitenanceJobs: repository.DefaultKeepLatestMaitenanceJobs, - }, - } - ) + config := config.GetDefaultConfig() var command = &cobra.Command{ Use: "server", @@ -188,8 +96,8 @@ func NewCommand(f client.Factory) *cobra.Command { // set its output to stdout. log.SetOutput(os.Stdout) - logLevel := logLevelFlag.Parse() - format := config.formatFlag.Parse() + logLevel := config.LogLevel.Parse() + format := config.LogFormat.Parse() // Make sure we log to stdout so cloud log dashboards don't show this as an error. logrus.SetOutput(os.Stdout) @@ -206,10 +114,6 @@ func NewCommand(f client.Factory) *cobra.Command { logger.Info("No feature flags enabled") } - if volumeSnapshotLocations.Data() != nil { - config.defaultVolumeSnapshotLocations = volumeSnapshotLocations.Data() - } - f.SetBasename(fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name())) s, err := newServer(f, config, logger) @@ -219,46 +123,8 @@ func NewCommand(f client.Factory) *cobra.Command { }, } - command.Flags().Var(logLevelFlag, "log-level", fmt.Sprintf("The level at which to log. Valid values are %s.", strings.Join(logLevelFlag.AllowedValues(), ", "))) - command.Flags().Var(config.formatFlag, "log-format", fmt.Sprintf("The format for log output. 
Valid values are %s.", strings.Join(config.formatFlag.AllowedValues(), ", "))) - command.Flags().StringVar(&config.pluginDir, "plugin-dir", config.pluginDir, "Directory containing Velero plugins") - command.Flags().StringVar(&config.metricsAddress, "metrics-address", config.metricsAddress, "The address to expose prometheus metrics") - command.Flags().DurationVar(&config.backupSyncPeriod, "backup-sync-period", config.backupSyncPeriod, "How often to ensure all Velero backups in object storage exist as Backup API objects in the cluster. This is the default sync period if none is explicitly specified for a backup storage location.") - command.Flags().DurationVar(&config.podVolumeOperationTimeout, "fs-backup-timeout", config.podVolumeOperationTimeout, "How long pod volume file system backups/restores should be allowed to run before timing out.") - command.Flags().BoolVar(&config.restoreOnly, "restore-only", config.restoreOnly, "Run in a mode where only restores are allowed; backups, schedules, and garbage-collection are all disabled. DEPRECATED: this flag will be removed in v2.0. Use read-only backup storage locations instead.") - command.Flags().StringSliceVar(&config.disabledControllers, "disable-controllers", config.disabledControllers, fmt.Sprintf("List of controllers to disable on startup. Valid values are %s", strings.Join(controller.DisableableControllers, ","))) - command.Flags().Var(&config.restoreResourcePriorities, "restore-resource-priorities", "Desired order of resource restores, the priority list contains two parts which are split by \"-\" element. 
The resources before \"-\" element are restored first as high priorities, the resources after \"-\" element are restored last as low priorities, and any resource not in the list will be restored alphabetically between the high and low priorities.") - command.Flags().StringVar(&config.defaultBackupLocation, "default-backup-storage-location", config.defaultBackupLocation, "Name of the default backup storage location. DEPRECATED: this flag will be removed in v2.0. Use \"velero backup-location set --default\" instead.") - command.Flags().DurationVar(&config.storeValidationFrequency, "store-validation-frequency", config.storeValidationFrequency, "How often to verify if the storage is valid. Optional. Set this to `0s` to disable sync. Default 1 minute.") - command.Flags().Var(&volumeSnapshotLocations, "default-volume-snapshot-locations", "List of unique volume providers and default volume snapshot location (provider1:location-01,provider2:location-02,...)") - command.Flags().Float32Var(&config.clientQPS, "client-qps", config.clientQPS, "Maximum number of requests per second by the server to the Kubernetes API once the burst limit has been reached.") - command.Flags().IntVar(&config.clientBurst, "client-burst", config.clientBurst, "Maximum number of requests by the server to the Kubernetes API in a short period of time.") - command.Flags().IntVar(&config.clientPageSize, "client-page-size", config.clientPageSize, "Page size of requests by the server to the Kubernetes API when listing objects during a backup. 
Set to 0 to disable paging.") - command.Flags().StringVar(&config.profilerAddress, "profiler-address", config.profilerAddress, "The address to expose the pprof profiler.") - command.Flags().DurationVar(&config.resourceTerminatingTimeout, "terminating-resource-timeout", config.resourceTerminatingTimeout, "How long to wait on persistent volumes and namespaces to terminate during a restore before timing out.") - command.Flags().DurationVar(&config.defaultBackupTTL, "default-backup-ttl", config.defaultBackupTTL, "How long to wait by default before backups can be garbage collected.") - command.Flags().DurationVar(&config.repoMaintenanceFrequency, "default-repo-maintain-frequency", config.repoMaintenanceFrequency, "How often 'maintain' is run for backup repositories by default.") - command.Flags().DurationVar(&config.garbageCollectionFrequency, "garbage-collection-frequency", config.garbageCollectionFrequency, "How often garbage collection is run for expired backups.") - command.Flags().DurationVar(&config.itemOperationSyncFrequency, "item-operation-sync-frequency", config.itemOperationSyncFrequency, "How often to check status on backup/restore operations after backup/restore processing. Default is 10 seconds") - command.Flags().BoolVar(&config.defaultVolumesToFsBackup, "default-volumes-to-fs-backup", config.defaultVolumesToFsBackup, "Backup all volumes with pod volume file system backup by default.") - command.Flags().StringVar(&config.uploaderType, "uploader-type", config.uploaderType, "Type of uploader to handle the transfer of data of pod volumes") - command.Flags().DurationVar(&config.defaultItemOperationTimeout, "default-item-operation-timeout", config.defaultItemOperationTimeout, "How long to wait on asynchronous BackupItemActions and RestoreItemActions to complete before timing out. 
Default is 4 hours") - command.Flags().DurationVar(&config.resourceTimeout, "resource-timeout", config.resourceTimeout, "How long to wait for resource processes which are not covered by other specific timeout parameters. Default is 10 minutes.") - command.Flags().IntVar(&config.maxConcurrentK8SConnections, "max-concurrent-k8s-connections", config.maxConcurrentK8SConnections, "Max concurrent connections number that Velero can create with kube-apiserver. Default is 30.") - command.Flags().BoolVar(&config.defaultSnapshotMoveData, "default-snapshot-move-data", config.defaultSnapshotMoveData, "Move data by default for all snapshots supporting data movement.") - command.Flags().BoolVar(&config.disableInformerCache, "disable-informer-cache", config.disableInformerCache, "Disable informer cache for Get calls on restore. With this enabled, it will speed up restore in cases where there are backup resources which already exist in the cluster, but for very large clusters this will increase velero memory usage. Default is false (don't disable).") - command.Flags().BoolVar(&config.scheduleSkipImmediately, "schedule-skip-immediately", config.scheduleSkipImmediately, "Skip the first scheduled backup immediately after creating a schedule. Default is false (don't skip).") - command.Flags().IntVar(&config.maintenanceCfg.KeepLatestMaitenanceJobs, "keep-latest-maintenance-jobs", config.maintenanceCfg.KeepLatestMaitenanceJobs, "Number of latest maintenance jobs to keep each repository. Optional.") - command.Flags().StringVar(&config.maintenanceCfg.CPURequest, "maintenance-job-cpu-request", config.maintenanceCfg.CPURequest, "CPU request for maintenance job. Default is no limit.") - command.Flags().StringVar(&config.maintenanceCfg.MemRequest, "maintenance-job-mem-request", config.maintenanceCfg.MemRequest, "Memory request for maintenance job. 
Default is no limit.") - command.Flags().StringVar(&config.maintenanceCfg.CPULimit, "maintenance-job-cpu-limit", config.maintenanceCfg.CPULimit, "CPU limit for maintenance job. Default is no limit.") - command.Flags().StringVar(&config.maintenanceCfg.MemLimit, "maintenance-job-mem-limit", config.maintenanceCfg.MemLimit, "Memory limit for maintenance job. Default is no limit.") - - command.Flags().StringVar(&config.backukpRepoConfig, "backup-repository-config", config.backukpRepoConfig, "The name of configMap containing backup repository configurations.") - - // maintenance job log setting inherited from velero server - config.maintenanceCfg.FormatFlag = config.formatFlag - config.maintenanceCfg.LogLevelFlag = logLevelFlag + config.BindFlags(command.Flags()) + return command } @@ -284,30 +150,30 @@ type server struct { repoLocker *repository.RepoLocker repoEnsurer *repository.Ensurer metrics *metrics.ServerMetrics - config serverConfig + config *config.Config mgr manager.Manager credentialFileStore credentials.FileStore credentialSecretStore credentials.SecretStore } -func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*server, error) { - if msg, err := uploader.ValidateUploaderType(config.uploaderType); err != nil { +func newServer(f client.Factory, config *config.Config, logger *logrus.Logger) (*server, error) { + if msg, err := uploader.ValidateUploaderType(config.UploaderType); err != nil { return nil, err } else if msg != "" { logger.Warn(msg) } - if config.clientQPS < 0.0 { + if config.ClientQPS < 0.0 { return nil, errors.New("client-qps must be positive") } - f.SetClientQPS(config.clientQPS) + f.SetClientQPS(config.ClientQPS) - if config.clientBurst <= 0 { + if config.ClientBurst <= 0 { return nil, errors.New("client-burst must be positive") } - f.SetClientBurst(config.clientBurst) + f.SetClientBurst(config.ClientBurst) - if config.clientPageSize < 0 { + if config.ClientPageSize < 0 { return nil, errors.New("client-page-size must not 
be negative") } @@ -326,7 +192,7 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s return nil, err } - pluginRegistry := process.NewRegistry(config.pluginDir, logger, logger.Level) + pluginRegistry := process.NewRegistry(config.PluginDir, logger, logger.Level) if err := pluginRegistry.DiscoverPlugins(); err != nil { return nil, err } @@ -387,7 +253,7 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s credentialFileStore, err := credentials.NewNamespacedFileStore( mgr.GetClient(), f.Namespace(), - defaultCredentialsDirectory, + config.CredentialsDirectory, filesystem.NewFileSystem(), ) if err != nil { @@ -409,7 +275,7 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s s := &server{ namespace: f.Namespace(), - metricsAddress: config.metricsAddress, + metricsAddress: config.MetricsAddress, kubeClientConfig: clientConfig, kubeClient: kubeClient, discoveryClient: discoveryClient, @@ -432,7 +298,7 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s func (s *server) run() error { signals.CancelOnShutdown(s.cancelFunc, s.logger) - if s.config.profilerAddress != "" { + if s.config.ProfilerAddress != "" { go s.runProfiler() } @@ -461,7 +327,7 @@ func (s *server) run() error { return err } - if err := s.runControllers(s.config.defaultVolumeSnapshotLocations); err != nil { + if err := s.runControllers(s.config.DefaultVolumeSnapshotLocations.Data()); err != nil { return err } @@ -478,7 +344,7 @@ func (s *server) setupBeforeControllerRun() error { markInProgressCRsFailed(s.ctx, client, s.namespace, s.logger) - if err := setDefaultBackupLocation(s.ctx, client, s.namespace, s.config.defaultBackupLocation, s.logger); err != nil { + if err := setDefaultBackupLocation(s.ctx, client, s.namespace, s.config.DefaultBackupLocation, s.logger); err != nil { return err } return nil @@ -584,71 +450,6 @@ func (s *server) veleroResourcesExist() error { return nil } 
-/* -High priorities: - - Custom Resource Definitions come before Custom Resource so that they can be - restored with their corresponding CRD. - - Namespaces go second because all namespaced resources depend on them. - - Storage Classes are needed to create PVs and PVCs correctly. - - VolumeSnapshotClasses are needed to provision volumes using volumesnapshots - - VolumeSnapshotContents are needed as they contain the handle to the volume snapshot in the - storage provider - - VolumeSnapshots are needed to create PVCs using the VolumeSnapshot as their data source. - - DataUploads need to restore before PVC for Snapshot DataMover to work, because PVC needs the DataUploadResults to create DataDownloads. - - PVs go before PVCs because PVCs depend on them. - - PVCs go before pods or controllers so they can be mounted as volumes. - - Service accounts go before secrets so service account token secrets can be filled automatically. - - Secrets and ConfigMaps go before pods or controllers so they can be mounted - as volumes. - - Limit ranges go before pods or controllers so pods can use them. - - Pods go before controllers so they can be explicitly restored and potentially - have pod volume restores run before controllers adopt the pods. - - Replica sets go before deployments/other controllers so they can be explicitly - restored and be adopted by controllers. - - CAPI ClusterClasses go before Clusters. - - Endpoints go before Services so no new Endpoints will be created - - Services go before Clusters so they can be adopted by AKO-operator and no new Services will be created - for the same clusters - -Low priorities: - - Tanzu ClusterBootstraps go last as it can reference any other kind of resources. - - ClusterBootstraps go before CAPI Clusters otherwise a new default ClusterBootstrap object is created for the cluster - - CAPI Clusters come before ClusterResourceSets because failing to do so means the CAPI controller-manager will panic. 
- Both Clusters and ClusterResourceSets need to come before ClusterResourceSetBinding in order to properly restore workload clusters. - See https://github.com/kubernetes-sigs/cluster-api/issues/4105 -*/ -var defaultRestorePriorities = restore.Priorities{ - HighPriorities: []string{ - "customresourcedefinitions", - "namespaces", - "storageclasses", - "volumesnapshotclass.snapshot.storage.k8s.io", - "volumesnapshotcontents.snapshot.storage.k8s.io", - "volumesnapshots.snapshot.storage.k8s.io", - "datauploads.velero.io", - "persistentvolumes", - "persistentvolumeclaims", - "serviceaccounts", - "secrets", - "configmaps", - "limitranges", - "pods", - // we fully qualify replicasets.apps because prior to Kubernetes 1.16, replicasets also - // existed in the extensions API group, but we back up replicasets from "apps" so we want - // to ensure that we prioritize restoring from "apps" too, since this is how they're stored - // in the backup. - "replicasets.apps", - "clusterclasses.cluster.x-k8s.io", - "endpoints", - "services", - }, - LowPriorities: []string{ - "clusterbootstraps.run.tanzu.vmware.com", - "clusters.cluster.x-k8s.io", - "clusterresourcesets.addons.cluster.x-k8s.io", - }, -} - func (s *server) checkNodeAgent() { // warn if node agent does not exist if err := nodeagent.IsRunning(s.ctx, s.kubeClient, s.namespace); err == nodeagent.ErrDaemonSetNotFound { @@ -665,9 +466,9 @@ func (s *server) initRepoManager() error { } s.repoLocker = repository.NewRepoLocker() - s.repoEnsurer = repository.NewEnsurer(s.mgr.GetClient(), s.logger, s.config.resourceTimeout) + s.repoEnsurer = repository.NewEnsurer(s.mgr.GetClient(), s.logger, s.config.ResourceTimeout) - s.repoManager = repository.NewManager(s.namespace, s.mgr.GetClient(), s.repoLocker, s.repoEnsurer, s.credentialFileStore, s.credentialSecretStore, s.config.maintenanceCfg, s.logger) + s.repoManager = repository.NewManager(s.namespace, s.mgr.GetClient(), s.repoLocker, s.repoEnsurer, s.credentialFileStore, 
s.credentialSecretStore, s.config.MaintenanceCfg, s.logger) return nil } @@ -707,36 +508,36 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string // and BSL controller is mandatory for Velero to work. // Note: all runtime type controllers that can be disabled are grouped separately, below: enabledRuntimeControllers := map[string]struct{}{ - controller.Backup: {}, - controller.BackupDeletion: {}, - controller.BackupFinalizer: {}, - controller.BackupOperations: {}, - controller.BackupRepo: {}, - controller.BackupSync: {}, - controller.DownloadRequest: {}, - controller.GarbageCollection: {}, - controller.Restore: {}, - controller.RestoreOperations: {}, - controller.Schedule: {}, - controller.ServerStatusRequest: {}, - controller.RestoreFinalizer: {}, - } - - if s.config.restoreOnly { + config.ControllerBackup: {}, + config.ControllerBackupDeletion: {}, + config.ControllerBackupFinalizer: {}, + config.ControllerBackupOperations: {}, + config.ControllerBackupRepo: {}, + config.ControllerBackupSync: {}, + config.ControllerDownloadRequest: {}, + config.ControllerGarbageCollection: {}, + config.ControllerRestore: {}, + config.ControllerRestoreOperations: {}, + config.ControllerSchedule: {}, + config.ControllerServerStatusRequest: {}, + config.ControllerRestoreFinalizer: {}, + } + + if s.config.RestoreOnly { s.logger.Info("Restore only mode - not starting the backup, schedule, delete-backup, or GC controllers") - s.config.disabledControllers = append(s.config.disabledControllers, - controller.Backup, - controller.BackupDeletion, - controller.BackupFinalizer, - controller.BackupOperations, - controller.GarbageCollection, - controller.Schedule, + s.config.DisabledControllers = append(s.config.DisabledControllers, + config.ControllerBackup, + config.ControllerBackupDeletion, + config.ControllerBackupFinalizer, + config.ControllerBackupOperations, + config.ControllerGarbageCollection, + config.ControllerSchedule, ) } // Remove disabled controllers 
so they are not initialized. If a match is not found we want // to halt the system so the user knows this operation was not possible. - if err := removeControllers(s.config.disabledControllers, enabledRuntimeControllers, s.logger); err != nil { + if err := removeControllers(s.config.DisabledControllers, enabledRuntimeControllers, s.logger); err != nil { log.Fatal(err, "unable to disable a controller") } @@ -745,15 +546,15 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string s.ctx, s.mgr.GetClient(), storage.DefaultBackupLocationInfo{ - StorageLocation: s.config.defaultBackupLocation, - ServerValidationFrequency: s.config.storeValidationFrequency, + StorageLocation: s.config.DefaultBackupLocation, + ServerValidationFrequency: s.config.StoreValidationFrequency, }, newPluginManager, backupStoreGetter, s.logger, ) if err := bslr.SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupStorageLocation) + s.logger.Fatal(err, "unable to create controller", "controller", config.ControllerBackupStorageLocation) } pvbInformer, err := s.mgr.GetCache().GetInformer(s.ctx, &velerov1api.PodVolumeBackup{}) @@ -761,7 +562,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string s.logger.Fatal(err, "fail to get controller-runtime informer from manager for PVB") } - if _, ok := enabledRuntimeControllers[controller.Backup]; ok { + if _, ok := enabledRuntimeControllers[config.ControllerBackup]; ok { backupper, err := backup.NewKubernetesBackupper( s.crClient, s.discoveryHelper, @@ -774,10 +575,10 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string pvbInformer, s.logger, ), - s.config.podVolumeOperationTimeout, - s.config.defaultVolumesToFsBackup, - s.config.clientPageSize, - s.config.uploaderType, + s.config.PodVolumeOperationTimeout, + s.config.DefaultVolumesToFsBackup, + s.config.ClientPageSize, + s.config.UploaderType, 
newPluginManager, backupStoreGetter, ) @@ -791,26 +592,26 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string newPluginManager, backupTracker, s.mgr.GetClient(), - s.config.defaultBackupLocation, - s.config.defaultVolumesToFsBackup, - s.config.defaultBackupTTL, - s.config.defaultCSISnapshotTimeout, - s.config.resourceTimeout, - s.config.defaultItemOperationTimeout, + s.config.DefaultBackupLocation, + s.config.DefaultVolumesToFsBackup, + s.config.DefaultBackupTTL, + s.config.DefaultCSISnapshotTimeout, + s.config.ResourceTimeout, + s.config.DefaultItemOperationTimeout, defaultVolumeSnapshotLocations, s.metrics, backupStoreGetter, - s.config.formatFlag.Parse(), + s.config.LogFormat.Parse(), s.credentialFileStore, - s.config.maxConcurrentK8SConnections, - s.config.defaultSnapshotMoveData, + s.config.MaxConcurrentK8SConnections, + s.config.DefaultSnapshotMoveData, s.crClient, ).SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.Backup) + s.logger.Fatal(err, "unable to create controller", "controller", config.ControllerBackup) } } - if _, ok := enabledRuntimeControllers[controller.BackupDeletion]; ok { + if _, ok := enabledRuntimeControllers[config.ControllerBackupDeletion]; ok { if err := controller.NewBackupDeletionReconciler( s.logger, s.mgr.GetClient(), @@ -823,27 +624,27 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string s.credentialFileStore, s.repoEnsurer, ).SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupDeletion) + s.logger.Fatal(err, "unable to create controller", "controller", config.ControllerBackupDeletion) } } backupOpsMap := itemoperationmap.NewBackupItemOperationsMap() - if _, ok := enabledRuntimeControllers[controller.BackupOperations]; ok { + if _, ok := enabledRuntimeControllers[config.ControllerBackupOperations]; ok { r := 
controller.NewBackupOperationsReconciler( s.logger, s.mgr.GetClient(), - s.config.itemOperationSyncFrequency, + s.config.ItemOperationSyncFrequency, newPluginManager, backupStoreGetter, s.metrics, backupOpsMap, ) if err := r.SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupOperations) + s.logger.Fatal(err, "unable to create controller", "controller", config.ControllerBackupOperations) } } - if _, ok := enabledRuntimeControllers[controller.BackupFinalizer]; ok { + if _, ok := enabledRuntimeControllers[config.ControllerBackupFinalizer]; ok { backupper, err := backup.NewKubernetesBackupper( s.mgr.GetClient(), s.discoveryHelper, @@ -856,10 +657,10 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string pvbInformer, s.logger, ), - s.config.podVolumeOperationTimeout, - s.config.defaultVolumesToFsBackup, - s.config.clientPageSize, - s.config.uploaderType, + s.config.PodVolumeOperationTimeout, + s.config.DefaultVolumesToFsBackup, + s.config.ClientPageSize, + s.config.UploaderType, newPluginManager, backupStoreGetter, ) @@ -876,18 +677,18 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string s.metrics, ) if err := r.SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupFinalizer) + s.logger.Fatal(err, "unable to create controller", "controller", config.ControllerBackupFinalizer) } } - if _, ok := enabledRuntimeControllers[controller.BackupRepo]; ok { - if err := controller.NewBackupRepoReconciler(s.namespace, s.logger, s.mgr.GetClient(), s.config.repoMaintenanceFrequency, s.config.backukpRepoConfig, s.repoManager).SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupRepo) + if _, ok := enabledRuntimeControllers[config.ControllerBackupRepo]; ok { + if err := controller.NewBackupRepoReconciler(s.namespace, 
s.logger, s.mgr.GetClient(), s.config.RepoMaintenanceFrequency, s.config.BackukpRepoConfig, s.repoManager).SetupWithManager(s.mgr); err != nil { + s.logger.Fatal(err, "unable to create controller", "controller", config.ControllerBackupRepo) } } - if _, ok := enabledRuntimeControllers[controller.BackupSync]; ok { - syncPeriod := s.config.backupSyncPeriod + if _, ok := enabledRuntimeControllers[config.ControllerBackupSync]; ok { + syncPeriod := s.config.BackupSyncPeriod if syncPeriod <= 0 { syncPeriod = time.Minute } @@ -901,28 +702,28 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string s.logger, ) if err := backupSyncReconciler.SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, " unable to create controller ", "controller ", controller.BackupSync) + s.logger.Fatal(err, " unable to create controller ", "controller ", config.ControllerBackupSync) } } restoreOpsMap := itemoperationmap.NewRestoreItemOperationsMap() - if _, ok := enabledRuntimeControllers[controller.RestoreOperations]; ok { + if _, ok := enabledRuntimeControllers[config.ControllerRestoreOperations]; ok { r := controller.NewRestoreOperationsReconciler( s.logger, s.namespace, s.mgr.GetClient(), - s.config.itemOperationSyncFrequency, + s.config.ItemOperationSyncFrequency, newPluginManager, backupStoreGetter, s.metrics, restoreOpsMap, ) if err := r.SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.RestoreOperations) + s.logger.Fatal(err, "unable to create controller", "controller", config.ControllerRestoreOperations) } } - if _, ok := enabledRuntimeControllers[controller.DownloadRequest]; ok { + if _, ok := enabledRuntimeControllers[config.ControllerDownloadRequest]; ok { r := controller.NewDownloadRequestReconciler( s.mgr.GetClient(), clock.RealClock{}, @@ -933,14 +734,14 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string restoreOpsMap, ) if err := 
r.SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.DownloadRequest) + s.logger.Fatal(err, "unable to create controller", "controller", config.ControllerDownloadRequest) } } - if _, ok := enabledRuntimeControllers[controller.GarbageCollection]; ok { - r := controller.NewGCReconciler(s.logger, s.mgr.GetClient(), s.config.garbageCollectionFrequency) + if _, ok := enabledRuntimeControllers[config.ControllerGarbageCollection]; ok { + r := controller.NewGCReconciler(s.logger, s.mgr.GetClient(), s.config.GarbageCollectionFrequency) if err := r.SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.GarbageCollection) + s.logger.Fatal(err, "unable to create controller", "controller", config.ControllerGarbageCollection) } } @@ -951,11 +752,11 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string multiHookTracker := hook.NewMultiHookTracker() - if _, ok := enabledRuntimeControllers[controller.Restore]; ok { + if _, ok := enabledRuntimeControllers[config.ControllerRestore]; ok { restorer, err := restore.NewKubernetesRestorer( s.discoveryHelper, client.NewDynamicFactory(s.dynamicClient), - s.config.restoreResourcePriorities, + s.config.RestoreResourcePriorities, s.kubeClient.CoreV1().Namespaces(), podvolume.NewRestorerFactory( s.repoLocker, @@ -965,9 +766,9 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string pvrInformer, s.logger, ), - s.config.podVolumeOperationTimeout, - s.config.resourceTerminatingTimeout, - s.config.resourceTimeout, + s.config.PodVolumeOperationTimeout, + s.config.ResourceTerminatingTimeout, + s.config.ResourceTimeout, s.logger, podexec.NewPodCommandExecutor(s.kubeClientConfig, s.kubeClient.CoreV1().RESTClient()), s.kubeClient.CoreV1().RESTClient(), @@ -988,24 +789,24 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string newPluginManager, 
backupStoreGetter, s.metrics, - s.config.formatFlag.Parse(), - s.config.defaultItemOperationTimeout, - s.config.disableInformerCache, + s.config.LogFormat.Parse(), + s.config.DefaultItemOperationTimeout, + s.config.DisableInformerCache, s.crClient, ) if err = r.SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "fail to create controller", "controller", controller.Restore) + s.logger.Fatal(err, "fail to create controller", "controller", config.ControllerRestore) } } - if _, ok := enabledRuntimeControllers[controller.Schedule]; ok { - if err := controller.NewScheduleReconciler(s.namespace, s.logger, s.mgr.GetClient(), s.metrics, s.config.scheduleSkipImmediately).SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.Schedule) + if _, ok := enabledRuntimeControllers[config.ControllerSchedule]; ok { + if err := controller.NewScheduleReconciler(s.namespace, s.logger, s.mgr.GetClient(), s.metrics, s.config.ScheduleSkipImmediately).SetupWithManager(s.mgr); err != nil { + s.logger.Fatal(err, "unable to create controller", "controller", config.ControllerSchedule) } } - if _, ok := enabledRuntimeControllers[controller.ServerStatusRequest]; ok { + if _, ok := enabledRuntimeControllers[config.ControllerServerStatusRequest]; ok { if err := controller.NewServerStatusRequestReconciler( s.ctx, s.mgr.GetClient(), @@ -1013,11 +814,11 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string clock.RealClock{}, s.logger, ).SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.ServerStatusRequest) + s.logger.Fatal(err, "unable to create controller", "controller", config.ControllerServerStatusRequest) } } - if _, ok := enabledRuntimeControllers[controller.RestoreFinalizer]; ok { + if _, ok := enabledRuntimeControllers[config.ControllerRestoreFinalizer]; ok { if err := controller.NewRestoreFinalizerReconciler( s.logger, 
s.namespace, @@ -1027,9 +828,9 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string s.metrics, s.crClient, multiHookTracker, - s.config.resourceTimeout, + s.config.ResourceTimeout, ).SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.RestoreFinalizer) + s.logger.Fatal(err, "unable to create controller", "controller", config.ControllerRestoreFinalizer) } } @@ -1050,7 +851,7 @@ func removeControllers(disabledControllers []string, enabledRuntimeControllers m logger.Infof("Disabling controller: %s", controllerName) delete(enabledRuntimeControllers, controllerName) } else { - msg := fmt.Sprintf("Invalid value for --disable-controllers flag provided: %s. Valid values are: %s", controllerName, strings.Join(controller.DisableableControllers, ",")) + msg := fmt.Sprintf("Invalid value for --disable-controllers flag provided: %s. Valid values are: %s", controllerName, strings.Join(config.DisableableControllers, ",")) return errors.New(msg) } } @@ -1066,7 +867,7 @@ func (s *server) runProfiler() { mux.HandleFunc("/debug/pprof/trace", pprof.Trace) server := &http.Server{ - Addr: s.config.profilerAddress, + Addr: s.config.ProfilerAddress, Handler: mux, ReadHeaderTimeout: 3 * time.Second, } diff --git a/pkg/cmd/server/server_test.go b/pkg/cmd/server/server_test.go index 6e59955861..10c935b7fc 100644 --- a/pkg/cmd/server/server_test.go +++ b/pkg/cmd/server/server_test.go @@ -35,7 +35,7 @@ import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" velerov2alpha1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" "github.com/vmware-tanzu/velero/pkg/client/mocks" - "github.com/vmware-tanzu/velero/pkg/controller" + "github.com/vmware-tanzu/velero/pkg/cmd/server/config" discovery_mocks "github.com/vmware-tanzu/velero/pkg/discovery/mocks" velerotest "github.com/vmware-tanzu/velero/pkg/test" 
"github.com/vmware-tanzu/velero/pkg/uploader" @@ -112,31 +112,31 @@ func TestRemoveControllers(t *testing.T) { { name: "Remove one disable controller", disabledControllers: []string{ - controller.Backup, + config.ControllerBackup, }, errorExpected: false, }, { name: "Remove all disable controllers", disabledControllers: []string{ - controller.BackupOperations, - controller.Backup, - controller.BackupDeletion, - controller.BackupSync, - controller.DownloadRequest, - controller.GarbageCollection, - controller.BackupRepo, - controller.Restore, - controller.Schedule, - controller.ServerStatusRequest, + config.ControllerBackupOperations, + config.ControllerBackup, + config.ControllerBackupDeletion, + config.ControllerBackupSync, + config.ControllerDownloadRequest, + config.ControllerGarbageCollection, + config.ControllerBackupRepo, + config.ControllerRestore, + config.ControllerSchedule, + config.ControllerServerStatusRequest, }, errorExpected: false, }, { name: "Remove with a non-disable controller included", disabledControllers: []string{ - controller.Backup, - controller.BackupStorageLocation, + config.ControllerBackup, + config.ControllerBackupStorageLocation, }, errorExpected: true, }, @@ -151,16 +151,16 @@ func TestRemoveControllers(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { enabledRuntimeControllers := map[string]struct{}{ - controller.BackupSync: {}, - controller.Backup: {}, - controller.GarbageCollection: {}, - controller.Restore: {}, - controller.ServerStatusRequest: {}, - controller.Schedule: {}, - controller.BackupDeletion: {}, - controller.BackupRepo: {}, - controller.DownloadRequest: {}, - controller.BackupOperations: {}, + config.ControllerBackupSync: {}, + config.ControllerBackup: {}, + config.ControllerGarbageCollection: {}, + config.ControllerRestore: {}, + config.ControllerServerStatusRequest: {}, + config.ControllerSchedule: {}, + config.ControllerBackupDeletion: {}, + config.ControllerBackupRepo: {}, + 
config.ControllerDownloadRequest: {}, + config.ControllerBackupOperations: {}, } totalNumOriginalControllers := len(enabledRuntimeControllers) @@ -191,42 +191,42 @@ func Test_newServer(t *testing.T) { logger := logrus.New() // invalid uploader type - _, err := newServer(factory, serverConfig{ - uploaderType: "invalid", + _, err := newServer(factory, &config.Config{ + UploaderType: "invalid", }, logger) assert.Error(t, err) // invalid clientQPS - _, err = newServer(factory, serverConfig{ - uploaderType: uploader.KopiaType, - clientQPS: -1, + _, err = newServer(factory, &config.Config{ + UploaderType: uploader.KopiaType, + ClientQPS: -1, }, logger) assert.Error(t, err) // invalid clientQPS Restic uploader - _, err = newServer(factory, serverConfig{ - uploaderType: uploader.ResticType, - clientQPS: -1, + _, err = newServer(factory, &config.Config{ + UploaderType: uploader.ResticType, + ClientQPS: -1, }, logger) assert.Error(t, err) // invalid clientBurst factory.On("SetClientQPS", mock.Anything).Return() - _, err = newServer(factory, serverConfig{ - uploaderType: uploader.KopiaType, - clientQPS: 1, - clientBurst: -1, + _, err = newServer(factory, &config.Config{ + UploaderType: uploader.KopiaType, + ClientQPS: 1, + ClientBurst: -1, }, logger) assert.Error(t, err) // invalid clientBclientPageSizeurst factory.On("SetClientQPS", mock.Anything).Return(). On("SetClientBurst", mock.Anything).Return() - _, err = newServer(factory, serverConfig{ - uploaderType: uploader.KopiaType, - clientQPS: 1, - clientBurst: 1, - clientPageSize: -1, + _, err = newServer(factory, &config.Config{ + UploaderType: uploader.KopiaType, + ClientQPS: 1, + ClientBurst: 1, + ClientPageSize: -1, }, logger) assert.Error(t, err) @@ -236,11 +236,11 @@ func Test_newServer(t *testing.T) { On("KubeClient").Return(nil, nil). On("Client").Return(nil, nil). 
On("DynamicClient").Return(nil, errors.New("error")) - _, err = newServer(factory, serverConfig{ - uploaderType: uploader.KopiaType, - clientQPS: 1, - clientBurst: 1, - clientPageSize: 100, + _, err = newServer(factory, &config.Config{ + UploaderType: uploader.KopiaType, + ClientQPS: 1, + ClientBurst: 1, + ClientPageSize: 100, }, logger) assert.Error(t, err) } diff --git a/pkg/controller/backup_controller.go b/pkg/controller/backup_controller.go index 989617722c..4103e1286c 100644 --- a/pkg/controller/backup_controller.go +++ b/pkg/controller/backup_controller.go @@ -42,6 +42,7 @@ import ( "github.com/vmware-tanzu/velero/internal/volume" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" pkgbackup "github.com/vmware-tanzu/velero/pkg/backup" + "github.com/vmware-tanzu/velero/pkg/cmd/server/config" "github.com/vmware-tanzu/velero/pkg/discovery" "github.com/vmware-tanzu/velero/pkg/features" "github.com/vmware-tanzu/velero/pkg/label" @@ -201,7 +202,7 @@ func getLastSuccessBySchedule(backups []velerov1api.Backup) map[string]time.Time func (b *backupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := b.logger.WithFields(logrus.Fields{ - "controller": Backup, + "controller": config.ControllerBackup, "backuprequest": req.String(), }) @@ -591,16 +592,16 @@ func (b *backupReconciler) validateAndGetSnapshotLocations(backup *velerov1api.B // field is checked to see if the backup was a partial failure. func (b *backupReconciler) runBackup(backup *pkgbackup.Request) error { - b.logger.WithField(Backup, kubeutil.NamespaceAndName(backup)).Info("Setting up backup log") + b.logger.WithField(config.ControllerBackup, kubeutil.NamespaceAndName(backup)).Info("Setting up backup log") // Log the backup to both a backup log file and to stdout. This will help see what happened if the upload of the // backup log failed for whatever reason. 
logCounter := logging.NewLogHook() - backupLog, err := logging.NewTempFileLogger(b.backupLogLevel, b.formatFlag, logCounter, logrus.Fields{Backup: kubeutil.NamespaceAndName(backup)}) + backupLog, err := logging.NewTempFileLogger(b.backupLogLevel, b.formatFlag, logCounter, logrus.Fields{config.ControllerBackup: kubeutil.NamespaceAndName(backup)}) if err != nil { return errors.Wrap(err, "error creating dual mode logger for backup") } - defer backupLog.Dispose(b.logger.WithField(Backup, kubeutil.NamespaceAndName(backup))) + defer backupLog.Dispose(b.logger.WithField(config.ControllerBackup, kubeutil.NamespaceAndName(backup))) backupLog.Info("Setting up backup temp file") backupFile, err := os.CreateTemp("", "") @@ -678,7 +679,7 @@ func (b *backupReconciler) runBackup(backup *pkgbackup.Request) error { "errors": backupErrors, } - backupLog.DoneForPersist(b.logger.WithField(Backup, kubeutil.NamespaceAndName(backup))) + backupLog.DoneForPersist(b.logger.WithField(config.ControllerBackup, kubeutil.NamespaceAndName(backup))) // Assign finalize phase as close to end as possible so that any errors // logged to backupLog are captured. This is done before uploading the @@ -725,7 +726,7 @@ func (b *backupReconciler) runBackup(backup *pkgbackup.Request) error { } } - b.logger.WithField(Backup, kubeutil.NamespaceAndName(backup)).Infof("Initial backup processing complete, moving to %s", backup.Status.Phase) + b.logger.WithField(config.ControllerBackup, kubeutil.NamespaceAndName(backup)).Infof("Initial backup processing complete, moving to %s", backup.Status.Phase) // if we return a non-nil error, the calling function will update // the backup's phase to Failed. 
diff --git a/pkg/controller/backup_deletion_controller.go b/pkg/controller/backup_deletion_controller.go index f3a7f32b5d..fc917890f5 100644 --- a/pkg/controller/backup_deletion_controller.go +++ b/pkg/controller/backup_deletion_controller.go @@ -40,6 +40,7 @@ import ( "github.com/vmware-tanzu/velero/internal/volume" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" + "github.com/vmware-tanzu/velero/pkg/cmd/server/config" "github.com/vmware-tanzu/velero/pkg/discovery" "github.com/vmware-tanzu/velero/pkg/label" "github.com/vmware-tanzu/velero/pkg/metrics" @@ -114,7 +115,7 @@ func (r *backupDeletionReconciler) SetupWithManager(mgr ctrl.Manager) error { func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := r.logger.WithFields(logrus.Fields{ - "controller": BackupDeletion, + "controller": config.ControllerBackupDeletion, "deletebackuprequest": req.String(), }) log.Debug("Getting deletebackuprequest") diff --git a/pkg/controller/backup_storage_location_controller.go b/pkg/controller/backup_storage_location_controller.go index 4f742cdba1..caffc08d6d 100644 --- a/pkg/controller/backup_storage_location_controller.go +++ b/pkg/controller/backup_storage_location_controller.go @@ -30,6 +30,7 @@ import ( "github.com/vmware-tanzu/velero/internal/storage" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/cmd/server/config" "github.com/vmware-tanzu/velero/pkg/persistence" "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt" "github.com/vmware-tanzu/velero/pkg/util/kube" @@ -80,7 +81,7 @@ func (r *backupStorageLocationReconciler) Reconcile(ctx context.Context, req ctr var unavailableErrors []string var location velerov1api.BackupStorageLocation - log := 
r.log.WithField("controller", BackupStorageLocation).WithField(BackupStorageLocation, req.NamespacedName.String()) + log := r.log.WithField("controller", config.ControllerBackupStorageLocation).WithField(config.ControllerBackupStorageLocation, req.NamespacedName.String()) log.Debug("Validating availability of BackupStorageLocation") locationList, err := storage.ListBackupStorageLocations(r.ctx, r.client, req.Namespace) @@ -155,7 +156,7 @@ func (r *backupStorageLocationReconciler) logReconciledPhase(defaultFound bool, var availableBSLs []*velerov1api.BackupStorageLocation var unAvailableBSLs []*velerov1api.BackupStorageLocation var unknownBSLs []*velerov1api.BackupStorageLocation - log := r.log.WithField("controller", BackupStorageLocation) + log := r.log.WithField("controller", config.ControllerBackupStorageLocation) for i, location := range locationList.Items { phase := location.Status.Phase @@ -198,7 +199,7 @@ func (r *backupStorageLocationReconciler) SetupWithManager(mgr ctrl.Manager) err ) gp := kube.NewGenericEventPredicate(func(object client.Object) bool { location := object.(*velerov1api.BackupStorageLocation) - return storage.IsReadyToValidate(location.Spec.ValidationFrequency, location.Status.LastValidationTime, r.defaultBackupLocationInfo.ServerValidationFrequency, r.log.WithField("controller", BackupStorageLocation)) + return storage.IsReadyToValidate(location.Spec.ValidationFrequency, location.Status.LastValidationTime, r.defaultBackupLocationInfo.ServerValidationFrequency, r.log.WithField("controller", config.ControllerBackupStorageLocation)) }) return ctrl.NewControllerManagedBy(mgr). 
// As the "status.LastValidationTime" field is always updated, this triggers new reconciling process, skip the update event that include no spec change to avoid the reconcile loop diff --git a/pkg/controller/backup_sync_controller.go b/pkg/controller/backup_sync_controller.go index 70c8101dd0..06b391aa00 100644 --- a/pkg/controller/backup_sync_controller.go +++ b/pkg/controller/backup_sync_controller.go @@ -34,6 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/builder" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/cmd/server/config" "github.com/vmware-tanzu/velero/pkg/features" "github.com/vmware-tanzu/velero/pkg/label" "github.com/vmware-tanzu/velero/pkg/persistence" @@ -78,7 +79,7 @@ func NewBackupSyncReconciler( // Reconcile syncs between the backups in cluster and backups metadata in object store. func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - log := b.logger.WithField("controller", BackupSync) + log := b.logger.WithField("controller", config.ControllerBackupSync) log = log.WithField("backupLocation", req.String()) log.Debug("Begin to sync between backups' metadata in BSL object storage and cluster's existing backups.") diff --git a/pkg/controller/constants.go b/pkg/controller/constants.go deleted file mode 100644 index 1a769cf51f..0000000000 --- a/pkg/controller/constants.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2020 the Velero contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -const ( - Backup = "backup" - BackupOperations = "backup-operations" - BackupDeletion = "backup-deletion" - BackupFinalizer = "backup-finalizer" - BackupRepo = "backup-repo" - BackupStorageLocation = "backup-storage-location" - BackupSync = "backup-sync" - DownloadRequest = "download-request" - GarbageCollection = "gc" - PodVolumeBackup = "pod-volume-backup" - PodVolumeRestore = "pod-volume-restore" - Restore = "restore" - RestoreOperations = "restore-operations" - Schedule = "schedule" - ServerStatusRequest = "server-status-request" - RestoreFinalizer = "restore-finalizer" -) - -// DisableableControllers is a list of controllers that can be disabled -var DisableableControllers = []string{ - Backup, - BackupOperations, - BackupDeletion, - BackupFinalizer, - BackupSync, - DownloadRequest, - GarbageCollection, - BackupRepo, - Restore, - RestoreOperations, - Schedule, - ServerStatusRequest, - RestoreFinalizer, -} diff --git a/pkg/controller/server_status_request_controller.go b/pkg/controller/server_status_request_controller.go index 10fbf88475..f09b01b113 100644 --- a/pkg/controller/server_status_request_controller.go +++ b/pkg/controller/server_status_request_controller.go @@ -32,6 +32,7 @@ import ( "github.com/vmware-tanzu/velero/internal/velero" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/buildinfo" + "github.com/vmware-tanzu/velero/pkg/cmd/server/config" "github.com/vmware-tanzu/velero/pkg/plugin/framework" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" ) @@ -77,7 +78,7 @@ func NewServerStatusRequestReconciler( func (r *serverStatusRequestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := r.log.WithFields(logrus.Fields{ - "controller": ServerStatusRequest, + "controller": 
config.ControllerServerStatusRequest, "serverStatusRequest": req.NamespacedName, }) @@ -95,7 +96,7 @@ func (r *serverStatusRequestReconciler) Reconcile(ctx context.Context, req ctrl. } log = r.log.WithFields(logrus.Fields{ - "controller": ServerStatusRequest, + "controller": config.ControllerServerStatusRequest, "serverStatusRequest": req.NamespacedName, "phase": statusRequest.Status.Phase, }) diff --git a/pkg/plugin/clientmgmt/process/client_builder.go b/pkg/plugin/clientmgmt/process/client_builder.go index 1ae3f5af1f..5bffa52595 100644 --- a/pkg/plugin/clientmgmt/process/client_builder.go +++ b/pkg/plugin/clientmgmt/process/client_builder.go @@ -25,7 +25,6 @@ import ( hcplugin "github.com/hashicorp/go-plugin" "github.com/sirupsen/logrus" - "github.com/vmware-tanzu/velero/pkg/features" "github.com/vmware-tanzu/velero/pkg/plugin/framework" biav2 "github.com/vmware-tanzu/velero/pkg/plugin/framework/backupitemaction/v2" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" @@ -53,11 +52,8 @@ func newClientBuilder(command string, logger logrus.FieldLogger, logLevel logrus // For plugins compiled into the velero executable, we need to run "velero run-plugins" b.commandArgs = []string{"run-plugins"} } - - b.commandArgs = append(b.commandArgs, "--log-level", logLevel.String()) - if len(features.All()) > 0 { - b.commandArgs = append(b.commandArgs, "--features", features.Serialize()) - } + // exclude "velero" and "server" from "velero server --flags ..." + b.commandArgs = append(b.commandArgs, os.Args[2:]...) 
return b } diff --git a/pkg/plugin/clientmgmt/process/client_builder_test.go b/pkg/plugin/clientmgmt/process/client_builder_test.go index b0e7fa7004..d92addf76a 100644 --- a/pkg/plugin/clientmgmt/process/client_builder_test.go +++ b/pkg/plugin/clientmgmt/process/client_builder_test.go @@ -25,7 +25,6 @@ import ( "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - "github.com/vmware-tanzu/velero/pkg/features" "github.com/vmware-tanzu/velero/pkg/plugin/framework" biav2 "github.com/vmware-tanzu/velero/pkg/plugin/framework/backupitemaction/v2" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" @@ -39,19 +38,11 @@ func TestNewClientBuilder(t *testing.T) { logLevel := logrus.InfoLevel cb := newClientBuilder("velero", logger, logLevel) assert.Equal(t, "velero", cb.commandName) - assert.Equal(t, []string{"--log-level", "info"}, cb.commandArgs) assert.Equal(t, newLogrusAdapter(logger, logLevel), cb.pluginLogger) cb = newClientBuilder(os.Args[0], logger, logLevel) assert.Equal(t, cb.commandName, os.Args[0]) - assert.Equal(t, []string{"run-plugins", "--log-level", "info"}, cb.commandArgs) assert.Equal(t, newLogrusAdapter(logger, logLevel), cb.pluginLogger) - - features.NewFeatureFlagSet("feature1", "feature2") - cb = newClientBuilder(os.Args[0], logger, logLevel) - assert.Equal(t, []string{"run-plugins", "--log-level", "info", "--features", "feature1,feature2"}, cb.commandArgs) - // Clear the features list in case other tests run in the same process. - features.NewFeatureFlagSet() } func TestClientConfig(t *testing.T) { diff --git a/pkg/plugin/clientmgmt/process/process.go b/pkg/plugin/clientmgmt/process/process.go index dea0195536..5ebb4a7a18 100644 --- a/pkg/plugin/clientmgmt/process/process.go +++ b/pkg/plugin/clientmgmt/process/process.go @@ -17,8 +17,6 @@ limitations under the License. 
package process import ( - "strings" - plugin "github.com/hashicorp/go-plugin" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -61,32 +59,7 @@ func newProcess(command string, logger logrus.FieldLogger, logLevel logrus.Level // This launches the plugin process. protocolClient, err := client.Client() if err != nil { - if !strings.Contains(err.Error(), "unknown flag: --features") { - return nil, err - } - - // Velero started passing the --features flag to plugins in v1.2, however existing plugins - // may not support that flag and may not silently ignore unknown flags. The plugin server - // code that we make available to plugin authors has since been updated to ignore unknown - // flags, but to avoid breaking plugins that haven't updated to that code and don't support - // the --features flag, we specifically handle not passing the flag if we can detect that - // it's not supported. - - logger.Debug("Plugin process does not support the --features flag, removing it and trying again") - - builder.commandArgs = removeFeaturesFlag(builder.commandArgs) - - logger.Debugf("Updated command args after removing --features flag: %v", builder.commandArgs) - - // re-get the client and protocol client now that --features has been removed - // from the command args. - client = builder.client() - protocolClient, err = client.Client() - if err != nil { - return nil, err - } - - logger.Debug("Plugin process successfully started without the --features flag") + return nil, err } p := &process{ diff --git a/pkg/plugin/framework/server.go b/pkg/plugin/framework/server.go index 0e71c3b247..0231a5122c 100644 --- a/pkg/plugin/framework/server.go +++ b/pkg/plugin/framework/server.go @@ -17,7 +17,6 @@ limitations under the License. 
package framework import ( - "fmt" "os" "strings" @@ -25,11 +24,11 @@ import ( "github.com/sirupsen/logrus" "github.com/spf13/pflag" + "github.com/vmware-tanzu/velero/pkg/cmd/server/config" biav2 "github.com/vmware-tanzu/velero/pkg/plugin/framework/backupitemaction/v2" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" ibav1 "github.com/vmware-tanzu/velero/pkg/plugin/framework/itemblockaction/v1" riav2 "github.com/vmware-tanzu/velero/pkg/plugin/framework/restoreitemaction/v2" - "github.com/vmware-tanzu/velero/pkg/util/logging" ) // Server serves registered plugin implementations. @@ -42,6 +41,9 @@ type Server interface { // This method must be called prior to calling .Serve(). BindFlags(flags *pflag.FlagSet) Server + // GetConfig return the config parsed from the flags + GetConfig() *config.Config + // RegisterBackupItemAction registers a backup item action. Accepted format // for the plugin name is /. RegisterBackupItemAction(pluginName string, initializer common.HandlerInitializer) Server @@ -104,8 +106,8 @@ type Server interface { // server implements Server. type server struct { + config *config.Config log *logrus.Logger - logLevelFlag *logging.LevelFlag flagSet *pflag.FlagSet backupItemAction *BackupItemActionPlugin backupItemActionV2 *biav2.BackupItemActionPlugin @@ -122,8 +124,8 @@ func NewServer() Server { log := newLogger() return &server{ + config: config.GetDefaultConfig(), log: log, - logLevelFlag: logging.LogLevelFlag(log.Level), backupItemAction: NewBackupItemActionPlugin(common.ServerLogger(log)), backupItemActionV2: biav2.NewBackupItemActionPlugin(common.ServerLogger(log)), volumeSnapshotter: NewVolumeSnapshotterPlugin(common.ServerLogger(log)), @@ -136,13 +138,16 @@ func NewServer() Server { } func (s *server) BindFlags(flags *pflag.FlagSet) Server { - flags.Var(s.logLevelFlag, "log-level", fmt.Sprintf("The level at which to log. 
Valid values are %s.", strings.Join(s.logLevelFlag.AllowedValues(), ", "))) s.flagSet = flags + s.config.BindFlags(flags) s.flagSet.ParseErrorsWhitelist.UnknownFlags = true // Velero.io word list : ignore - return s } +func (s *server) GetConfig() *config.Config { + return s.config +} + func (s *server) RegisterBackupItemAction(name string, initializer common.HandlerInitializer) Server { s.backupItemAction.Register(name, initializer) return s @@ -260,7 +265,7 @@ func (s *server) Serve() { } } - s.log.Level = s.logLevelFlag.Parse() + s.log.Level = s.config.LogLevel.Parse() s.log.Debugf("Setting log level to %s", strings.ToUpper(s.log.Level.String())) command := os.Args[0] diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index 63c26538ba..8d748684c0 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -56,6 +56,7 @@ import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/archive" "github.com/vmware-tanzu/velero/pkg/client" + "github.com/vmware-tanzu/velero/pkg/cmd/server/config" "github.com/vmware-tanzu/velero/pkg/discovery" "github.com/vmware-tanzu/velero/pkg/features" "github.com/vmware-tanzu/velero/pkg/itemoperation" @@ -106,7 +107,7 @@ type kubernetesRestorer struct { podVolumeTimeout time.Duration resourceTerminatingTimeout time.Duration resourceTimeout time.Duration - resourcePriorities Priorities + resourcePriorities config.Priorities fileSystem filesystem.Interface pvRenamer func(string) (string, error) logger logrus.FieldLogger @@ -121,7 +122,7 @@ type kubernetesRestorer struct { func NewKubernetesRestorer( discoveryHelper discovery.Helper, dynamicFactory client.DynamicFactory, - resourcePriorities Priorities, + resourcePriorities config.Priorities, namespaceClient corev1.NamespaceInterface, podVolumeRestorerFactory podvolume.RestorerFactory, podVolumeTimeout time.Duration, @@ -359,7 +360,7 @@ type restoreContext 
struct { renamedPVs map[string]string pvRenamer func(string) (string, error) discoveryHelper discovery.Helper - resourcePriorities Priorities + resourcePriorities config.Priorities kbClient crclient.Client itemOperationsList *[]*itemoperation.RestoreOperation resourceModifiers *resourcemodifiers.ResourceModifiers @@ -386,7 +387,7 @@ type informerFactoryWithContext struct { // begins with all of the high prioritized resources (in order), ends with all of // the low prioritized resources(in order), and an alphabetized list of resources // in the backup(pick out the prioritized resources) is put in the middle. -func getOrderedResources(resourcePriorities Priorities, backupResources map[string]*archive.ResourceItems) []string { +func getOrderedResources(resourcePriorities config.Priorities, backupResources map[string]*archive.ResourceItems) []string { priorities := map[string]struct{}{} for _, priority := range resourcePriorities.HighPriorities { priorities[priority] = struct{}{} @@ -515,7 +516,7 @@ func (ctx *restoreContext) execute() (results.Result, results.Result) { backupResources, make([]restoreableResource, 0), sets.New[string](), - Priorities{HighPriorities: []string{"customresourcedefinitions"}}, + config.Priorities{HighPriorities: []string{"customresourcedefinitions"}}, false, ) warnings.Merge(&w) @@ -2156,7 +2157,7 @@ func (ctx *restoreContext) getOrderedResourceCollection( backupResources map[string]*archive.ResourceItems, restoreResourceCollection []restoreableResource, processedResources sets.Set[string], - resourcePriorities Priorities, + resourcePriorities config.Priorities, includeAllResources bool, ) ([]restoreableResource, sets.Set[string], results.Result, results.Result) { var warnings, errs results.Result diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go index 4aa0d6d6a8..a83aa90b1e 100644 --- a/pkg/restore/restore_test.go +++ b/pkg/restore/restore_test.go @@ -47,6 +47,7 @@ import ( 
"github.com/vmware-tanzu/velero/pkg/archive" "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/client" + "github.com/vmware-tanzu/velero/pkg/cmd/server/config" "github.com/vmware-tanzu/velero/pkg/discovery" "github.com/vmware-tanzu/velero/pkg/features" "github.com/vmware-tanzu/velero/pkg/itemoperation" @@ -870,7 +871,7 @@ func TestRestoreResourcePriorities(t *testing.T) { backup *velerov1api.Backup apiResources []*test.APIResource tarball io.Reader - resourcePriorities Priorities + resourcePriorities config.Priorities }{ { name: "resources are restored according to the specified resource priorities", @@ -904,7 +905,7 @@ func TestRestoreResourcePriorities(t *testing.T) { test.Deployments(), test.ServiceAccounts(), }, - resourcePriorities: Priorities{ + resourcePriorities: config.Priorities{ HighPriorities: []string{"persistentvolumes", "persistentvolumeclaims", "serviceaccounts"}, LowPriorities: []string{"deployments.apps"}, }, @@ -3192,7 +3193,7 @@ func TestRestorePersistentVolumes(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { h := newHarness(t) - h.restorer.resourcePriorities = Priorities{HighPriorities: []string{"persistentvolumes", "persistentvolumeclaims"}} + h.restorer.resourcePriorities = config.Priorities{HighPriorities: []string{"persistentvolumes", "persistentvolumeclaims"}} h.restorer.pvRenamer = func(oldName string) (string, error) { renamed := "renamed-" + oldName return renamed, nil @@ -3519,19 +3520,19 @@ func TestIsCompleted(t *testing.T) { func Test_getOrderedResources(t *testing.T) { tests := []struct { name string - resourcePriorities Priorities + resourcePriorities config.Priorities backupResources map[string]*archive.ResourceItems want []string }{ { name: "when only priorities are specified, they're returned in order", - resourcePriorities: Priorities{HighPriorities: []string{"prio-3", "prio-2", "prio-1"}}, + 
resourcePriorities: config.Priorities{HighPriorities: []string{"prio-3", "prio-2", "prio-1"}}, backupResources: nil, want: []string{"prio-3", "prio-2", "prio-1"}, }, { name: "when only backup resources are specified, they're returned in alphabetical order", - resourcePriorities: Priorities{}, + resourcePriorities: config.Priorities{}, backupResources: map[string]*archive.ResourceItems{ "backup-resource-3": nil, "backup-resource-2": nil, @@ -3541,7 +3542,7 @@ func Test_getOrderedResources(t *testing.T) { }, { name: "when priorities and backup resources are specified, they're returned in the correct order", - resourcePriorities: Priorities{HighPriorities: []string{"prio-3", "prio-2", "prio-1"}}, + resourcePriorities: config.Priorities{HighPriorities: []string{"prio-3", "prio-2", "prio-1"}}, backupResources: map[string]*archive.ResourceItems{ "prio-3": nil, "backup-resource-3": nil, @@ -3552,7 +3553,7 @@ func Test_getOrderedResources(t *testing.T) { }, { name: "when priorities and backup resources are specified, they're returned in the correct order", - resourcePriorities: Priorities{HighPriorities: []string{"prio-3", "prio-2", "prio-1"}, LowPriorities: []string{"prio-0"}}, + resourcePriorities: config.Priorities{HighPriorities: []string{"prio-3", "prio-2", "prio-1"}, LowPriorities: []string{"prio-0"}}, backupResources: map[string]*archive.ResourceItems{ "prio-3": nil, "prio-0": nil,