add node name data mover CR
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
Lyndon-Li committed Jul 6, 2023
1 parent daf20b8 commit 8a7aa20
Showing 7 changed files with 32 additions and 6 deletions.
config/crd/v2alpha1/bases/velero.io_datadownloads.yaml (7 additions, 0 deletions)
@@ -41,6 +41,10 @@ spec:
jsonPath: .metadata.creationTimestamp
name: Age
type: date
- description: Name of the node where the DataDownload is processed
jsonPath: .status.node
name: Node
type: string
name: v2alpha1
schema:
openAPIV3Schema:
@@ -132,6 +136,9 @@ spec:
message:
description: Message is a message about the DataDownload's status.
type: string
node:
description: Node is name of the node where the DataDownload is processed.
type: string
phase:
description: Phase is the current state of the DataDownload.
enum:
config/crd/v2alpha1/bases/velero.io_datauploads.yaml (7 additions, 0 deletions)
@@ -42,6 +42,10 @@ spec:
jsonPath: .metadata.creationTimestamp
name: Age
type: date
- description: Name of the node where the DataUpload is processed
jsonPath: .status.node
name: Node
type: string
name: v2alpha1
schema:
openAPIV3Schema:
@@ -147,6 +151,9 @@ spec:
message:
description: Message is a message about the DataUpload's status.
type: string
node:
description: Node is name of the node where the DataUpload is processed.
type: string
path:
description: Path is the full path of the snapshot volume being backed
up.
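
Both CRD files gain the same two pieces: a status.node property in the schema and a Node printer column whose JSONPath is .status.node. Below is a minimal sketch (not part of this commit) of reading that same path without the typed Velero API, e.g. from an unstructured object returned by a dynamic client; the helper name is hypothetical:

    package sketch

    import (
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    )

    // nodeOf returns the value behind the new printer column's JSONPath
    // (.status.node), or "" when the field has not been populated yet.
    func nodeOf(obj *unstructured.Unstructured) string {
        node, found, err := unstructured.NestedString(obj.Object, "status", "node")
        if err != nil || !found {
            return ""
        }
        return node
    }
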
config/crd/v2alpha1/crds/crds.go (2 additions, 2 deletions)

Large diffs are not rendered by default.

pkg/apis/velero/v2alpha1/data_download_types.go (5 additions, 0 deletions)
@@ -111,6 +111,10 @@ type DataDownloadStatus struct {
// about the restore operation.
// +optional
Progress shared.DataMoveOperationProgress `json:"progress,omitempty"`

// Node is name of the node where the DataDownload is processed.
// +optional
Node string `json:"node,omitempty"`
}

// TODO(2.0) After converting all resources to use the runtime-controller client, the genclient and k8s:deepcopy markers will no longer be needed and should be removed.
@@ -125,6 +129,7 @@ type DataDownloadStatus struct {
// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes"
// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where the backup data is stored"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this DataDownload was created"
// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the DataDownload is processed"

type DataDownload struct {
metav1.TypeMeta `json:",inline"`
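
With the new field in DataDownloadStatus, any consumer that already holds a DataDownload can tell which node is handling it. A minimal sketch under that assumption only; reportNode is a made-up helper, not part of Velero:

    package sketch

    import (
        "fmt"

        velerov2alpha1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1"
    )

    // reportNode prints the node recorded in status.node; it returns false while
    // the field is still empty (it is only set once the DataDownload is prepared).
    func reportNode(dd *velerov2alpha1api.DataDownload) bool {
        if dd.Status.Node == "" {
            return false
        }
        fmt.Printf("DataDownload %s is being processed on node %s\n", dd.Name, dd.Status.Node)
        return true
    }
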
pkg/apis/velero/v2alpha1/data_upload_types.go (5 additions, 0 deletions)
@@ -140,6 +140,10 @@ type DataUploadStatus struct {
// about the backup operation.
// +optional
Progress shared.DataMoveOperationProgress `json:"progress,omitempty"`

// Node is name of the node where the DataUpload is processed.
// +optional
Node string `json:"node,omitempty"`
}

// TODO(2.0) After converting all resources to use the runtime-controller client,
@@ -155,6 +159,7 @@ type DataUploadStatus struct {
// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes"
// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this DataUpload was created"
// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the DataUpload is processed"

type DataUpload struct {
metav1.TypeMeta `json:",inline"`
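
The DataUpload change mirrors the DataDownload one above. As a sketch of one way the field can be used (not part of this commit), the hypothetical helper below groups already-listed DataUploads by the node that processed them, e.g. to see how backups are spread across node-agent pods:

    package sketch

    import velerov2alpha1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1"

    // uploadsByNode buckets DataUpload names by status.node; items that have not
    // been prepared yet (empty node name) fall under the "" key.
    func uploadsByNode(items []velerov2alpha1api.DataUpload) map[string][]string {
        byNode := make(map[string][]string)
        for _, du := range items {
            byNode[du.Status.Node] = append(byNode[du.Status.Node], du.Name)
        }
        return byNode
    }
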
pkg/controller/data_download_controller.go (3 additions, 2 deletions)
@@ -400,7 +400,7 @@ func (r *DataDownloadReconciler) findSnapshotRestoreForPod(podObj client.Object)
requests := make([]reconcile.Request, 1)

r.logger.WithField("Restore pod", pod.Name).Infof("Preparing data download %s", dd.Name)
err = r.patchDataDownload(context.Background(), dd, prepareDataDownload)
err = r.patchDataDownload(context.Background(), dd, r.prepareDataDownload)
if err != nil {
r.logger.WithField("Restore pod", pod.Name).WithError(err).Error("unable to patch data download")
return []reconcile.Request{}
@@ -426,8 +426,9 @@ func (r *DataDownloadReconciler) patchDataDownload(ctx context.Context, req *vel
return nil
}

func prepareDataDownload(ssb *velerov2alpha1api.DataDownload) {
func (r *DataDownloadReconciler) prepareDataDownload(ssb *velerov2alpha1api.DataDownload) {
ssb.Status.Phase = velerov2alpha1api.DataDownloadPhasePrepared
ssb.Status.Node = r.nodeName
}

func (r *DataDownloadReconciler) errorOut(ctx context.Context, dd *velerov2alpha1api.DataDownload, err error, msg string, log logrus.FieldLogger) (ctrl.Result, error) {
pkg/controller/data_upload_controller.go (3 additions, 2 deletions)
@@ -416,7 +416,7 @@ func (r *DataUploadReconciler) findDataUploadForPod(podObj client.Object) []reco
}

r.logger.WithField("Backup pod", pod.Name).Infof("Preparing dataupload %s", du.Name)
if err := r.patchDataUpload(context.Background(), du, prepareDataUpload); err != nil {
if err := r.patchDataUpload(context.Background(), du, r.prepareDataUpload); err != nil {
r.logger.WithField("Backup pod", pod.Name).WithError(err).Error("failed to patch dataupload")
return []reconcile.Request{}
}
@@ -440,8 +440,9 @@ func (r *DataUploadReconciler) patchDataUpload(ctx context.Context, req *velerov
return nil
}

func prepareDataUpload(du *velerov2alpha1api.DataUpload) {
func (r *DataUploadReconciler) prepareDataUpload(du *velerov2alpha1api.DataUpload) {
du.Status.Phase = velerov2alpha1api.DataUploadPhasePrepared
du.Status.Node = r.nodeName
}

func (r *DataUploadReconciler) errorOut(ctx context.Context, du *velerov2alpha1api.DataUpload, err error, msg string, log logrus.FieldLogger) (ctrl.Result, error) {
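
In both controllers the prepare callback changes from a package-level function to a method so it can read the reconciler's nodeName field and record it in the status. How nodeName itself is populated is outside this diff; a common pattern for a daemonset-style agent, shown here purely as an assumption with a hypothetical environment variable, is the Kubernetes downward API:

    package sketch

    import "os"

    // currentNodeName reads the node name the way a daemonset pod typically gets
    // it: from an environment variable injected via the downward API
    // (fieldRef: spec.nodeName). The variable name is an assumption, not Velero's.
    func currentNodeName() string {
        return os.Getenv("NODE_NAME")
    }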
