[#4493]: Functionality to delete backups through platform.
Summary:
Adding an endpoint as well as a schedulable task to delete old backups after a specified time
period.
1) A backup can be deleted directly through a stand-alone endpoint.
2) For scheduled backups, the user can set an expiry time, and the scheduler will delete the backup
after that amount of time has passed.
Backups now have an associated schedule (if created via a schedule) as well as an expiry. Backups
are only deleted while their universe still exists (partly because we have no way to detect
orphaned backups).
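
As a rough, self-contained sketch of the expiry rule described above (the actual check lives in
the platform's Java scheduler; the Backup shape and the helper parameters below are illustrative
assumptions, not the real classes):

    from dataclasses import dataclass
    from datetime import datetime
    from typing import Optional

    @dataclass
    class Backup:
        universe_uuid: str
        expiry: Optional[datetime]  # set only for backups created via a schedule

    def purge_expired_backups(backups, universe_exists, delete_backup, now=None):
        # Delete scheduled backups whose expiry has passed, but only while their
        # universe still exists (there is currently no way to track orphaned backups).
        now = now or datetime.utcnow()
        for b in backups:
            if b.expiry is None:  # stand-alone backups are deleted only via the endpoint
                continue
            if not universe_exists(b.universe_uuid):
                continue
            if now >= b.expiry:
                delete_backup(b)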

Test Plan:
Created a stand-alone backup and deleted it. Also created a schedule and verified that old
backups were deleted after the configured expiry. Also tested against S3, GCP, and Azure backups.
UI mocks: {F14065}

{F14066}

{F14067}

Reviewers: wesley, daniel, sanketh

Reviewed By: sanketh

Subscribers: andrew, zyu, jenkins-bot, yugaware

Differential Revision: https://phabricator.dev.yugabyte.com/D9361
Arnav15 committed Sep 23, 2020
1 parent 05ceaea commit 685d448
Showing 20 changed files with 475 additions and 85 deletions.
60 changes: 52 additions & 8 deletions managed/devops/bin/yb_backup.py
@@ -336,29 +336,38 @@ def storage_type():
return 'az'

def _command_list_prefix(self):
return "azcopy cp"
return "azcopy"

def upload_file_cmd(self, src, dest):
# azcopy requires quotes around the src and dest. This format is necessary to do so.
src = "'{}'".format(src)
dest = "'{}'".format(dest + os.getenv('AZURE_STORAGE_SAS_TOKEN'))
return ["{} {} {}".format(self._command_list_prefix(), src, dest)]
return ["{} {} {} {}".format(self._command_list_prefix(), "cp", src, dest)]

def download_file_cmd(self, src, dest):
src = "'{}'".format(src + os.getenv('AZURE_STORAGE_SAS_TOKEN'))
dest = "'{}'".format(dest)
return ["{} {} {} {}".format(self._command_list_prefix(), src, dest, "--recursive")]
return ["{} {} {} {} {}".format(self._command_list_prefix(), "cp", src,
dest, "--recursive")]

def upload_dir_cmd(self, src, dest):
# azcopy will download the top-level directory as well as the contents without "/*".
src = "'{}'".format(os.path.join(src, '*'))
dest = "'{}'".format(dest + os.getenv('AZURE_STORAGE_SAS_TOKEN'))
return ["{} {} {} {}".format(self._command_list_prefix(), src, dest, "--recursive")]
return ["{} {} {} {} {}".format(self._command_list_prefix(), "cp", src,
dest, "--recursive")]

def download_dir_cmd(self, src, dest):
src = "'{}'".format(os.path.join(src, '*') + os.getenv('AZURE_STORAGE_SAS_TOKEN'))
dest = "'{}'".format(dest)
return ["{} {} {} {}".format(self._command_list_prefix(), src, dest, "--recursive")]
return ["{} {} {} {} {}".format(self._command_list_prefix(), "cp", src,
dest, "--recursive")]

def delete_obj_cmd(self, dest):
if dest is None or dest == '/' or dest == '':
raise BackupException("Destination needs to be well formed.")
dest = "'{}'".format(dest + os.getenv('AZURE_STORAGE_SAS_TOKEN'))
return ["{} {} {} {}".format(self._command_list_prefix(), "rm", dest, "--recursive=true")]


class GcsBackupStorage(AbstractBackupStorage):
@@ -385,6 +394,11 @@ def upload_dir_cmd(self, src, dest):
def download_dir_cmd(self, src, dest):
return self._command_list_prefix() + ["-m", "rsync", "-r", src, dest]

def delete_obj_cmd(self, dest):
if dest is None or dest == '/' or dest == '':
raise BackupException("Destination needs to be well formed.")
return self._command_list_prefix() + ["rm", "-r", dest]


class S3BackupStorage(AbstractBackupStorage):
def __init__(self, options):
@@ -418,6 +432,11 @@ def upload_dir_cmd(self, src, dest):
def download_dir_cmd(self, src, dest):
return self._command_list_prefix() + ["sync", "--no-check-md5", src, dest]

def delete_obj_cmd(self, dest):
if dest is None or dest == '/' or dest == '':
raise BackupException("Destination needs to be well formed.")
return self._command_list_prefix() + ["del", "-r", dest]


class NfsBackupStorage(AbstractBackupStorage):
def __init__(self, options):
@@ -452,6 +471,11 @@ def upload_dir_cmd(self, src, dest):
def download_dir_cmd(self, src, dest):
return self._command_list_prefix() + [src, dest]

def delete_obj_cmd(self, dest):
if dest is None or dest == '/' or dest == '':
raise BackupException("Destination needs to be well formed.")
return ["rm", "-rf", pipes.quote(dest)]


BACKUP_STORAGE_ABSTRACTIONS = {
S3BackupStorage.storage_type(): S3BackupStorage,
@@ -646,8 +670,8 @@ def parse_arguments(self):
default=S3BackupStorage.storage_type(),
help="Storage backing for backups, eg: s3, nfs, gcs, ..")
parser.add_argument(
'command', choices=['create', 'restore', 'restore_keys'],
help='Create or restore the backup from the provided backup location.')
'command', choices=['create', 'restore', 'restore_keys', 'delete'],
help='Create, restore or delete the backup from the provided backup location.')
parser.add_argument(
'--certs_dir', required=False,
help="The directory containing the certs for secure connections.")
@@ -743,6 +767,9 @@ def is_gcs(self):
def is_az(self):
return self.args.storage_type == AzBackupStorage.storage_type()

def is_nfs(self):
return self.args.storage_type == NfsBackupStorage.storage_type()

def is_k8s(self):
return self.args.k8s_config is not None

@@ -1547,6 +1574,13 @@ def download_encryption_key_file(self):
self.storage.download_file_cmd(key_file_src, self.args.restore_keys_destination)
)

def delete_bucket_obj(self):
del_cmd = self.storage.delete_obj_cmd(self.args.backup_location)
if self.is_nfs():
self.run_ssh_cmd(del_cmd, self.get_leader_master_ip())
else:
self.run_program(del_cmd)

def upload_metadata_and_checksum(self, src_path, dest_path):
"""
Upload metadata file and checksum file to the target backup location.
@@ -2025,7 +2059,14 @@ def restore_table(self):
logging.info('Restored backup successfully!')
print(json.dumps({"success": True}))

# At exit callbacks
def delete_backup(self):
"""
Delete the backup specified by the storage location.
"""
if self.args.backup_location:
self.delete_bucket_obj()
logging.info('Deleted backup %s successfully!', self.args.backup_location)
print(json.dumps({"success": True}))

def restore_keys(self):
"""
@@ -2037,6 +2078,7 @@ def restore_keys(self):
logging.info('Restored backup universe keys successfully!')
print(json.dumps({"success": True}))

# At exit callbacks
def cleanup_temporary_directory(self, tmp_dir):
"""
Callback run on exit to clean up temporary directories.
Expand Down Expand Up @@ -2074,6 +2116,8 @@ def run(self):
self.backup_table()
elif self.args.command == 'restore_keys':
self.restore_keys()
elif self.args.command == 'delete':
self.delete_backup()
else:
logging.error('Command was not specified')
print(json.dumps({"error": "Command was not specified"}))
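
Taken together, the script-side flow added above is: run() dispatches the new 'delete' command to
delete_backup(), which calls delete_bucket_obj(), which asks the configured storage backend for a
delete command and executes it (locally for cloud storage, over SSH to the leader master for NFS).
A minimal sketch of the per-backend command construction follows; the "s3cmd" prefix is an
assumption for illustration (S3BackupStorage._command_list_prefix() is outside this diff), while
the destination guard and the "del -r" suffix come from the changes above:

    class BackupException(Exception):
        pass

    def s3_delete_obj_cmd(dest):
        # Same guard as the new delete_obj_cmd implementations: refuse empty or
        # root destinations so a bad argument cannot wipe an entire bucket.
        if dest is None or dest == '/' or dest == '':
            raise BackupException("Destination needs to be well formed.")
        return ["s3cmd", "del", "-r", dest]  # prefix assumed; suffix matches the diff

    print(s3_delete_obj_cmd("s3://my-bucket/univ-1/backup-2020-09-23"))
    # -> ['s3cmd', 'del', '-r', 's3://my-bucket/univ-1/backup-2020-09-23']
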
New file: DeleteBackup.java (71 additions)
@@ -0,0 +1,71 @@
/*
* Copyright 2019 YugaByte, Inc. and Contributors
*
* Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
*/

package com.yugabyte.yw.commissioner.tasks;

import com.fasterxml.jackson.databind.JsonNode;
import com.yugabyte.yw.commissioner.AbstractTaskBase;
import com.yugabyte.yw.common.ShellProcessHandler;
import com.yugabyte.yw.common.TableManager;
import com.yugabyte.yw.forms.AbstractTaskParams;
import com.yugabyte.yw.forms.BackupTableParams;
import com.yugabyte.yw.forms.ITaskParams;
import com.yugabyte.yw.models.Backup;
import com.yugabyte.yw.models.Universe;
import play.api.Play;
import play.libs.Json;

import java.util.UUID;


public class DeleteBackup extends AbstractTaskBase {

public static class Params extends AbstractTaskParams {
public UUID customerUUID;
public UUID backupUUID;
}

public Params params() {
return (Params)taskParams;
}
private TableManager tableManager;

@Override
public void initialize(ITaskParams params) {
super.initialize(params);
tableManager = Play.current().injector().instanceOf(TableManager.class);
}

@Override
public void run() {
try {
Backup backup = Backup.get(params().customerUUID, params().backupUUID);
if (backup.state != Backup.BackupState.Completed) {
LOG.error("Cannot delete backup in any other state other than completed.");
throw new RuntimeException("Backup cannot be deleted");
}
backup.transitionState(Backup.BackupState.Deleted);
BackupTableParams backupParams = Json.fromJson(backup.backupInfo, BackupTableParams.class);
backupParams.actionType = BackupTableParams.ActionType.DELETE;
ShellProcessHandler.ShellResponse response = tableManager.deleteBackup(backupParams);
JsonNode jsonNode = Json.parse(response.message);
if (response.code != 0 || jsonNode.has("error")) {
LOG.error("Delete Backup failed. Response code={}, hasError={}.",
response.code, jsonNode.has("error"));
throw new RuntimeException(response.message);
} else {
LOG.info("[" + getName() + "] STDOUT: " + response.message);
}
} catch (Exception e) {
LOG.error("Errored out with: " + e);
throw new RuntimeException(e);
}
}
}
@@ -206,6 +206,7 @@ public void run() {
tableBackupParams.sse = params().sse;
tableBackupParams.parallelism = params().parallelism;
tableBackupParams.transactionalBackup = params().transactionalBackup;
tableBackupParams.scheduleUUID = params().scheduleUUID;
Backup backup = Backup.create(params().customerUUID, tableBackupParams);

for (BackupTableParams backupParams : backupParamsList) {
@@ -259,6 +260,7 @@ private void populateBackupParams(BackupTableParams backupParams,
backupParams.universeUUID = params().universeUUID;
backupParams.sse = params().sse;
backupParams.parallelism = params().parallelism;
backupParams.scheduleUUID = params().scheduleUUID;
backupParams.keyspace = tableKeySpace;
backupParams.transactionalBackup = params().transactionalBackup;

Expand Down Expand Up @@ -301,6 +303,7 @@ private BackupTableParams createBackupParams(String tableKeySpace, String tableN
backupParams.universeUUID = params().universeUUID;
backupParams.sse = params().sse;
backupParams.parallelism = params().parallelism;
backupParams.scheduleUUID = params().scheduleUUID;
return backupParams;
}
}