Fixes for SQS offloading when sharing and copying backups across regions
Guslington committed Jan 15, 2019
1 parent ce3e224 commit 39eea48
Showing 1 changed file with 14 additions and 14 deletions.
28 changes: 14 additions & 14 deletions shelvery/engine.py
@@ -452,15 +452,16 @@ def do_copy_backup(self, map_args={}, **kwargs):
         """
 
         kwargs.update(map_args)
-
+        backup_id = kwargs['BackupId']
+        origin_region = kwargs['OriginRegion']
         # if backup is not available, exit and rely on recursive lambda call copy backup
         # in non lambda mode this should never happen
         if RuntimeConfig.is_offload_queueing(self):
-            if not self.is_backup_available(backup_region, backup_id):
-                self.store_backup_data(self.get_backup_resource(backup_region, backup_id))
+            if not self.is_backup_available(origin_region,backup_id):
+                self.copy_backup(self.get_backup_resource(origin_region, backup_id))
         else:
-            if not self.wait_backup_available(backup_region=kwargs['OriginRegion'],
-                                              backup_id=kwargs['BackupId'],
+            if not self.wait_backup_available(backup_region=origin_region,
+                                              backup_id=backup_id,
                                               lambda_method='do_copy_backup',
                                               lambda_args=kwargs):
                 return
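In plain terms, this first hunk changes what happens when SQS offloading is enabled and the backup is not yet available in its origin region: instead of enqueueing store_backup_data against a backup_region name that do_copy_backup never defines, the method now re-enqueues the copy itself. Below is a minimal, self-contained sketch of that corrected branch; the class name, stub methods and printed messages are illustrative stand-ins, not the shelvery ShelveryEngine implementation.

# A minimal sketch of the corrected offload branch of do_copy_backup.
class CopyOffloadSketch:
    def is_backup_available(self, region, backup_id):
        # Stub: the real engine checks whether the backup has finished creating
        # in the given region.
        return False

    def get_backup_resource(self, region, backup_id):
        # Stub: the real engine returns a backup resource object for this id/region.
        return {'region': region, 'backup_id': backup_id}

    def copy_backup(self, backup_resource):
        # Stub: the real engine re-dispatches the copy (for example back onto the
        # offload queue) so it is retried once the backup becomes available.
        print(f"re-enqueue copy of {backup_resource['backup_id']}")

    def do_copy_backup(self, **kwargs):
        backup_id = kwargs['BackupId']
        origin_region = kwargs['OriginRegion']
        # Backup not yet available in its origin region: re-enqueue the copy
        # itself (the old code enqueued store_backup_data here) and exit.
        if not self.is_backup_available(origin_region, backup_id):
            self.copy_backup(self.get_backup_resource(origin_region, backup_id))
            return


# Example invocation with made-up values.
CopyOffloadSketch().do_copy_backup(BackupId='backup-0001', OriginRegion='us-east-1')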
@@ -544,21 +545,21 @@ def do_copy_backup(self, map_args={}, **kwargs):
     def do_share_backup(self, map_args={}, **kwargs):
         """Share backup with other AWS account, actual implementation"""
         kwargs.update(map_args)
-
+        backup_id = kwargs['BackupId']
+        backup_region = kwargs['Region']
+        backup_resource = self.get_backup_resource(backup_region, backup_id)
         # if backup is not available, exit and rely on recursive lambda call do share backup
         # in non lambda mode this should never happen
         if RuntimeConfig.is_offload_queueing(self):
             if not self.is_backup_available(backup_region, backup_id):
-                self.store_backup_data(self.get_backup_resource(backup_region, backup_id))
+                self.share_backup(self.get_backup_resource(backup_region, backup_id))
         else:
-            if not self.wait_backup_available(backup_region=kwargs['Region'],
-                                              backup_id=kwargs['BackupId'],
+            if not self.wait_backup_available(backup_region=backup_region,
+                                              backup_id=backup_id,
                                               lambda_method='do_share_backup',
                                               lambda_args=kwargs):
                 return
 
-        backup_region = kwargs['Region']
-        backup_id = kwargs['BackupId']
         destination_account_id = kwargs['AwsAccountId']
         self.logger.info(f"Do share backup {backup_id} ({backup_region}) with {destination_account_id}")
         try:
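The share path gets the same treatment: BackupId and Region are read once at the top of do_share_backup and reused by the offload branch, the wait_backup_available fallback and the log line, and the offload branch now re-enqueues share_backup instead of store_backup_data. Reading the values once matters here because the old offload branch referenced backup_region before the assignment that used to sit further down the method. A small sketch of that parameter handling, assuming a plain dict payload like the one the queue or lambda call delivers; the helper name and values below are made up.

# Sketch only: reading the share parameters once up front, the way
# do_share_backup now does.
def read_share_args(payload: dict) -> tuple:
    backup_id = payload['BackupId']
    backup_region = payload['Region']
    destination_account_id = payload['AwsAccountId']
    return backup_id, backup_region, destination_account_id


example_payload = {
    'BackupId': 'backup-0001',
    'Region': 'ap-southeast-2',
    'AwsAccountId': '123456789012',
}
backup_id, backup_region, destination_account_id = read_share_args(example_payload)
print(f"Do share backup {backup_id} ({backup_region}) with {destination_account_id}")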
@@ -615,20 +616,19 @@ def do_store_backup_data(self, map_args={}, **kwargs):
         kwargs.update(map_args)
         backup_id = kwargs['BackupId']
         backup_region = kwargs['BackupRegion']
-
+        backup_resource = self.get_backup_resource(backup_region, backup_id)
         # if backup is not available, exit and rely on recursive lambda call write metadata
         # in non lambda mode this should never happen
         if RuntimeConfig.is_offload_queueing(self):
             if not self.is_backup_available(backup_region, backup_id):
-                self.store_backup_data(self.get_backup_resource(backup_region, backup_id))
+                self.store_backup_data(backup_resource)
         else:
             if not self.wait_backup_available(backup_region=backup_region,
                                               backup_id=backup_id,
                                               lambda_method='do_store_backup_data',
                                               lambda_args=kwargs):
                 return
 
-        backup_resource = self.get_backup_resource(backup_region, backup_id)
         if backup_resource.account_id is None:
             backup_resource.account_id = self.account_id
         bucket = self._get_data_bucket(backup_resource.region)
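The last hunk is a small cleanup along the same lines: do_store_backup_data fetches the backup resource once, above the offload-queueing branch, and passes that object both to the re-enqueued store_backup_data call and to the metadata-writing code below, instead of calling get_backup_resource a second time. A rough sketch of that shape, again with stand-in stubs rather than the real ShelveryEngine:

# Illustrative stand-in, not the shelvery implementation: the single lookup is
# reused both when re-enqueueing and when writing the metadata.
class StoreMetadataSketch:
    account_id = '123456789012'  # example value

    def get_backup_resource(self, region, backup_id):
        # Stub: one lookup of the backup's metadata in the given region.
        return {'region': region, 'backup_id': backup_id, 'account_id': None}

    def is_backup_available(self, region, backup_id):
        # Stub: pretend the backup has finished creating.
        return True

    def store_backup_data(self, backup_resource):
        # Stub: the real method re-enqueues itself while the backup is not ready.
        print(f"re-enqueue metadata write for {backup_resource['backup_id']}")

    def do_store_backup_data(self, **kwargs):
        backup_id = kwargs['BackupId']
        backup_region = kwargs['BackupRegion']
        # One lookup, shared by both branches below.
        backup_resource = self.get_backup_resource(backup_region, backup_id)
        if not self.is_backup_available(backup_region, backup_id):
            self.store_backup_data(backup_resource)
            return
        if backup_resource['account_id'] is None:
            backup_resource['account_id'] = self.account_id
        print(f"writing metadata for {backup_resource}")


StoreMetadataSketch().do_store_backup_data(BackupId='backup-0001', BackupRegion='us-east-1')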
