Skip to content
This repository has been archived by the owner on Feb 3, 2021. It is now read-only.

Feature: add cluster list quiet flag, ability to compose with delete #581

Merged
merged 9 commits into from
Jun 6, 2018
29 changes: 16 additions & 13 deletions aztk_cli/spark/endpoints/cluster/cluster_delete.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,8 @@

def setup_parser(parser: argparse.ArgumentParser):
parser.add_argument('--id',
dest='cluster_id',
dest='cluster_ids',
nargs='*',
required=True,
help='The unique id of your spark cluster')
parser.add_argument('--force', '-f',
Expand All @@ -25,19 +26,21 @@ def setup_parser(parser: argparse.ArgumentParser):

def execute(args: typing.NamedTuple):
    """Delete one or more clusters, confirming each one unless --force is set.

    For every id in ``args.cluster_ids``: unless ``--force`` was given, warn
    that persisted logs will be removed (when ``--keep-logs`` is not set) and
    require the user to re-type the cluster id before deleting it.
    """
    spark_client = aztk.spark.Client(config.load_aztk_secrets())
    cluster_ids = args.cluster_ids

    for cluster_id in cluster_ids:
        if not args.force:
            if not args.keep_logs:
                log.warning("All logs persisted for this cluster will be deleted.")

            confirmation_cluster_id = input(
                "Please confirm the id of the cluster you wish to delete [{}]: ".format(cluster_id))

            if confirmation_cluster_id != cluster_id:
                log.error("Confirmation cluster id does not match. Please try again.")
                # NOTE(review): this aborts the remaining deletions too, not just
                # the current cluster — confirm whether `continue` was intended.
                return

        if spark_client.delete_cluster(cluster_id, args.keep_logs):
            log.info("Deleting cluster %s", cluster_id)
        else:
            log.error("Cluster with id '%s' doesn't exist or was already deleted.", cluster_id)
17 changes: 12 additions & 5 deletions aztk_cli/spark/endpoints/cluster/cluster_list.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,19 @@
from aztk_cli import config, utils


def setup_parser(parser: argparse.ArgumentParser):
    """Register the CLI arguments for the `cluster list` command."""
    parser.add_argument('-q', '--quiet',
                        dest='quiet',
                        required=False,
                        action='store_true',
                        # Fixed: the help text was copy-pasted from the --id
                        # option and described a cluster id, not this flag.
                        help='Only print cluster ids, one per line')
    # store_true already defaults to False; kept explicit for clarity.
    parser.set_defaults(quiet=False)


def execute(args: typing.NamedTuple):
    """List all clusters; with --quiet, print only their ids."""
    spark_client = aztk.spark.Client(config.load_aztk_secrets())
    clusters = spark_client.list_clusters()
    # Pick the printer up front instead of branching around two call sites.
    printer = utils.print_clusters_quiet if args.quiet else utils.print_clusters
    printer(clusters)
2 changes: 1 addition & 1 deletion aztk_cli/spark/endpoints/job/delete.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ def execute(args: typing.NamedTuple):
spark_client.get_job(job_id)

if not args.keep_logs:
log.warn("All logs persisted for this job will be deleted.")
log.warning("All logs persisted for this job will be deleted.")

confirmation_cluster_id = input("Please confirm the id of the cluster you wish to delete: ")

Expand Down
7 changes: 6 additions & 1 deletion aztk_cli/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ def get_ssh_key_or_prompt(ssh_key, username, password, secrets_config):
ssh_key = get_ssh_key.get_user_public_key(ssh_key, secrets_config)

if username is not None and password is None and ssh_key is None:
log.warn("It is reccomended to use an SSH key for user creation instead of a password.")
log.warning("It is reccomended to use an SSH key for user creation instead of a password.")
for i in range(3):
if i > 0:
log.error("Please try again.")
Expand Down Expand Up @@ -119,6 +119,11 @@ def print_clusters(clusters: List[models.Cluster]):
)
)


def print_clusters_quiet(clusters: List[models.Cluster]):
    """Print only the id of each cluster, one per line."""
    ids = [str(cluster.id) for cluster in clusters]
    print('\n'.join(ids))
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we wait for the other PR to merge and then change this to `log.print`?



def stream_logs(client, cluster_id, application_name):
current_bytes = 0
while True:
Expand Down