diff --git a/src/connectedk8s/HISTORY.rst b/src/connectedk8s/HISTORY.rst index 8c34bccfff8..7cde55af4dc 100644 --- a/src/connectedk8s/HISTORY.rst +++ b/src/connectedk8s/HISTORY.rst @@ -3,6 +3,11 @@ Release History =============== -0.1.0 +0.2.0 +++++++ +* `az connectedk8s connect`: Added telemetry. +* `az connectedk8s delete`: Added telemetry. + +0.1.5 ++++++ * Initial release. \ No newline at end of file diff --git a/src/connectedk8s/azext_connectedk8s/custom.py b/src/connectedk8s/azext_connectedk8s/custom.py index d7f3033261a..382357541f5 100644 --- a/src/connectedk8s/azext_connectedk8s/custom.py +++ b/src/connectedk8s/azext_connectedk8s/custom.py @@ -5,7 +5,6 @@ import os import json -import uuid import time import subprocess from subprocess import Popen, PIPE @@ -17,6 +16,7 @@ from azure.cli.core.commands.client_factory import get_subscription_id from azure.cli.core.util import sdk_no_wait from azure.cli.core._profile import Profile +from azure.cli.core import telemetry from azext_connectedk8s._client_factory import _graph_client_factory from azext_connectedk8s._client_factory import cf_resource_groups from azext_connectedk8s._client_factory import _resource_client_factory @@ -31,6 +31,33 @@ logger = get_logger(__name__) +Invalid_Location_Fault_Type = 'location-validation-error' +Load_Kubeconfig_Fault_Type = 'kubeconfig-load-error' +Read_ConfigMap_Fault_Type = 'configmap-read-error' +Create_ConnectedCluster_Fault_Type = 'connected-cluster-create-error' +Delete_ConnectedCluster_Fault_Type = 'connected-cluster-delete-error' +Bad_DeleteRequest_Fault_Type = 'bad-delete-request-error' +Cluster_Already_Onboarded_Fault_Type = 'cluster-already-onboarded-error' +Resource_Already_Exists_Fault_Type = 'resource-already-exists-error' +Create_ResourceGroup_Fault_Type = 'resource-group-creation-error' +Add_HelmRepo_Fault_Type = 'helm-repo-add-error' +List_HelmRelease_Fault_Type = 'helm-list-release-error' +KeyPair_Generate_Fault_Type = 'keypair-generation-error' 
+PublicKey_Export_Fault_Type = 'publickey-export-error' +PrivateKey_Export_Fault_Type = 'privatekey-export-error' +Install_HelmRelease_Fault_Type = 'helm-release-install-error' +Delete_HelmRelease_Fault_Type = 'helm-release-delete-error' +Check_PodStatus_Fault_Type = 'check-pod-status-error' +Kubernetes_Connectivity_FaultType = 'kubernetes-cluster-connection-error' +Helm_Version_Fault_Type = 'helm-not-updated-error' +Check_HelmVersion_Fault_Type = 'helm-version-check-error' +Helm_Installation_Fault_Type = 'helm-not-installed-error' +Check_HelmInstallation_Fault_Type = 'check-helm-installed-error' +Get_HelmRegistery_Path_Fault_Type = 'helm-registry-path-fetch-error' +Pull_HelmChart_Fault_Type = 'helm-chart-pull-error' +Export_HelmChart_Fault_Type = 'helm-chart-export-error' +Get_Kubernetes_Version_Fault_Type = 'kubernetes-get-version-error' + # pylint:disable=unused-argument # pylint: disable=too-many-locals @@ -62,6 +89,9 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, location try: config.load_kube_config(config_file=kube_config, context=kube_context) except Exception as e: + telemetry.set_user_fault() + telemetry.set_exception(exception=e, fault_type=Load_Kubeconfig_Fault_Type, + summary='Problem loading the kubeconfig file') raise CLIError("Problem loading the kubeconfig file." + str(e)) configuration = kube_client.Configuration() @@ -70,11 +100,21 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, location # if the user had not logged in. 
check_kube_connection(configuration) + # Get kubernetes cluster info for telemetry + kubernetes_version = get_server_version(configuration) + kubernetes_distro = 'default' + kubernetes_properties = { + 'Context.Default.AzureCLI.KubernetesVersion': kubernetes_version, + 'Context.Default.AzureCLI.KubernetesDistro': kubernetes_distro + } + telemetry.add_extension_event('connectedk8s', kubernetes_properties) + # Checking helm installation check_helm_install(kube_config, kube_context) # Check helm version - check_helm_version(kube_config, kube_context) + helm_version = check_helm_version(kube_config, kube_context) + telemetry.add_extension_event('connectedk8s', {'Context.Default.AzureCLI.HelmVersion': helm_version}) # Validate location rp_locations = [] @@ -84,6 +124,9 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, location if resourceTypes.resource_type == 'connectedClusters': rp_locations = [location.replace(" ", "").lower() for location in resourceTypes.locations] if location.lower() not in rp_locations: + telemetry.set_user_fault() + telemetry.set_exception(exception='Location not supported', fault_type=Invalid_Location_Fault_Type, + summary='Provided location is not supported for creating connected clusters') raise CLIError("Connected cluster resource creation is supported only in the following locations: " + ', '.join(map(str, rp_locations)) + ". 
Use the --location flag to specify one of these locations.") @@ -97,6 +140,8 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, location try: configmap = api_instance.read_namespaced_config_map('azure-clusterconfig', 'azure-arc') except Exception as e: # pylint: disable=broad-except + telemetry.set_exception(exception=e, fault_type=Read_ConfigMap_Fault_Type, + summary='Unable to read ConfigMap') raise CLIError("Unable to read ConfigMap 'azure-clusterconfig' in 'azure-arc' namespace: %s\n" % e) configmap_rg_name = configmap.data["AZURE_RESOURCE_GROUP"] configmap_cluster_name = configmap.data["AZURE_RESOURCE_NAME"] @@ -111,9 +156,14 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, location return sdk_no_wait(no_wait, client.create, resource_group_name=resource_group_name, cluster_name=cluster_name, connected_cluster=cc) except CloudError as ex: + telemetry.set_exception(exception=ex, fault_type=Create_ConnectedCluster_Fault_Type, + summary='Unable to create connected cluster resource') raise CLIError(ex) else: - raise CLIError("The kubernetes cluster you are trying to onboard" + + telemetry.set_user_fault() + telemetry.set_exception(exception='The kubernetes cluster is already onboarded', fault_type=Cluster_Already_Onboarded_Fault_Type, + summary='Kubernetes cluster already onboarded') + raise CLIError("The kubernetes cluster you are trying to onboard " + "is already onboarded to the resource group" + " '{}' with resource name '{}'.".format(configmap_rg_name, configmap_cluster_name)) else: @@ -121,6 +171,9 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, location delete_arc_agents(release_namespace, kube_config, kube_context, configuration) else: if connected_cluster_exists(client, resource_group_name, cluster_name): + telemetry.set_user_fault() + telemetry.set_exception(exception='The connected cluster resource already exists', fault_type=Resource_Already_Exists_Fault_Type, + 
summary='Connected cluster resource already exists') raise CLIError("The connected cluster resource {} already exists ".format(cluster_name) + "in the resource group {} ".format(resource_group_name) + "and corresponds to a different Kubernetes cluster. To onboard this Kubernetes cluster" + @@ -132,6 +185,8 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, location try: resourceClient.resource_groups.create_or_update(resource_group_name, resource_group_params) except Exception as e: + telemetry.set_exception(exception=e, fault_type=Create_ResourceGroup_Fault_Type, + summary='Failed to create the resource group') raise CLIError("Failed to create the resource group {} :".format(resource_group_name) + str(e)) # Adding helm repo @@ -144,32 +199,44 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, location response_helm_repo = Popen(cmd_helm_repo, stdout=PIPE, stderr=PIPE) _, error_helm_repo = response_helm_repo.communicate() if response_helm_repo.returncode != 0: + telemetry.set_exception(exception=error_helm_repo.decode("ascii"), fault_type=Add_HelmRepo_Fault_Type, + summary='Failed to add helm repository') raise CLIError("Unable to add repository {} to helm: ".format(repo_url) + error_helm_repo.decode("ascii")) # Retrieving Helm chart OCI Artifact location - registery_path = get_helm_registery(profile, location) + registry_path = os.getenv('HELMREGISTRY') if os.getenv('HELMREGISTRY') else get_helm_registry(profile, location) + + # Get azure-arc agent version for telemetry + azure_arc_agent_version = registry_path.split(':')[1] + telemetry.add_extension_event('connectedk8s', {'Context.Default.AzureCLI.AgentVersion': azure_arc_agent_version}) - # Pulling helm chart from registery + # Pulling helm chart from registry os.environ['HELM_EXPERIMENTAL_OCI'] = '1' - pull_helm_chart(registery_path, kube_config, kube_context) + pull_helm_chart(registry_path, kube_config, kube_context) # Exporting helm chart chart_export_path = 
os.path.join(os.path.expanduser('~'), '.azure', 'AzureArcCharts') - export_helm_chart(registery_path, chart_export_path, kube_config, kube_context) + export_helm_chart(registry_path, chart_export_path, kube_config, kube_context) # Generate public-private key pair try: key_pair = RSA.generate(4096) except Exception as e: + telemetry.set_exception(exception=e, fault_type=KeyPair_Generate_Fault_Type, + summary='Failed to generate public-private key pair') raise CLIError("Failed to generate public-private key pair. " + str(e)) try: public_key = get_public_key(key_pair) except Exception as e: - raise CLIError("Failed to generate public key." + str(e)) + telemetry.set_exception(exception=e, fault_type=PublicKey_Export_Fault_Type, + summary='Failed to export public key') + raise CLIError("Failed to export public key." + str(e)) try: private_key_pem = get_private_key(key_pair) except Exception as e: - raise CLIError("Failed to generate private key." + str(e)) + telemetry.set_exception(exception=e, fault_type=PrivateKey_Export_Fault_Type, + summary='Failed to export private key') + raise CLIError("Failed to export private key." 
+ str(e)) # Helm Install helm_chart_path = os.path.join(chart_export_path, 'azure-arc-k8sagents') @@ -188,6 +255,8 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, location response_helm_install = Popen(cmd_helm_install, stdout=PIPE, stderr=PIPE) _, error_helm_install = response_helm_install.communicate() if response_helm_install.returncode != 0: + telemetry.set_exception(exception=error_helm_install.decode("ascii"), fault_type=Install_HelmRelease_Fault_Type, + summary='Unable to install helm release') raise CLIError("Unable to install helm release: " + error_helm_install.decode("ascii")) # Create connected cluster resource @@ -199,6 +268,8 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, location if no_wait: return put_cc_response except CloudError as ex: + telemetry.set_exception(exception=ex, fault_type=Create_ConnectedCluster_Fault_Type, + summary='Unable to create connected cluster resource') raise CLIError(ex) # Getting total number of pods scheduled to run in azure-arc namespace @@ -209,6 +280,8 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, location try: check_pod_status(pod_dict) except Exception as e: # pylint: disable=broad-except + telemetry.set_exception(exception=e, fault_type=Check_PodStatus_Fault_Type, + summary='Failed to check arc agent pods statuses') logger.warning("Failed to check arc agent pods statuses: %s", e) return put_cc_response @@ -234,6 +307,9 @@ def check_kube_connection(configuration): try: api_instance.get_api_resources() except Exception as e: + telemetry.set_user_fault() + telemetry.set_exception(exception=e, fault_type=Kubernetes_Connectivity_FaultType, + summary='Unable to verify connectivity to the Kubernetes cluster') logger.warning("Unable to verify connectivity to the Kubernetes cluster: %s\n", e) raise CLIError("If you are using AAD Enabled cluster, " + "verify that you are able to access the cluster. 
Learn more at " + @@ -249,10 +325,18 @@ def check_helm_install(kube_config, kube_context): _, error_helm_installed = response_helm_installed.communicate() if response_helm_installed.returncode != 0: if "unknown flag" in error_helm_installed.decode("ascii"): + telemetry.set_user_fault() + telemetry.set_exception(exception='Helm 3 not found', fault_type=Helm_Version_Fault_Type, + summary='Helm3 not found on the machine') raise CLIError("Please install the latest version of Helm. " + "Learn more at https://aka.ms/arc/k8s/onboarding-helm-install") + telemetry.set_user_fault() + telemetry.set_exception(exception=error_helm_installed.decode("ascii"), fault_type=Helm_Installation_Fault_Type, + summary='Helm3 not installed on the machine') raise CLIError(error_helm_installed.decode("ascii")) - except FileNotFoundError: + except FileNotFoundError as e: + telemetry.set_exception(exception=e, fault_type=Check_HelmInstallation_Fault_Type, + summary='Unable to verify helm installation') raise CLIError("Helm is not installed or requires elevated permissions. " + "Ensure that you have the latest version of Helm installed on your machine. " + "Learn more at https://aka.ms/arc/k8s/onboarding-helm-install") @@ -268,11 +352,17 @@ def check_helm_version(kube_config, kube_context): response_helm_version = Popen(cmd_helm_version, stdout=PIPE, stderr=PIPE) output_helm_version, error_helm_version = response_helm_version.communicate() if response_helm_version.returncode != 0: + telemetry.set_exception(exception=error_helm_version.decode('ascii'), fault_type=Check_HelmVersion_Fault_Type, + summary='Unable to determine helm version') raise CLIError("Unable to determine helm version: " + error_helm_version.decode("ascii")) if "v2" in output_helm_version.decode("ascii"): + telemetry.set_user_fault() + telemetry.set_exception(exception='Helm 3 not found', fault_type=Helm_Version_Fault_Type, + summary='Helm3 not found on the machine') raise CLIError("Helm version 3+ is required. 
" + "Ensure that you have installed the latest version of Helm. " + "Learn more at https://aka.ms/arc/k8s/onboarding-helm-install") + return output_helm_version.decode('ascii') def resource_group_exists(ctx, resource_group_name, subscription_id=None): @@ -294,44 +384,52 @@ def connected_cluster_exists(client, resource_group_name, cluster_name): return True -def get_helm_registery(profile, location): +def get_helm_registry(profile, location): cred, _, _ = profile.get_login_credentials( resource='https://management.core.windows.net/') token = cred._token_retriever()[2].get('accessToken') # pylint: disable=protected-access get_chart_location_url = "https://{}.dp.kubernetesconfiguration.azure.com/{}/GetLatestHelmPackagePath?api-version=2019-11-01-preview".format(location, 'azure-arc-k8sagents') query_parameters = {} - query_parameters['releaseTrain'] = 'stable' + query_parameters['releaseTrain'] = os.getenv('RELEASETRAIN') if os.getenv('RELEASETRAIN') else 'stable' header_parameters = {} header_parameters['Authorization'] = "Bearer {}".format(str(token)) try: response = requests.post(get_chart_location_url, params=query_parameters, headers=header_parameters) except Exception as e: - raise CLIError("Error while fetching helm chart registery path: " + str(e)) + telemetry.set_exception(exception=e, fault_type=Get_HelmRegistery_Path_Fault_Type, + summary='Error while fetching helm chart registry path') + raise CLIError("Error while fetching helm chart registry path: " + str(e)) if response.status_code == 200: return response.json().get('repositoryPath') - raise CLIError("Error while fetching helm chart registery path: {}".format(str(response.json()))) + telemetry.set_exception(exception=str(response.json()), fault_type=Get_HelmRegistery_Path_Fault_Type, + summary='Error while fetching helm chart registry path') + raise CLIError("Error while fetching helm chart registry path: {}".format(str(response.json()))) -def pull_helm_chart(registery_path, kube_config, kube_context): 
- cmd_helm_chart_pull = ["helm", "chart", "pull", registery_path, "--kubeconfig", kube_config] +def pull_helm_chart(registry_path, kube_config, kube_context): + cmd_helm_chart_pull = ["helm", "chart", "pull", registry_path, "--kubeconfig", kube_config] if kube_context: cmd_helm_chart_pull.extend(["--kube-context", kube_context]) response_helm_chart_pull = subprocess.Popen(cmd_helm_chart_pull, stdout=PIPE, stderr=PIPE) _, error_helm_chart_pull = response_helm_chart_pull.communicate() if response_helm_chart_pull.returncode != 0: - raise CLIError("Unable to pull helm chart from the registery '{}': ".format(registery_path) + error_helm_chart_pull.decode("ascii")) + telemetry.set_exception(exception=error_helm_chart_pull.decode("ascii"), fault_type=Pull_HelmChart_Fault_Type, + summary='Unable to pull helm chart from the registry') + raise CLIError("Unable to pull helm chart from the registry '{}': ".format(registry_path) + error_helm_chart_pull.decode("ascii")) -def export_helm_chart(registery_path, chart_export_path, kube_config, kube_context): +def export_helm_chart(registry_path, chart_export_path, kube_config, kube_context): chart_export_path = os.path.join(os.path.expanduser('~'), '.azure', 'AzureArcCharts') - cmd_helm_chart_export = ["helm", "chart", "export", registery_path, "--destination", chart_export_path, "--kubeconfig", kube_config] + cmd_helm_chart_export = ["helm", "chart", "export", registry_path, "--destination", chart_export_path, "--kubeconfig", kube_config] if kube_context: cmd_helm_chart_export.extend(["--kube-context", kube_context]) response_helm_chart_export = subprocess.Popen(cmd_helm_chart_export, stdout=PIPE, stderr=PIPE) _, error_helm_chart_export = response_helm_chart_export.communicate() if response_helm_chart_export.returncode != 0: - raise CLIError("Unable to export helm chart from the registery '{}': ".format(registery_path) + error_helm_chart_export.decode("ascii")) + 
telemetry.set_exception(exception=error_helm_chart_export.decode("ascii"), fault_type=Export_HelmChart_Fault_Type, + summary='Unable to export helm chart from the registry') + raise CLIError("Unable to export helm chart from the registry '{}': ".format(registry_path) + error_helm_chart_export.decode("ascii")) def get_public_key(key_pair): @@ -346,39 +444,18 @@ def get_private_key(key_pair): return PEM.encode(privKey_DER, "RSA PRIVATE KEY") -def get_node_count(configuration): - api_instance = kube_client.CoreV1Api(kube_client.ApiClient(configuration)) - try: - api_response = api_instance.list_node() - return len(api_response.items) - except Exception as e: # pylint: disable=broad-except - logger.warning("Exception while fetching nodes: %s\n", e) - - def get_server_version(configuration): api_instance = kube_client.VersionApi(kube_client.ApiClient(configuration)) try: api_response = api_instance.get_code() return api_response.git_version except Exception as e: # pylint: disable=broad-except + telemetry.set_exception(exception=e, fault_type=Get_Kubernetes_Version_Fault_Type, + summary='Unable to fetch kubernetes version') logger.warning("Unable to fetch kubernetes version: %s\n", e) -def get_agent_version(configuration): - api_instance = kube_client.CoreV1Api(kube_client.ApiClient(configuration)) - try: - api_response = api_instance.read_namespaced_config_map('azure-clusterconfig', 'azure-arc') - return api_response.data["AZURE_ARC_AGENT_VERSION"] - except Exception as e: # pylint: disable=broad-except - logger.warning("Unable to read ConfigMap 'azure-clusterconfig' in 'azure-arc' namespace: %s\n", e) - - def generate_request_payload(configuration, location, public_key, tags): - # Fetch cluster info - total_node_count = get_node_count(configuration) - kubernetes_version = get_server_version(configuration) - azure_arc_agent_version = get_agent_version(configuration) - # Create connected cluster resource object aad_profile = ConnectedClusterAADProfile( tenant_id="", @@ 
-395,9 +472,6 @@ def generate_request_payload(configuration, location, public_key, tags): identity=identity, agent_public_key_certificate=public_key, aad_profile=aad_profile, - kubernetes_version=kubernetes_version, - total_node_count=total_node_count, - agent_version=azure_arc_agent_version, tags=tags ) return cc @@ -439,6 +513,7 @@ def check_pod_status(pod_dict): "Run 'kubectl get pods -n azure-arc' to check the pod status.") if all(ele == 1 for ele in list(pod_dict.values())): return + telemetry.add_extension_event('connectedk8s', {'Context.Default.AzureCLI.ExitStatus': 'Timedout'}) logger.warning("%s%s", 'The pods were unable to start before timeout. ', 'Please run "kubectl get pods -n azure-arc" to ensure if the pods are in running state.') @@ -468,6 +543,9 @@ def delete_connectedk8s(cmd, client, resource_group_name, cluster_name, try: config.load_kube_config(config_file=kube_config, context=kube_context) except Exception as e: + telemetry.set_user_fault() + telemetry.set_exception(exception=e, fault_type=Load_Kubeconfig_Fault_Type, + summary='Problem loading the kubeconfig file') raise CLIError("Problem loading the kubeconfig file." 
+ str(e)) configuration = kube_client.Configuration() @@ -493,12 +571,17 @@ def delete_connectedk8s(cmd, client, resource_group_name, cluster_name, try: configmap = api_instance.read_namespaced_config_map('azure-clusterconfig', 'azure-arc') except Exception as e: # pylint: disable=broad-except - logger.warning("Unable to read ConfigMap 'azure-clusterconfig' in 'azure-arc' namespace: %s\n", e) + telemetry.set_exception(exception=e, fault_type=Read_ConfigMap_Fault_Type, + summary='Unable to read ConfigMap') + raise CLIError("Unable to read ConfigMap 'azure-clusterconfig' in 'azure-arc' namespace: %s\n" % e) if (configmap.data["AZURE_RESOURCE_GROUP"].lower() == resource_group_name.lower() and configmap.data["AZURE_RESOURCE_NAME"].lower() == cluster_name.lower()): delete_cc_resource(client, resource_group_name, cluster_name, no_wait) else: + telemetry.set_user_fault() + telemetry.set_exception(exception='Unable to delete connected cluster', fault_type=Bad_DeleteRequest_Fault_Type, + summary='The resource cannot be deleted as kubernetes cluster is onboarded with some other resource id') raise CLIError("The current context in the kubeconfig file does not correspond " + "to the connected cluster resource specified. 
Agents installed on this cluster correspond " + "to the resource group name '{}' ".format(configmap.data["AZURE_RESOURCE_GROUP"]) + @@ -515,6 +598,8 @@ def get_release_namespace(kube_config, kube_context): response_helm_release = Popen(cmd_helm_release, stdout=PIPE, stderr=PIPE) output_helm_release, error_helm_release = response_helm_release.communicate() if response_helm_release.returncode != 0: + telemetry.set_exception(exception=error_helm_release.decode("ascii"), fault_type=List_HelmRelease_Fault_Type, + summary='Unable to list helm release') raise CLIError("Helm list release failed: " + error_helm_release.decode("ascii")) output_helm_release = output_helm_release.decode("ascii") output_helm_release = json.loads(output_helm_release) @@ -530,6 +615,8 @@ def delete_cc_resource(client, resource_group_name, cluster_name, no_wait): resource_group_name=resource_group_name, cluster_name=cluster_name) except CloudError as ex: + telemetry.set_exception(exception=ex, fault_type=Delete_ConnectedCluster_Fault_Type, + summary='Unable to delete connected cluster resource') raise CLIError(ex) @@ -540,6 +627,8 @@ def delete_arc_agents(release_namespace, kube_config, kube_context, configuratio response_helm_delete = Popen(cmd_helm_delete, stdout=PIPE, stderr=PIPE) _, error_helm_delete = response_helm_delete.communicate() if response_helm_delete.returncode != 0: + telemetry.set_exception(exception=error_helm_delete.decode("ascii"), fault_type=Delete_HelmRelease_Fault_Type, + summary='Unable to delete helm release') raise CLIError("Error occured while cleaning up arc agents. 
" + "Helm release deletion failed: " + error_helm_delete.decode("ascii")) ensure_namespace_cleanup(configuration) @@ -565,11 +654,3 @@ def update_connectedk8s(cmd, instance, tags=None): with cmd.update_context(instance) as c: c.set_param('tags', tags) return instance - - -def _is_guid(guid): - try: - uuid.UUID(guid) - return True - except ValueError: - return False diff --git a/src/connectedk8s/setup.py b/src/connectedk8s/setup.py index 2b549d2c1e5..07d924300a0 100644 --- a/src/connectedk8s/setup.py +++ b/src/connectedk8s/setup.py @@ -16,7 +16,7 @@ # TODO: Confirm this is the right version number you want and it matches your # HISTORY.rst entry. -VERSION = '0.1.5' +VERSION = '0.2.0' # The full list of classifiers is available at # https://pypi.python.org/pypi?%3Aaction=list_classifiers