Skip to content

Commit

Permalink
Log more information
Browse files Browse the repository at this point in the history
Signed-off-by: Parthvi <parthvi.vala@suse.com>
  • Loading branch information
valaparthvi committed Jun 11, 2024
1 parent 0a14a6a commit c2c3f14
Show file tree
Hide file tree
Showing 2 changed files with 23 additions and 4 deletions.
6 changes: 3 additions & 3 deletions controller/eks-cluster-config-handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -164,7 +164,7 @@ func (h *Handler) OnEksConfigRemoved(_ string, config *eksv1.EKSClusterConfig) (
return config, nil
}

-	logrus.Infof("deleting cluster [%s (id: %s)]", config.Name)
+	logrus.Infof("deleting cluster [%s (id: %s)]", config.Spec.DisplayName, config.Name)

logrus.Infof("starting node group deletion for config [%s (id: %s)]", config.Spec.DisplayName, config.Name)
waitingForNodegroupDeletion := true
Expand Down Expand Up @@ -336,7 +336,7 @@ func validateUpdate(config *eksv1.EKSClusterConfig) error {
if _, ok := nodeGroupNames[aws.ToString(ng.NodegroupName)]; !ok {
nodeGroupNames[aws.ToString(ng.NodegroupName)] = struct{}{}
} else {
-	errs = append(errs, fmt.Sprintf("node group names must be unique within the [%s (%s)] cluster to avoid duplication", config.Spec.DisplayName, config.Name))
+	errs = append(errs, fmt.Sprintf("node group name %s must be unique within the [%s (%s)] cluster to avoid duplication", aws.ToString(ng.NodegroupName), config.Spec.DisplayName, config.Name))
}

if ng.Version == nil {
Expand Down Expand Up @@ -505,7 +505,7 @@ func (h *Handler) validateCreate(ctx context.Context, config *eksv1.EKSClusterCo
}
}
if ng.NodegroupName == nil {
-	return fmt.Errorf(cannotBeNilError, "name", *ng.NodegroupName, config.Name)
+	return fmt.Errorf(cannotBeNilError, "name", *ng.NodegroupName, config.Spec.DisplayName, config.Name)
}
if nodeP[*ng.NodegroupName] {
return fmt.Errorf("node group name [%s] must be unique within the [%s (id: %s)] cluster to avoid duplication", *ng.NodegroupName, config.Spec.DisplayName, config.Name)
Expand Down
21 changes: 20 additions & 1 deletion pkg/eks/update.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,8 @@ type UpdateClusterVersionOpts struct {
func UpdateClusterVersion(ctx context.Context, opts *UpdateClusterVersionOpts) (bool, error) {
updated := false
if aws.ToString(opts.UpstreamClusterSpec.KubernetesVersion) != aws.ToString(opts.Config.Spec.KubernetesVersion) {
-	logrus.Infof("updating kubernetes version for cluster [%s (id: %s)]", opts.Config.Spec.DisplayName, opts.Config.Name)
+	logrus.Infof("updating kubernetes version to %s for cluster [%s (id: %s)]", aws.ToString(opts.Config.Spec.KubernetesVersion), opts.Config.Spec.DisplayName, opts.Config.Name)
+	logrus.Debugf("config: %s, upstream: %s", aws.ToString(opts.Config.Spec.KubernetesVersion), aws.ToString(opts.UpstreamClusterSpec.KubernetesVersion))
_, err := opts.EKSService.UpdateClusterVersion(ctx, &eks.UpdateClusterVersionInput{
Name: aws.String(opts.Config.Spec.DisplayName),
Version: opts.Config.Spec.KubernetesVersion,
Expand All @@ -52,6 +53,9 @@ type UpdateResourceTagsOpts struct {
func UpdateResourceTags(ctx context.Context, opts *UpdateResourceTagsOpts) (bool, error) {
updated := false
if updateTags := utils.GetKeyValuesToUpdate(opts.Tags, opts.UpstreamTags); updateTags != nil {
+	logrus.Infof("updating resource tags to %s for cluster [%s]", opts.Tags, opts.ClusterName)
+	logrus.Debugf("config: %s, upstream: %s", opts.Tags, opts.UpstreamTags)

_, err := opts.EKSService.TagResource(ctx,
&eks.TagResourceInput{
ResourceArn: aws.String(opts.ResourceARN),
Expand All @@ -64,6 +68,9 @@ func UpdateResourceTags(ctx context.Context, opts *UpdateResourceTagsOpts) (bool
}

if updateUntags := utils.GetKeysToDelete(opts.Tags, opts.UpstreamTags); updateUntags != nil {
+	logrus.Infof("deleting resource tags %s from cluster [%s]", opts.Tags, opts.ClusterName)
+	logrus.Debugf("config: %s, upstream: %s", opts.Tags, opts.UpstreamTags)

_, err := opts.EKSService.UntagResource(ctx,
&eks.UntagResourceInput{
ResourceArn: aws.String(opts.ResourceARN),
Expand All @@ -87,6 +94,10 @@ type UpdateLoggingTypesOpts struct {
func UpdateClusterLoggingTypes(ctx context.Context, opts *UpdateLoggingTypesOpts) (bool, error) {
updated := false
if loggingTypesUpdate := getLoggingTypesUpdate(opts.Config.Spec.LoggingTypes, opts.UpstreamClusterSpec.LoggingTypes); loggingTypesUpdate != nil {

Check failure on line 96 in pkg/eks/update.go

View workflow job for this annotation

GitHub Actions / lint

unnecessary leading newline (whitespace)

+	logrus.Infof("updating logging types to %s for cluster [%s (id: %s)]", opts.Config.Spec.LoggingTypes, opts.Config.Spec.DisplayName, opts.Config.Name)
+	logrus.Debugf("config: %s, upstream: %s", opts.Config.Spec.LoggingTypes, opts.UpstreamClusterSpec.LoggingTypes)

_, err := opts.EKSService.UpdateClusterConfig(ctx,
&eks.UpdateClusterConfigInput{
Name: aws.String(opts.Config.Spec.DisplayName),
Expand Down Expand Up @@ -114,6 +125,11 @@ func UpdateClusterAccess(ctx context.Context, opts *UpdateClusterAccessOpts) (bo
publicAccessUpdate := opts.Config.Spec.PublicAccess != nil && aws.ToBool(opts.UpstreamClusterSpec.PublicAccess) != aws.ToBool(opts.Config.Spec.PublicAccess)
privateAccessUpdate := opts.Config.Spec.PrivateAccess != nil && aws.ToBool(opts.UpstreamClusterSpec.PrivateAccess) != aws.ToBool(opts.Config.Spec.PrivateAccess)
if publicAccessUpdate || privateAccessUpdate {

[CI annotation — GitHub Actions / lint] Check failure on line 127 in pkg/eks/update.go: unnecessary leading newline (whitespace)

+	logrus.Infof("updating public access to %v and private access to %v for cluster [%s (id: %s)]", aws.ToBool(opts.Config.Spec.PublicAccess), aws.ToBool(opts.Config.Spec.PrivateAccess), opts.Config.Spec.DisplayName, opts.Config.Name)
+	logrus.Debugf("[public access] config: %v, upstream: %v", aws.ToBool(opts.Config.Spec.PublicAccess), aws.ToBool(opts.UpstreamClusterSpec.PublicAccess))
+	logrus.Debugf("[private access] config: %v, upstream: %v", aws.ToBool(opts.Config.Spec.PrivateAccess), aws.ToBool(opts.UpstreamClusterSpec.PrivateAccess))

// public and private access updates need to be sent together. When they are sent one at a time
// the request may be denied due to having both public and private access disabled.
_, err := opts.EKSService.UpdateClusterConfig(ctx,
Expand Down Expand Up @@ -147,6 +163,8 @@ func UpdateClusterPublicAccessSources(ctx context.Context, opts *UpdateClusterPu
filteredSpecPublicAccessSources := filterPublicAccessSources(opts.Config.Spec.PublicAccessSources)
filteredUpstreamPublicAccessSources := filterPublicAccessSources(opts.UpstreamClusterSpec.PublicAccessSources)
if !utils.CompareStringSliceElements(filteredSpecPublicAccessSources, filteredUpstreamPublicAccessSources) {
+	logrus.Infof("updating public access source config to %v for cluster [%s (id: %s)]", opts.Config.Spec.PublicAccessSources, opts.Config.Spec.DisplayName, opts.Config.Name)
+	logrus.Debugf("config: %v, upstream: %v", opts.Config.Spec.PublicAccessSources, opts.UpstreamClusterSpec.PublicAccessSources)
_, err := opts.EKSService.UpdateClusterConfig(ctx,
&eks.UpdateClusterConfigInput{
Name: aws.String(opts.Config.Spec.DisplayName),
Expand Down Expand Up @@ -175,6 +193,7 @@ type UpdateNodegroupVersionOpts struct {
}

func UpdateNodegroupVersion(ctx context.Context, opts *UpdateNodegroupVersionOpts) error {
+	logrus.Infof("updating nodegroup version for cluster [%s (id: %s)]", opts.Config.Spec.DisplayName, opts.Config.Name)
if _, err := opts.EKSService.UpdateNodegroupVersion(ctx, opts.NGVersionInput); err != nil {
if version, ok := opts.LTVersions[aws.ToString(opts.NodeGroup.NodegroupName)]; ok {
// If there was an error updating the node group and a Rancher-managed launch template version was created,
Expand Down

0 comments on commit c2c3f14

Please sign in to comment.