diff --git a/tests/ci/config/config.go b/tests/ci/config/config.go
index 263d7a026e..3af446a5a1 100644
--- a/tests/ci/config/config.go
+++ b/tests/ci/config/config.go
@@ -40,12 +40,13 @@ type TestConfig struct {
 	GlobalENV *GlobalENVVariables
 }
 type GlobalENVVariables struct {
-	ChannelGroup       string `env:"CHANNEL_GROUP" default:""`
-	Version            string `env:"VERSION" default:""`
-	Region             string `env:"REGION" default:""`
-	ProvisionShard     string `env:"PROVISION_SHARD" default:""`
-	NamePrefix         string `env:"NAME_PREFIX"`
-	ClusterWaitingTime int    `env:"CLUSTER_TIMEOUT" default:"60"`
+	ChannelGroup          string `env:"CHANNEL_GROUP" default:""`
+	Version               string `env:"VERSION" default:""`
+	Region                string `env:"REGION" default:""`
+	ProvisionShard        string `env:"PROVISION_SHARD" default:""`
+	NamePrefix            string `env:"NAME_PREFIX"`
+	ClusterWaitingTime    int    `env:"CLUSTER_TIMEOUT" default:"60"`
+	WaitSetupClusterReady bool   `env:"WAIT_SETUP_CLUSTER_READY" default:"true"`
 }

 func init() {
@@ -82,13 +83,15 @@ func init() {
 	if err != nil {
 		panic(fmt.Errorf("env variable CLUSTER_TIMEOUT must be set to an integer"))
 	}
+	waitSetupClusterReady, _ := strconv.ParseBool(common.ReadENVWithDefaultValue("WAIT_SETUP_CLUSTER_READY", "true"))
 	Test.GlobalENV = &GlobalENVVariables{
-		ChannelGroup:       os.Getenv("CHANNEL_GROUP"),
-		Version:            os.Getenv("VERSION"),
-		Region:             os.Getenv("REGION"),
-		ProvisionShard:     os.Getenv("PROVISION_SHARD"),
-		NamePrefix:         os.Getenv("NAME_PREFIX"),
-		ClusterWaitingTime: waitingTime,
+		ChannelGroup:          os.Getenv("CHANNEL_GROUP"),
+		Version:               os.Getenv("VERSION"),
+		Region:                os.Getenv("REGION"),
+		ProvisionShard:        os.Getenv("PROVISION_SHARD"),
+		NamePrefix:            os.Getenv("NAME_PREFIX"),
+		ClusterWaitingTime:    waitingTime,
+		WaitSetupClusterReady: waitSetupClusterReady,
 	}
 }
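The new WAIT_SETUP_CLUSTER_READY switch is read with the same fallback-then-parse pattern the file already uses for CLUSTER_TIMEOUT. The sketch below isolates that pattern; readBoolEnv is illustrative, not repo code. Note that the diff drops the strconv.ParseBool error, so a malformed value silently becomes false, while the sketch surfaces it:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// readBoolEnv mirrors the fallback-then-parse pattern used above: take the
// default when the variable is unset, then hand the value to ParseBool.
func readBoolEnv(key, defaultValue string) (bool, error) {
	value, ok := os.LookupEnv(key)
	if !ok || value == "" {
		value = defaultValue
	}
	return strconv.ParseBool(value)
}

func main() {
	waitReady, err := readBoolEnv("WAIT_SETUP_CLUSTER_READY", "true")
	if err != nil {
		panic(fmt.Errorf("env variable WAIT_SETUP_CLUSTER_READY must be a boolean: %w", err))
	}
	fmt.Println("wait for setup cluster ready:", waitReady)
}
```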
"github.com/onsi/gomega" + "github.com/openshift/rosa/tests/ci/config" "github.com/openshift/rosa/tests/ci/labels" "github.com/openshift/rosa/tests/utils/exec/rosacli" "github.com/openshift/rosa/tests/utils/log" - PH "github.com/openshift/rosa/tests/utils/profilehandler" + "github.com/openshift/rosa/tests/utils/profilehandler" ) var _ = Describe("ROSA CLI Test", func() { It("PrepareClusterByProfile", - labels.Critical, labels.Day1Prepare, func() { client := rosacli.NewClient() - profile := PH.LoadProfileYamlFileByENV() - cluster, err := PH.CreateClusterByProfile(profile, client, true) + profile := profilehandler.LoadProfileYamlFileByENV() + cluster, err := profilehandler.CreateClusterByProfile(profile, client, config.Test.GlobalENV.WaitSetupClusterReady) Expect(err).ToNot(HaveOccurred()) log.Logger.Infof("Cluster prepared successfully with id %s", cluster.ID) }) + + It("WaitClusterReady", func() { + clusterDetail, err := profilehandler.ParserClusterDetail() + Expect(err).ToNot(HaveOccurred()) + client := rosacli.NewClient() + profilehandler.WaitForClusterReady(client, clusterDetail.ClusterID, config.Test.GlobalENV.ClusterWaitingTime) + }) }) diff --git a/tests/utils/exec/rosacli/cmd_runner.go b/tests/utils/exec/rosacli/cmd_runner.go index 445983d59b..782602bfe5 100644 --- a/tests/utils/exec/rosacli/cmd_runner.go +++ b/tests/utils/exec/rosacli/cmd_runner.go @@ -196,7 +196,7 @@ func (r *runner) Run() (bytes.Buffer, error) { err = cmd.Run() - log.Logger.Debugf("Get Combining Stdout and Stder is :\n%s", output.String()) + log.Logger.Infof("Get Combining Stdout and Stder is :\n%s", output.String()) if strings.Contains(output.String(), "Not able to get authentication token") { retry = retry + 1 diff --git a/tests/utils/log/logger.go b/tests/utils/log/logger.go index eb957cef3e..1aaa3335e3 100644 --- a/tests/utils/log/logger.go +++ b/tests/utils/log/logger.go @@ -16,7 +16,7 @@ func GetLogger() *Log { logger, _ := logging. NewStdLoggerBuilder(). Streams(g.GinkgoWriter, g.GinkgoWriter). - Debug(true). + Debug(false). 
diff --git a/tests/utils/profilehandler/data_preparation.go b/tests/utils/profilehandler/data_preparation.go
index 37c60011b9..8587f40a31 100644
--- a/tests/utils/profilehandler/data_preparation.go
+++ b/tests/utils/profilehandler/data_preparation.go
@@ -3,6 +3,7 @@ package profilehandler
 import (
 	"bytes"
 	"fmt"
+	"reflect"
 	"strconv"
 	"strings"

@@ -12,12 +13,25 @@ import (
 	"github.com/openshift-online/ocm-common/pkg/test/vpc_client"

 	"github.com/openshift/rosa/pkg/ocm"
+	"github.com/openshift/rosa/tests/ci/config"
 	"github.com/openshift/rosa/tests/utils/common"
 	con "github.com/openshift/rosa/tests/utils/common/constants"
 	"github.com/openshift/rosa/tests/utils/exec/rosacli"
 	"github.com/openshift/rosa/tests/utils/log"
 )

+func RecordUserDataInfo(filePath string, key string, value string) error {
+	userData, _ := ParseUserData()
+	if userData == nil {
+		userData = &UserData{}
+	}
+	valueOfUserData := reflect.ValueOf(userData).Elem()
+	valueOfUserData.FieldByName(key).SetString(value)
+	_, err := common.CreateFileWithContent(filePath, userData)
+	return err
+}
+
 func PrepareVersion(client *rosacli.Client, versionRequirement string, channelGroup string, hcp bool) (
 	*rosacli.OpenShiftVersionTableOutput, error) {
 	log.Logger.Infof("Got version requirement %s going to prepare accordingly", versionRequirement)
@@ -81,6 +95,10 @@ func PreparePrefix(profilePrefix string, nameLength int) string {
 func PrepareVPC(region string, vpcName string, cidrValue string) (*vpc_client.VPC, error) {
 	log.Logger.Info("Starting vpc preparation")
 	vpc, err := vpc_client.PrepareVPC(vpcName, region, cidrValue, false)
+	if err != nil {
+		return vpc, err
+	}
+	err = RecordUserDataInfo(config.Test.UserDataFile, "VpcID", vpc.VpcID)
 	log.Logger.Info("VPC preparation finished")
 	return vpc, err
 }
@@ -134,11 +152,19 @@ func PrepareProxy(vpcClient *vpc_client.VPC, zone string, sshPemFileName string,
 	}, nil
 }

-func PrepareKMSKey(region string, multiRegion bool, testClient string, hcp bool) (string, error) {
+func PrepareKMSKey(region string, multiRegion bool, testClient string, hcp bool, etcdKMS bool) (string, error) {
 	keyArn, err := kms_key.CreateOCMTestKMSKey(region, multiRegion, testClient)
 	if err != nil {
 		return keyArn, err
 	}
+	userDataKey := "KMSKey"
+	if etcdKMS {
+		userDataKey = "EtcdKMSKey"
+	}
+	err = RecordUserDataInfo(config.Test.UserDataFile, userDataKey, keyArn)
+	if err != nil {
+		return keyArn, err
+	}
 	if hcp {
 		kms_key.AddTagToKMS(keyArn, region, map[string]string{
 			"red-hat": "true",
@@ -207,6 +233,10 @@ func PrepareAccountRoles(client *rosacli.Client,
 		err = fmt.Errorf("error happens when create account-roles, %s", output.String())
 		return
 	}
+	err = RecordUserDataInfo(config.Test.UserDataFile, "AccountRolesPrefix", namePrefix)
+	if err != nil {
+		return
+	}
 	accRoleList, output, err := client.OCMResource.ListAccountRole()
 	if err != nil {
 		err = fmt.Errorf("error happens when list account-roles, %s", output.String())
@@ -243,6 +273,10 @@ func PrepareOperatorRolesByOIDCConfig(client *rosacli.Client,
 	_, err := client.OCMResource.CreateOperatorRoles(
 		flags...,
 	)
+	if err != nil {
+		return err
+	}
+	err = RecordUserDataInfo(config.Test.UserDataFile, "OperatorRolesPrefix", namePrefix)
 	return err
 }
@@ -257,7 +291,12 @@ func PrepareAuditlogRoleArnByOIDCConfig(client *rosacli.Client, auditLogRoleName
 	if err != nil {
 		return "", err
 	}
-	return PrepareAuditlogRoleArnByIssuer(auditLogRoleName, oidcConfig.IssuerUrl, region)
+	logRoleArn, err := PrepareAuditlogRoleArnByIssuer(auditLogRoleName, oidcConfig.IssuerUrl, region)
+	if err != nil {
+		return logRoleArn, err
+	}
+	err = RecordUserDataInfo(config.Test.UserDataFile, "AuditLogArn", logRoleArn)
+	return logRoleArn, err
 }
@@ -281,7 +320,11 @@ func PrepareAuditlogRoleArnByIssuer(auditLogRoleName string, oidcIssuerURL strin
 		return auditLogRoleArn, err
 	}
 	log.Logger.Infof("Create a new role for audit log forwarding: %s", auditLogRoleArn)
-
+	err = RecordUserDataInfo(config.Test.UserDataFile, "AuditLogArn", auditLogRoleArn)
+	if err != nil {
+		log.Logger.Errorf("Error happened when recording the audit log role: %s", err.Error())
+		return auditLogRoleArn, err
+	}
 	err = awsClient.AttachIAMPolicy(auditLogRoleName, policyArn)
 	if err != nil {
 		log.Logger.Errorf("Error happens when attach audit log policy %s to role %s: %s",
 			policyArn, auditLogRoleName, err.Error())
@@ -339,7 +382,8 @@ func PrepareOIDCConfig(client *rosacli.Client,
 	}
 	parser := rosacli.NewParser()
 	oidcConfigID = parser.JsonData.Input(output).Parse().DigString("id")
-	return oidcConfigID, nil
+	err = RecordUserDataInfo(config.Test.UserDataFile, "OIDCConfigID", oidcConfigID)
+	return oidcConfigID, err
 }

 func PrepareOIDCProvider(client *rosacli.Client, oidcConfigID string) error {
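RecordUserDataInfo (first hunk above) resolves the target field by name with reflection. Here is a self-contained sketch of that pattern; the UserData fields and the setField helper are illustrative stand-ins. Unlike the diff's version, it guards against unknown or unexported field names, where reflect.Value.SetString would otherwise panic:

```go
package main

import (
	"fmt"
	"reflect"
)

// UserData stands in for the suite's record struct; field names here are
// illustrative.
type UserData struct {
	VpcID      string
	KMSKey     string
	EtcdKMSKey string
}

// setField resolves an exported string field by name on a struct pointer and
// sets it. The IsValid/CanSet guard avoids the panic reflect raises for
// unknown or unexported fields, which the diff's version would hit on a bad key.
func setField(target interface{}, field, value string) error {
	v := reflect.ValueOf(target).Elem().FieldByName(field)
	if !v.IsValid() || !v.CanSet() || v.Kind() != reflect.String {
		return fmt.Errorf("no settable string field %q", field)
	}
	v.SetString(value)
	return nil
}

func main() {
	data := &UserData{}
	if err := setField(data, "VpcID", "vpc-0123456789"); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", data)
}
```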
diff --git a/tests/utils/profilehandler/profile_handler.go b/tests/utils/profilehandler/profile_handler.go
index d5cee70dc8..2b93d4028c 100644
--- a/tests/utils/profilehandler/profile_handler.go
+++ b/tests/utils/profilehandler/profile_handler.go
@@ -309,7 +309,7 @@ func GenerateClusterCreateFlags(profile *Profile, client *rosacli.Client) ([]str
 			AutoscalerIgnoreDaemonsetsUtilization: true,
 			AutoscalerMaxNodeProvisionTime:        "10m",
 			AutoscalerBalancingIgnoredLabels:      "aaa",
-			AutoscalerMaxNodesTotal:               "1000",
+			AutoscalerMaxNodesTotal:               "10",
 			AutoscalerMinCores:                    "0",
 			AutoscalerMaxCores:                    "100",
 			AutoscalerMinMemory:                   "0",
@@ -453,7 +453,7 @@ func GenerateClusterCreateFlags(profile *Profile, client *rosacli.Client) ([]str
 			clusterConfiguration.DisableWorkloadMonitoring = true
 		}
 		if profile.ClusterConfig.EtcdKMS {
-			keyArn, err := PrepareKMSKey(profile.Region, false, "rosacli", profile.ClusterConfig.HCP)
+			keyArn, err := PrepareKMSKey(profile.Region, false, "rosacli", profile.ClusterConfig.HCP, true)
 			userData.EtcdKMSKey = keyArn
 			if err != nil {
 				return flags, err
@@ -490,7 +490,7 @@ func GenerateClusterCreateFlags(profile *Profile, client *rosacli.Client) ([]str
 		flags = append(flags, "--compute-machine-type", profile.ClusterConfig.InstanceType)
 	}
 	if profile.ClusterConfig.KMSKey {
-		kmsKeyArn, err := PrepareKMSKey(profile.Region, false, "rosacli", profile.ClusterConfig.HCP)
+		kmsKeyArn, err := PrepareKMSKey(profile.Region, false, "rosacli", profile.ClusterConfig.HCP, false)
 		userData.KMSKey = kmsKeyArn
 		clusterConfiguration.Encryption = &ClusterConfigure.Encryption{
 			KmsKeyArn: kmsKeyArn, // placeHolder
@@ -556,15 +556,31 @@ func GenerateClusterCreateFlags(profile *Profile, client *rosacli.Client) ([]str
 	return flags, nil
 }

 func WaitForClusterReady(client *rosacli.Client, cluster string, timeoutMin int) error {
-
+	var description *rosacli.ClusterDescription
+	var clusterDetail *ClusterDetail
+	var err error
+	clusterDetail, err = ParserClusterDetail()
+	if err != nil {
+		return err
+	}
+	defer func() {
+		log.Logger.Info("Going to record the necessary information")
+		common.CreateFileWithContent(config.Test.ClusterDetailFile, clusterDetail)
+		if description == nil { // guard: describe may fail before description is set
+			return
+		}
+		common.CreateFileWithContent(config.Test.APIURLFile, description.APIURL)         // Temporary recording file to keep compatibility with existing jobs
+		common.CreateFileWithContent(config.Test.ConsoleUrlFile, description.ConsoleURL) // Temporary recording file to keep compatibility with existing jobs
+		common.CreateFileWithContent(config.Test.InfraIDFile, description.InfraID)       // Temporary recording file to keep compatibility with existing jobs
+	}()
 	endTime := time.Now().Add(time.Duration(timeoutMin) * time.Minute)
 	sleepTime := 0
 	for time.Now().Before(endTime) {
-		output, err := client.Cluster.DescribeClusterAndReflect(cluster)
+		description, err = client.Cluster.DescribeClusterAndReflect(cluster)
 		if err != nil {
 			return err
 		}
-		switch output.State {
+		clusterDetail.APIURL = description.APIURL
+		clusterDetail.ConsoleURL = description.ConsoleURL
+		clusterDetail.InfraID = description.InfraID
+		switch description.State {
 		case con.Ready:
 			log.Logger.Infof("Cluster %s is ready now.", cluster)
 			return nil
@@ -572,28 +588,28 @@ func WaitForClusterReady(client *rosacli.Client, cluster string, timeoutMin int)
 			return fmt.Errorf("cluster %s is %s now. Cannot wait for it ready",
 				cluster, con.Uninstalling)
 		default:
-			if strings.Contains(output.State, con.Error) {
+			if strings.Contains(description.State, con.Error) {
 				log.Logger.Errorf("Cluster is in %s status now. Recording the installation log", con.Error)
 				RecordClusterInstallationLog(client, cluster)
 				return fmt.Errorf("cluster %s is in %s state with reason: %s",
-					cluster, con.Error, output.State)
+					cluster, con.Error, description.State)
 			}
-			if strings.Contains(output.State, con.Pending) ||
-				strings.Contains(output.State, con.Installing) ||
-				strings.Contains(output.State, con.Validating) {
+			if strings.Contains(description.State, con.Pending) ||
+				strings.Contains(description.State, con.Installing) ||
+				strings.Contains(description.State, con.Validating) {
 				time.Sleep(2 * time.Minute)
 				continue
 			}
-			if strings.Contains(output.State, con.Waiting) {
+			if strings.Contains(description.State, con.Waiting) {
 				log.Logger.Infof("Cluster is in status of %v, wait for ready", con.Waiting)
 				if sleepTime >= 6 {
-					return fmt.Errorf("cluster stuck to %s status for more than 6 mins. Check the user data preparation for roles", output.State)
+					return fmt.Errorf("cluster stuck in %s status for more than 6 mins. Check the user data preparation for roles", description.State)
 				}
 				sleepTime += 2
 				time.Sleep(2 * time.Minute)
 				continue
 			}
-			return fmt.Errorf("unknown cluster state %s", output.State)
+			return fmt.Errorf("unknown cluster state %s", description.State)
 		}
 	}
@@ -616,7 +632,7 @@ func RecordClusterInstallationLog(client *rosacli.Client, cluster string) error
 	return err
 }

-func CreateClusterByProfile(profile *Profile, client *rosacli.Client, waitForClusterReady bool) (*rosacli.ClusterDescription, error) {
+func CreateClusterByProfileWithoutWaiting(profile *Profile, client *rosacli.Client, waitForClusterReady bool) (*rosacli.ClusterDescription, error) {
 	clusterDetail := new(ClusterDetail)
 	flags, err := GenerateClusterCreateFlags(profile, client)
@@ -638,12 +654,9 @@ func CreateClusterByProfileWithoutWaiting(profile *Profile, client *rosacli.Clie
 	defer func() {
 		log.Logger.Info("Going to record the necessary information")
 		common.CreateFileWithContent(config.Test.ClusterDetailFile, clusterDetail)
-		common.CreateFileWithContent(config.Test.ClusterIDFile, description.ID)          // Temporary recoding file to make it compatible to existing jobs
-		common.CreateFileWithContent(config.Test.ClusterNameFile, description.Name)      // Temporary recoding file to make it compatible to existing jobs
-		common.CreateFileWithContent(config.Test.APIURLFile, description.APIURL)         // Temporary recoding file to make it compatible to existing jobs
-		common.CreateFileWithContent(config.Test.ConsoleUrlFile, description.ConsoleURL) // Temporary recoding file to make it compatible to existing jobs
-		common.CreateFileWithContent(config.Test.InfraIDFile, description.InfraID)       // Temporary recoding file to make it compatible to existing jobs
-		common.CreateFileWithContent(config.Test.ClusterTypeFile, "rosa")                // Temporary recoding file to make it compatible to existing jobs
+		common.CreateFileWithContent(config.Test.ClusterIDFile, description.ID)     // Temporary recording file to keep compatibility with existing jobs
+		common.CreateFileWithContent(config.Test.ClusterNameFile, description.Name) // Temporary recording file to keep compatibility with existing jobs
+		common.CreateFileWithContent(config.Test.ClusterTypeFile, "rosa")           // Temporary recording file to keep compatibility with existing jobs
 	}()
 	clusterDetail.ClusterID = description.ID
 	clusterDetail.ClusterName = description.Name
@@ -674,6 +687,10 @@ func CreateClusterByProfileWithoutWaiting(profile *Profile, client *rosacli.Clie
 			return description, err
 		}
 	}
+	return description, err
+}
+
+func CreateClusterByProfile(profile *Profile, client *rosacli.Client, waitForClusterReady bool) (*rosacli.ClusterDescription, error) {
+	description, err := CreateClusterByProfileWithoutWaiting(profile, client, waitForClusterReady)
+	if err != nil { // guard: description may be nil when creation failed
+		return description, err
+	}
 	if profile.ClusterConfig.BYOVPC {
 		log.Logger.Infof("Reverify the network for the cluster %s to make sure it can be parsed", description.ID)
 		ReverifyClusterNetwork(client, description.ID)
@@ -690,24 +707,23 @@
 }

 func WaitForClusterUninstalled(client *rosacli.Client, cluster string, timeoutMin int) error {
-
 	endTime := time.Now().Add(time.Duration(timeoutMin) * time.Minute)
 	for time.Now().Before(endTime) {
 		output, err := client.Cluster.DescribeCluster(cluster)
+		if err != nil && strings.Contains(output.String(), fmt.Sprintf("There is no cluster with identifier or name '%s'", cluster)) {
+			log.Logger.Infof("Cluster %s has been deleted.", cluster)
+			return nil
+		}
 		desc, err := client.Cluster.ReflectClusterDescription(output)
 		if err != nil {
 			return err
 		}
-		if strings.Contains(output.String(), fmt.Sprintf("There is no cluster with identifier or name '%s'", cluster)) {
-			log.Logger.Infof("Cluster %s has been deleted.", cluster)
-			return nil
-		}
 		if strings.Contains(desc.State, con.Uninstalling) {
 			time.Sleep(2 * time.Minute)
 			continue
 		}
-		return fmt.Errorf("Cluster %s is in status of %s which won't be deleted, stop waiting", cluster, desc.State)
+		return fmt.Errorf("cluster %s is in status of %s which won't be deleted, stop waiting", cluster, desc.State)
 	}
 	return fmt.Errorf("timeout for waiting for cluster deletion finished after %d mins", timeoutMin)
 }
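WaitForClusterReady and WaitForClusterUninstalled both implement the same deadline/interval loop inline. A generic sketch of that loop follows; pollUntil and its parameters are illustrative names, not repository code:

```go
package main

import (
	"fmt"
	"time"
)

// pollUntil re-runs check every interval until it reports done, returns an
// error, or the deadline passes, mirroring the inline loops above.
func pollUntil(timeout, interval time.Duration, check func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		done, err := check()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("timed out after %s", timeout)
}

func main() {
	start := time.Now()
	err := pollUntil(2*time.Second, 200*time.Millisecond, func() (bool, error) {
		return time.Since(start) > time.Second, nil // stand-in for a cluster state probe
	})
	fmt.Println("poll result:", err)
}
```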
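The last hunk moves the "no cluster" check ahead of ReflectClusterDescription. The reorder matters: the old code reflected the describe output first and returned on error, so the success path for an already-deleted cluster was effectively unreachable. A small sketch of the moved check, with illustrative names:

```go
package main

import (
	"fmt"
	"strings"
)

// deletionFinished isolates the moved check: a describe error whose output
// says the cluster no longer exists means the uninstall completed, so it is
// treated as success rather than failure.
func deletionFinished(describeErr error, output, cluster string) bool {
	return describeErr != nil &&
		strings.Contains(output, fmt.Sprintf("There is no cluster with identifier or name '%s'", cluster))
}

func main() {
	err := fmt.Errorf("rosa describe failed")
	out := "ERR: There is no cluster with identifier or name 'abc'"
	fmt.Println(deletionFinished(err, out, "abc")) // true: treat as deleted
}
```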