Merge pull request #2096 from xueli181114/OCM-8257
OCM-8257 | ci: Split creation out and split cluster waiting out and make autoscaler value to 10
openshift-merge-bot[bot] committed May 27, 2024
2 parents b37862b + 4c1ccbb commit 88e6525
Showing 6 changed files with 119 additions and 49 deletions.
27 changes: 15 additions & 12 deletions tests/ci/config/config.go
@@ -40,12 +40,13 @@ type TestConfig struct {
GlobalENV *GlobalENVVariables
}
type GlobalENVVariables struct {
ChannelGroup string `env:"CHANNEL_GROUP" default:""`
Version string `env:"VERSION" default:""`
Region string `env:"REGION" default:""`
ProvisionShard string `env:"PROVISION_SHARD" default:""`
NamePrefix string `env:"NAME_PREFIX"`
ClusterWaitingTime int `env:"CLUSTER_TIMEOUT" default:"60"`
ChannelGroup string `env:"CHANNEL_GROUP" default:""`
Version string `env:"VERSION" default:""`
Region string `env:"REGION" default:""`
ProvisionShard string `env:"PROVISION_SHARD" default:""`
NamePrefix string `env:"NAME_PREFIX"`
ClusterWaitingTime int `env:"CLUSTER_TIMEOUT" default:"60"`
WaitSetupClusterReady bool `env:"WAIT_SETUP_CLUSTER_READY" default:"true"`
}

func init() {
@@ -82,13 +83,15 @@ func init() {
if err != nil {
panic(fmt.Errorf("env variable CLUSTER_TIMEOUT must be set to an integer"))
}
waitSetupClusterReady, _ := strconv.ParseBool(common.ReadENVWithDefaultValue("WAIT_SETUP_CLUSTER_READY", "true"))
Test.GlobalENV = &GlobalENVVariables{
ChannelGroup: os.Getenv("CHANNEL_GROUP"),
Version: os.Getenv("VERSION"),
Region: os.Getenv("REGION"),
ProvisionShard: os.Getenv("PROVISION_SHARD"),
NamePrefix: os.Getenv("NAME_PREFIX"),
ClusterWaitingTime: waitingTime,
ChannelGroup: os.Getenv("CHANNEL_GROUP"),
Version: os.Getenv("VERSION"),
Region: os.Getenv("REGION"),
ProvisionShard: os.Getenv("PROVISION_SHARD"),
NamePrefix: os.Getenv("NAME_PREFIX"),
ClusterWaitingTime: waitingTime,
WaitSetupClusterReady: waitSetupClusterReady,
}

}
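
The new WaitSetupClusterReady flag follows the same pattern as the existing settings: read an environment variable, fall back to a default, and parse it into a typed field. A minimal standalone sketch of that pattern, with readEnvWithDefault standing in as an assumption for the repo's common.ReadENVWithDefaultValue helper:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// readEnvWithDefault returns the value of key, or def when it is unset or
// empty. It is an illustrative stand-in for common.ReadENVWithDefaultValue.
func readEnvWithDefault(key, def string) string {
	if v, ok := os.LookupEnv(key); ok && v != "" {
		return v
	}
	return def
}

func main() {
	// Defaulting to "true" preserves the old behavior: setup waits for the
	// cluster unless WAIT_SETUP_CLUSTER_READY=false is exported by the job.
	wait, err := strconv.ParseBool(readEnvWithDefault("WAIT_SETUP_CLUSTER_READY", "true"))
	if err != nil {
		panic(fmt.Errorf("env variable WAIT_SETUP_CLUSTER_READY must be a boolean: %w", err))
	}
	fmt.Println("wait for cluster ready:", wait)
}

Note that the committed code discards the ParseBool error (waitSetupClusterReady, _ := ...), so an unparsable value silently becomes false instead of failing fast the way the CLUSTER_TIMEOUT check above does.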
15 changes: 11 additions & 4 deletions tests/e2e/e2e_setup_test.go
@@ -4,21 +4,28 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

"github.com/openshift/rosa/tests/ci/config"
"github.com/openshift/rosa/tests/ci/labels"
"github.com/openshift/rosa/tests/utils/exec/rosacli"
"github.com/openshift/rosa/tests/utils/log"
PH "github.com/openshift/rosa/tests/utils/profilehandler"
"github.com/openshift/rosa/tests/utils/profilehandler"
)

var _ = Describe("ROSA CLI Test", func() {
It("PrepareClusterByProfile",
labels.Critical,
labels.Day1Prepare,
func() {
client := rosacli.NewClient()
profile := PH.LoadProfileYamlFileByENV()
cluster, err := PH.CreateClusterByProfile(profile, client, true)
profile := profilehandler.LoadProfileYamlFileByENV()
cluster, err := profilehandler.CreateClusterByProfile(profile, client, config.Test.GlobalENV.WaitSetupClusterReady)
Expect(err).ToNot(HaveOccurred())
log.Logger.Infof("Cluster prepared successfully with id %s", cluster.ID)
})

It("WaitClusterReady", func() {
clusterDetail, err := profilehandler.ParserClusterDetail()
Expect(err).ToNot(HaveOccurred())
client := rosacli.NewClient()
profilehandler.WaitForClusterReady(client, clusterDetail.ClusterID, config.Test.GlobalENV.ClusterWaitingTime)
})
})
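
Splitting PrepareClusterByProfile and WaitClusterReady into separate specs lets CI trigger creation and waiting as independent stages: the first spec persists the cluster details, and the second rebuilds the cluster ID from that record via ParserClusterDetail. A sketch of that handoff, under the assumption that the details are persisted as JSON; the file name and struct here are illustrative, and the real persistence lives in tests/utils/profilehandler:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// clusterDetail mirrors a subset of what the create spec persists.
type clusterDetail struct {
	ClusterID string `json:"cluster_id"`
}

func main() {
	path := "cluster-detail.json" // illustrative file name

	// Stage 1 (PrepareClusterByProfile) would write the record...
	created, _ := json.Marshal(clusterDetail{ClusterID: "abc123"})
	if err := os.WriteFile(path, created, 0o644); err != nil {
		panic(err)
	}

	// ...and stage 2 (WaitClusterReady) reads it back, possibly in a
	// different process, before polling the cluster state.
	raw, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	var detail clusterDetail
	if err := json.Unmarshal(raw, &detail); err != nil {
		panic(err)
	}
	fmt.Println("waiting for cluster", detail.ClusterID)
}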
2 changes: 1 addition & 1 deletion tests/utils/exec/rosacli/cmd_runner.go
@@ -196,7 +196,7 @@ func (r *runner) Run() (bytes.Buffer, error) {

err = cmd.Run()

log.Logger.Debugf("Get Combining Stdout and Stder is :\n%s", output.String())
log.Logger.Infof("Get Combining Stdout and Stder is :\n%s", output.String())

if strings.Contains(output.String(), "Not able to get authentication token") {
retry = retry + 1
2 changes: 1 addition & 1 deletion tests/utils/log/logger.go
@@ -16,7 +16,7 @@ func GetLogger() *Log {
logger, _ := logging.
NewStdLoggerBuilder().
Streams(g.GinkgoWriter, g.GinkgoWriter).
Debug(true).
Debug(false).
Build()
return &Log{
logger: logger,
52 changes: 48 additions & 4 deletions tests/utils/profilehandler/data_preparation.go
@@ -3,6 +3,7 @@ package profilehandler
import (
"bytes"
"fmt"
"reflect"
"strconv"
"strings"

@@ -12,12 +13,25 @@ import (
"github.com/openshift-online/ocm-common/pkg/test/vpc_client"

"github.com/openshift/rosa/pkg/ocm"
"github.com/openshift/rosa/tests/ci/config"
"github.com/openshift/rosa/tests/utils/common"
con "github.com/openshift/rosa/tests/utils/common/constants"
"github.com/openshift/rosa/tests/utils/exec/rosacli"
"github.com/openshift/rosa/tests/utils/log"
)

func RecordUserDataInfo(filePath string, key string, value string) error {
userData, _ := ParseUserData()

if userData == nil {
userData = &UserData{}
}
valueOfUserData := reflect.ValueOf(userData).Elem()
valueOfUserData.FieldByName(key).SetString(value)
_, err := common.CreateFileWithContent(filePath, userData)
return err

}
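
RecordUserDataInfo sets UserData fields by name through reflection, so each prepare step can record the resource it created under a well-known key. One caveat worth noting: FieldByName on a missing or unexported field returns an invalid reflect.Value, and SetString then panics. A standalone sketch of the technique with a defensive guard added; the two-field UserData and the setField helper are illustrative, not the repo's API:

package main

import (
	"fmt"
	"reflect"
)

// UserData mirrors the shape used by the test suite; only two fields shown.
type UserData struct {
	VpcID  string
	KMSKey string
}

// setField writes value into the exported string field named key, returning
// an error instead of panicking when the field does not exist or cannot be
// set.
func setField(data *UserData, key, value string) error {
	f := reflect.ValueOf(data).Elem().FieldByName(key)
	if !f.IsValid() || !f.CanSet() || f.Kind() != reflect.String {
		return fmt.Errorf("no settable string field %q on UserData", key)
	}
	f.SetString(value)
	return nil
}

func main() {
	data := &UserData{}
	if err := setField(data, "VpcID", "vpc-0123456789abcdef0"); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", data)
}

As committed, RecordUserDataInfo panics rather than errors on an unknown key, which is acceptable in test code where the keys are hard-coded; the guard above is the defensive variant.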
func PrepareVersion(client *rosacli.Client, versionRequirement string, channelGroup string, hcp bool) (
*rosacli.OpenShiftVersionTableOutput, error) {
log.Logger.Infof("Got version requirement %s going to prepare accordingly", versionRequirement)
@@ -81,6 +95,10 @@ func PreparePrefix(profilePrefix string, nameLength int) string {
func PrepareVPC(region string, vpcName string, cidrValue string) (*vpc_client.VPC, error) {
log.Logger.Info("Starting vpc preparation")
vpc, err := vpc_client.PrepareVPC(vpcName, region, cidrValue, false)
if err != nil {
return vpc, err
}
err = RecordUserDataInfo(config.Test.UserDataFile, "VpcID", vpc.VpcID)
log.Logger.Info("VPC preparation finished")
return vpc, err

@@ -134,11 +152,19 @@ func PrepareProxy(vpcClient *vpc_client.VPC, zone string, sshPemFileName string,
}, nil
}

func PrepareKMSKey(region string, multiRegion bool, testClient string, hcp bool) (string, error) {
func PrepareKMSKey(region string, multiRegion bool, testClient string, hcp bool, etcdKMS bool) (string, error) {
keyArn, err := kms_key.CreateOCMTestKMSKey(region, multiRegion, testClient)
if err != nil {
return keyArn, err
}
userDataKey := "KMSKey"
if etcdKMS {
userDataKey = "EtcdKMSKey"
}
err = RecordUserDataInfo(config.Test.UserDataFile, userDataKey, keyArn)
if err != nil {
return keyArn, err
}
if hcp {
kms_key.AddTagToKMS(keyArn, region, map[string]string{
"red-hat": "true",
@@ -207,6 +233,10 @@ func PrepareAccountRoles(client *rosacli.Client,
err = fmt.Errorf("error happens when create account-roles, %s", output.String())
return
}
err = RecordUserDataInfo(config.Test.UserDataFile, "AccountRolesPrefix", namePrefix)
if err != nil {
return
}
accRoleList, output, err := client.OCMResource.ListAccountRole()
if err != nil {
err = fmt.Errorf("error happens when list account-roles, %s", output.String())
@@ -243,6 +273,10 @@ func PrepareOperatorRolesByOIDCConfig(client *rosacli.Client,
_, err := client.OCMResource.CreateOperatorRoles(
flags...,
)
if err != nil {
return err
}
err = RecordUserDataInfo(config.Test.UserDataFile, "OperatorRolesPrefix", namePrefix)
return err
}

@@ -257,7 +291,12 @@ func PrepareAuditlogRoleArnByOIDCConfig(client *rosacli.Client, auditLogRoleName
if err != nil {
return "", err
}
return PrepareAuditlogRoleArnByIssuer(auditLogRoleName, oidcConfig.IssuerUrl, region)
logRoleArn, err := PrepareAuditlogRoleArnByIssuer(auditLogRoleName, oidcConfig.IssuerUrl, region)
if err != nil {
return logRoleArn, err
}
err = RecordUserDataInfo(config.Test.UserDataFile, "AuditLogArn", logRoleArn)
return logRoleArn, err

}

@@ -281,7 +320,11 @@ func PrepareAuditlogRoleArnByIssuer(auditLogRoleName string, oidcIssuerURL strin
return auditLogRoleArn, err
}
log.Logger.Infof("Create a new role for audit log forwarding: %s", auditLogRoleArn)

err = RecordUserDataInfo(config.Test.UserDataFile, "AuditLogArn", auditLogRoleArn)
if err != nil {
log.Logger.Errorf("Error happened when record audit log role: %s", err.Error())
return auditLogRoleArn, err
}
err = awsClient.AttachIAMPolicy(auditLogRoleName, policyArn)
if err != nil {
log.Logger.Errorf("Error happens when attach audit log policy %s to role %s: %s", policyArn, auditLogRoleName, err.Error())
@@ -339,7 +382,8 @@ func PrepareOIDCConfig(client *rosacli.Client,
}
parser := rosacli.NewParser()
oidcConfigID = parser.JsonData.Input(output).Parse().DigString("id")
return oidcConfigID, nil
err = RecordUserDataInfo(config.Test.UserDataFile, "OIDCConfigID", oidcConfigID)
return oidcConfigID, err
}
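
PrepareOIDCConfig now digs the new config's id out of the CLI's JSON output and records it before returning. A dependency-free sketch of that one-level lookup using encoding/json; digString is an illustrative stand-in for the test parser's DigString:

package main

import (
	"encoding/json"
	"fmt"
)

// digString pulls a top-level string field out of raw JSON, returning ""
// when the key is absent or not a string — roughly what DigString("id")
// does for one level of nesting.
func digString(raw []byte, key string) string {
	var m map[string]interface{}
	if err := json.Unmarshal(raw, &m); err != nil {
		return ""
	}
	s, _ := m[key].(string)
	return s
}

func main() {
	out := []byte(`{"id":"abc123","issuer_url":"https://example.com"}`)
	fmt.Println(digString(out, "id")) // abc123
}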

func PrepareOIDCProvider(client *rosacli.Client, oidcConfigID string) error {
70 changes: 43 additions & 27 deletions tests/utils/profilehandler/profile_handler.go
@@ -309,7 +309,7 @@ func GenerateClusterCreateFlags(profile *Profile, client *rosacli.Client) ([]str
AutoscalerIgnoreDaemonsetsUtilization: true,
AutoscalerMaxNodeProvisionTime: "10m",
AutoscalerBalancingIgnoredLabels: "aaa",
AutoscalerMaxNodesTotal: "1000",
AutoscalerMaxNodesTotal: "10",
AutoscalerMinCores: "0",
AutoscalerMaxCores: "100",
AutoscalerMinMemory: "0",
@@ -453,7 +453,7 @@ func GenerateClusterCreateFlags(profile *Profile, client *rosacli.Client) ([]str
clusterConfiguration.DisableWorkloadMonitoring = true
}
if profile.ClusterConfig.EtcdKMS {
keyArn, err := PrepareKMSKey(profile.Region, false, "rosacli", profile.ClusterConfig.HCP)
keyArn, err := PrepareKMSKey(profile.Region, false, "rosacli", profile.ClusterConfig.HCP, true)
userData.EtcdKMSKey = keyArn
if err != nil {
return flags, err
@@ -490,7 +490,7 @@ func GenerateClusterCreateFlags(profile *Profile, client *rosacli.Client) ([]str
flags = append(flags, "--compute-machine-type", profile.ClusterConfig.InstanceType)
}
if profile.ClusterConfig.KMSKey {
kmsKeyArn, err := PrepareKMSKey(profile.Region, false, "rosacli", profile.ClusterConfig.HCP)
kmsKeyArn, err := PrepareKMSKey(profile.Region, false, "rosacli", profile.ClusterConfig.HCP, false)
userData.KMSKey = kmsKeyArn
clusterConfiguration.Encryption = &ClusterConfigure.Encryption{
KmsKeyArn: kmsKeyArn, // placeHolder
@@ -556,44 +556,60 @@ func GenerateClusterCreateFlags(profile *Profile, client *rosacli.Client) ([]str
return flags, nil
}
func WaitForClusterReady(client *rosacli.Client, cluster string, timeoutMin int) error {

var description *rosacli.ClusterDescription
var clusterDetail *ClusterDetail
var err error
clusterDetail, err = ParserClusterDetail()
if err != nil {
return err
}
defer func() {
log.Logger.Info("Going to record the necessary information")
common.CreateFileWithContent(config.Test.ClusterDetailFile, clusterDetail)
common.CreateFileWithContent(config.Test.APIURLFile, description.APIURL) // Temporary recoding file to make it compatible to existing jobs
common.CreateFileWithContent(config.Test.ConsoleUrlFile, description.ConsoleURL) // Temporary recoding file to make it compatible to existing jobs
common.CreateFileWithContent(config.Test.InfraIDFile, description.InfraID) // Temporary recoding file to make it compatible to existing jobs
}()
endTime := time.Now().Add(time.Duration(timeoutMin) * time.Minute)
sleepTime := 0
for time.Now().Before(endTime) {
output, err := client.Cluster.DescribeClusterAndReflect(cluster)
description, err = client.Cluster.DescribeClusterAndReflect(cluster)
if err != nil {
return err
}
switch output.State {
clusterDetail.APIURL = description.APIURL
clusterDetail.ConsoleURL = description.ConsoleURL
clusterDetail.InfraID = description.InfraID
switch description.State {
case con.Ready:
log.Logger.Infof("Cluster %s is ready now.", cluster)
return nil
case con.Uninstalling:
return fmt.Errorf("cluster %s is %s now. Cannot wait for it ready",
cluster, con.Uninstalling)
default:
if strings.Contains(output.State, con.Error) {
if strings.Contains(description.State, con.Error) {
log.Logger.Errorf("Cluster is in %s status now. Recording the installation log", con.Error)
RecordClusterInstallationLog(client, cluster)
return fmt.Errorf("cluster %s is in %s state with reason: %s",
cluster, con.Error, output.State)
cluster, con.Error, description.State)
}
if strings.Contains(output.State, con.Pending) ||
strings.Contains(output.State, con.Installing) ||
strings.Contains(output.State, con.Validating) {
if strings.Contains(description.State, con.Pending) ||
strings.Contains(description.State, con.Installing) ||
strings.Contains(description.State, con.Validating) {
time.Sleep(2 * time.Minute)
continue
}
if strings.Contains(output.State, con.Waiting) {
if strings.Contains(description.State, con.Waiting) {
log.Logger.Infof("Cluster is in status of %v, wait for ready", con.Waiting)
if sleepTime >= 6 {
return fmt.Errorf("cluster stuck to %s status for more than 6 mins. Check the user data preparation for roles", output.State)
return fmt.Errorf("cluster stuck to %s status for more than 6 mins. Check the user data preparation for roles", description.State)
}
sleepTime += 2
time.Sleep(2 * time.Minute)
continue
}
return fmt.Errorf("unknown cluster state %s", output.State)
return fmt.Errorf("unknown cluster state %s", description.State)
}

}
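
The reworked WaitForClusterReady gains two properties: the defer persists whatever API URL, console URL, and infra ID were last observed even when waiting fails, and the loop polls on a fixed two-minute cadence against a wall-clock deadline. A stripped-down sketch of that deadline pattern; the state names and fetch function are placeholders, not the rosacli API:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitFor polls fetch every interval until it reports ready, hits a terminal
// state, or the deadline passes — the same shape as WaitForClusterReady.
func waitFor(fetch func() (state string, err error), timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		state, err := fetch()
		if err != nil {
			return err
		}
		switch state {
		case "ready":
			return nil
		case "uninstalling", "error":
			return fmt.Errorf("terminal state %q while waiting", state)
		default:
			time.Sleep(interval) // still pending/installing/validating
		}
	}
	return errors.New("timed out waiting for ready state")
}

func main() {
	calls := 0
	err := waitFor(func() (string, error) {
		calls++
		if calls < 3 {
			return "installing", nil
		}
		return "ready", nil
	}, time.Second, 10*time.Millisecond)
	fmt.Println("result:", err)
}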
@@ -616,7 +632,7 @@ func RecordClusterInstallationLog(client *rosacli.Client, cluster string) error
return err
}

func CreateClusterByProfile(profile *Profile, client *rosacli.Client, waitForClusterReady bool) (*rosacli.ClusterDescription, error) {
func CreateClusterByProfileWithoutWaiting(profile *Profile, client *rosacli.Client, waitForClusterReady bool) (*rosacli.ClusterDescription, error) {
clusterDetail := new(ClusterDetail)

flags, err := GenerateClusterCreateFlags(profile, client)
@@ -638,12 +654,9 @@ func CreateClusterByProfile(profile *Profile, client *rosacli.Client, waitForClu
defer func() {
log.Logger.Info("Going to record the necessary information")
common.CreateFileWithContent(config.Test.ClusterDetailFile, clusterDetail)
common.CreateFileWithContent(config.Test.ClusterIDFile, description.ID) // Temporary recoding file to make it compatible to existing jobs
common.CreateFileWithContent(config.Test.ClusterNameFile, description.Name) // Temporary recoding file to make it compatible to existing jobs
common.CreateFileWithContent(config.Test.APIURLFile, description.APIURL) // Temporary recoding file to make it compatible to existing jobs
common.CreateFileWithContent(config.Test.ConsoleUrlFile, description.ConsoleURL) // Temporary recoding file to make it compatible to existing jobs
common.CreateFileWithContent(config.Test.InfraIDFile, description.InfraID) // Temporary recoding file to make it compatible to existing jobs
common.CreateFileWithContent(config.Test.ClusterTypeFile, "rosa") // Temporary recoding file to make it compatible to existing jobs
common.CreateFileWithContent(config.Test.ClusterIDFile, description.ID) // Temporary recoding file to make it compatible to existing jobs
common.CreateFileWithContent(config.Test.ClusterNameFile, description.Name) // Temporary recoding file to make it compatible to existing jobs
common.CreateFileWithContent(config.Test.ClusterTypeFile, "rosa") // Temporary recoding file to make it compatible to existing jobs
}()
clusterDetail.ClusterID = description.ID
clusterDetail.ClusterName = description.Name
@@ -674,6 +687,10 @@ func CreateClusterByProfile(profile *Profile, client *rosacli.Client, waitForClu
return description, err
}
}
return description, err
}
func CreateClusterByProfile(profile *Profile, client *rosacli.Client, waitForClusterReady bool) (*rosacli.ClusterDescription, error) {
description, err := CreateClusterByProfileWithoutWaiting(profile, client, waitForClusterReady)
if profile.ClusterConfig.BYOVPC {
log.Logger.Infof("Reverify the network for the cluster %s to make sure it can be parsed", description.ID)
ReverifyClusterNetwork(client, description.ID)
@@ -690,24 +707,23 @@ func CreateClusterByProfile(profile *Profile, client *rosacli.Client, waitForClu
}

func WaitForClusterUninstalled(client *rosacli.Client, cluster string, timeoutMin int) error {

endTime := time.Now().Add(time.Duration(timeoutMin) * time.Minute)
for time.Now().Before(endTime) {
output, err := client.Cluster.DescribeCluster(cluster)
if err != nil && strings.Contains(output.String(), fmt.Sprintf("There is no cluster with identifier or name '%s'", cluster)) {
log.Logger.Infof("Cluster %s has been deleted.", cluster)
return nil
}
desc, err := client.Cluster.ReflectClusterDescription(output)

if err != nil {
return err
}
if strings.Contains(output.String(), fmt.Sprintf("There is no cluster with identifier or name '%s'", cluster)) {
log.Logger.Infof("Cluster %s has been deleted.", cluster)
return nil
}
if strings.Contains(desc.State, con.Uninstalling) {
time.Sleep(2 * time.Minute)
continue
}
return fmt.Errorf("Cluster %s is in status of %s which won't be deleted, stop waiting", cluster, desc.State)
return fmt.Errorf("cluster %s is in status of %s which won't be deleted, stop waiting", cluster, desc.State)
}
return fmt.Errorf("timeout for waiting for cluster deletion finished after %d mins", timeoutMin)
}
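
WaitForClusterUninstalled now checks for the "There is no cluster with identifier or name" message before trying to reflect the description; the reorder matters because reflecting the error payload of an already-deleted cluster would fail first. A sketch of that absence-means-success idiom, with placeholder types standing in for the rosacli client:

package main

import (
	"fmt"
	"strings"
)

// describe is a placeholder for client.Cluster.DescribeCluster; here it
// pretends the cluster is already gone.
func describe(name string) (string, error) {
	msg := fmt.Sprintf("ERR: There is no cluster with identifier or name '%s'", name)
	return msg, fmt.Errorf("%s", msg)
}

// waitDeleted treats a lookup failure that names a missing cluster as
// successful deletion, mirroring the reordered check in the diff above.
func waitDeleted(name string) error {
	output, err := describe(name)
	if err != nil && strings.Contains(output, fmt.Sprintf("There is no cluster with identifier or name '%s'", name)) {
		fmt.Printf("Cluster %s has been deleted.\n", name)
		return nil
	}
	if err != nil {
		return err
	}
	// A real implementation would parse the state from output and keep
	// polling while it is still "uninstalling".
	return fmt.Errorf("cluster %s still exists", name)
}

func main() {
	if err := waitDeleted("mycluster"); err != nil {
		panic(err)
	}
}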
