From 40c8dbd7150ad812174351aa143e215409cd3811 Mon Sep 17 00:00:00 2001
From: dawang
Date: Tue, 20 Aug 2024 14:38:54 +0800
Subject: [PATCH] OCM-9805 | test: Automate id:60278 Scaling up/down node pool on rosa hcp cluster

---
 tests/e2e/hcp_machine_pool_test.go            | 225 +++++++++++++++++-
 tests/e2e/test_rosacli_cluster.go             |   3 +-
 tests/e2e/utils.go                            |   5 +
 tests/utils/common/constants/cluster.go       |  18 ++
 tests/utils/config/cluster.go                 |  10 +
 tests/utils/exec/rosacli/cluster_service.go   |  11 +-
 .../utils/exec/rosacli/machinepool_service.go | 184 +++++++++++++-
 7 files changed, 444 insertions(+), 12 deletions(-)

diff --git a/tests/e2e/hcp_machine_pool_test.go b/tests/e2e/hcp_machine_pool_test.go
index 683669f130..901f37997c 100644
--- a/tests/e2e/hcp_machine_pool_test.go
+++ b/tests/e2e/hcp_machine_pool_test.go
@@ -1,6 +1,7 @@
 package e2e
 
 import (
+	"fmt"
 	"strconv"
 	"strings"
 
@@ -10,17 +11,25 @@ import (
 	"github.com/openshift/rosa/tests/ci/labels"
 	"github.com/openshift/rosa/tests/utils/common"
+	"github.com/openshift/rosa/tests/utils/common/constants"
+	"github.com/openshift/rosa/tests/utils/config"
 	"github.com/openshift/rosa/tests/utils/exec/rosacli"
 	ph "github.com/openshift/rosa/tests/utils/profilehandler"
 )
 
-var _ = Describe("Create Machine Pool", labels.Feature.Machinepool, func() {
+var _ = Describe("HCP Machine Pool", labels.Feature.Machinepool, func() {
+	// By default, the suite does not check whether node pool instances are ready.
+	// To verify an HCP node pool's changes, set the env variable CLUSTER_NODE_POOL_GLOBAL_CHECK to true,
+	// which makes the tests wait for node pool instances to become ready until the timeout is reached.
+	isNodePoolGlobalCheck := config.IsNodePoolGlobalCheck()
 	var (
 		rosaClient         *rosacli.Client
 		machinePoolService rosacli.MachinePoolService
 		profile            *ph.Profile
+		isMultiArch        bool
 	)
+
 	BeforeEach(func() {
 		Expect(clusterID).ToNot(BeEmpty(), "Cluster ID is empty, please export the env variable CLUSTER_ID")
 		rosaClient = rosacli.NewClient()
@@ -30,15 +39,19 @@ var _ = Describe("Create Machine Pool", labels.Feature.Machinepool, func() {
 		hostedCluster, err := rosaClient.Cluster.IsHostedCPCluster(clusterID)
 		Expect(err).ToNot(HaveOccurred())
 
+		By("Check whether the cluster is multi arch")
+		isMultiArch, err = rosaClient.Cluster.IsMultiArch(clusterID)
+		Expect(err).ToNot(HaveOccurred())
+
 		profile = ph.LoadProfileYamlFileByENV()
 
 		if !hostedCluster {
 			SkipNotHosted()
 		}
 	})
-	It("to hosted cluster with additional security group IDs will work [id:72195]",
-		labels.Critical, labels.Runtime.Day2,
-		func() {
+
+	Describe("Create/delete/view a machine pool", func() {
+		It("should succeed with additional security group IDs [id:72195]", labels.Critical, labels.Runtime.Day2, func() {
 			By("Load the vpc client of the machinepool")
 			mps, err := rosaClient.MachinePool.ListAndReflectNodePools(clusterID)
 			Expect(err).ToNot(HaveOccurred())
@@ -48,7 +61,9 @@ var _ = Describe("Create Machine Pool", labels.Feature.Machinepool, func() {
 			Expect(err).ToNot(HaveOccurred())
 
 			By("Prepare security groups")
-			sgIDs, err := vpcClient.CreateAdditionalSecurityGroups(3, "72195", "testing for case 72195")
+			// Use a random string prefix to avoid failures when the case is rerun
+			sgPrefix := common.GenerateRandomName("72195", 2)
+			sgIDs, err := vpcClient.CreateAdditionalSecurityGroups(3, sgPrefix, "testing for case 72195")
 			Expect(err).ToNot(HaveOccurred())
 
 			By("Create machinepool with security groups set")
@@ -80,6 +95,7 @@ var _ = Describe("Create Machine Pool", labels.Feature.Machinepool, func() {
 			Expect(err).ToNot(HaveOccurred())
 			Expect(mpDescription.AdditionalSecurityGroupIDs).To(BeEmpty())
 		})
+	})
 
 	It("machinepool AWS preflight tag validation[id:73638]",
 		labels.Medium, labels.Runtime.Day2,
 		func() {
@@ -177,4 +193,203 @@ var _ = Describe("Create Machine Pool", labels.Feature.Machinepool, func() {
 			Expect(err).To(HaveOccurred())
 			Expect(out.String()).Should(ContainSubstring("ERR: expected a valid user tag value '#'"))
 		})
+
+	DescribeTable("Scale up/down a machine pool", labels.Critical, labels.Runtime.Day2,
+		func(instanceType string, amdOrArm string) {
+			if !isMultiArch && amdOrArm == constants.ARM {
+				SkipNotMultiArch()
+			}
+
+			By("Create machinepool with " + amdOrArm + " instance " + instanceType)
+			mpPrefix := fmt.Sprintf("%v-60278", amdOrArm)
+			mpName := common.GenerateRandomName(mpPrefix, 2)
+			desiredReplicas := 1
+			_, err := rosaClient.MachinePool.CreateMachinePool(clusterID, mpName,
+				"--instance-type", instanceType,
+				"--replicas", fmt.Sprintf("%v", desiredReplicas),
+				"-y",
+			)
+			Expect(err).ToNot(HaveOccurred())
+			defer rosaClient.MachinePool.DeleteMachinePool(clusterID, mpName)
+
+			mpDesc, err := rosaClient.MachinePool.DescribeAndReflectNodePool(clusterID, mpName)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(mpDesc.DesiredReplicas).Should(Equal(desiredReplicas))
+			Expect(mpDesc.InstanceType).Should(Equal(instanceType))
+
+			if isNodePoolGlobalCheck {
+				By("Check if current replicas reach the desired replicas after creating a machine pool")
+				err = rosaClient.MachinePool.WaitNodePoolReplicasReady(
+					clusterID,
+					mpName,
+					false,
+					constants.NodePoolCheckPoll,
+					constants.NodePoolCheckTimeout,
+				)
+				Expect(err).ToNot(HaveOccurred())
+			}
+
+			By("Scale a machine pool with unchanged parameters")
+			err = rosaClient.MachinePool.ScaleNodePool(clusterID, mpName, desiredReplicas, true)
+			Expect(err).ToNot(HaveOccurred())
+
+			By("Scale up a machine pool replicas from 1 to 2")
+			upReplicas := 2
+			err = rosaClient.MachinePool.ScaleNodePool(clusterID, mpName, upReplicas, true)
+			Expect(err).ToNot(HaveOccurred())
+
+			By("Scale down a machine pool replicas from 2 to 1")
+			downReplicas := 1
+			err = rosaClient.MachinePool.ScaleNodePool(clusterID, mpName, downReplicas, true)
+			Expect(err).ToNot(HaveOccurred())
+
+			By("Scale down a machine pool replicas to 0")
+			zeroReplica := 0
+			err = rosaClient.MachinePool.ScaleNodePool(clusterID, mpName, zeroReplica, true)
+			Expect(err).ToNot(HaveOccurred())
+		},
+		Entry("For amd64 cpu architecture [id:60278]", constants.M5XLarge, constants.AMD),
+		Entry("For arm64 cpu architecture [id:60278]", constants.M6gXLarge, constants.ARM),
+	)
+
+	DescribeTable("Scale up/down a machine pool with invalid replica", labels.Critical, labels.Runtime.Day2,
+		func(instanceType string, updatedReplicas string, expectedErrMsg string) {
+			By("Create machinepool with instance " + instanceType)
+			mpName := common.GenerateRandomName("mp-60278", 2)
+			desiredReplicas := 1
+			_, err := rosaClient.MachinePool.CreateMachinePool(clusterID, mpName,
+				"--instance-type", instanceType,
+				"--replicas", fmt.Sprintf("%v", desiredReplicas),
+				"-y",
+			)
+			Expect(err).ToNot(HaveOccurred())
+			defer rosaClient.MachinePool.DeleteMachinePool(clusterID, mpName)
+
+			_, err = rosaClient.MachinePool.EditMachinePool(clusterID, mpName,
+				"--replicas", fmt.Sprintf("%v", updatedReplicas),
+				"-y",
+			)
+			Expect(err.Error()).Should(ContainSubstring(expectedErrMsg))
+		},
+
+		Entry("Scale replica too large [id:60278]", constants.M52XLarge, "1000", "exceeds the maximum allowed"),
+		Entry("Scale replica to -1 [id:60278]",
constants.M52XLarge, "-1", "Replicas must be a non-negative number"), + Entry("Scale replica to a char [id:60278]", constants.M52XLarge, "a", "invalid syntax"), + ) + + Describe("Scale up/down a machine pool enabling autoscale", func() { + It("should succeed to scale with valid parameters [id:60278]", labels.Medium, labels.Runtime.Day2, func() { + instanceType := constants.M52XLarge + By("Create machinepool with " + " instance " + instanceType + " and enable autoscale") + + mpPrefix := "autoscale" + mpName := common.GenerateRandomName(mpPrefix, 2) + minReplica := 1 + maxReplica := 3 + _, err := rosaClient.MachinePool.CreateMachinePool(clusterID, mpName, + "--instance-type", instanceType, + "--enable-autoscaling", + "--min-replicas", fmt.Sprintf("%v", minReplica), + "--max-replicas", fmt.Sprintf("%v", maxReplica), + "-y", + ) + Expect(err).ToNot(HaveOccurred()) + defer rosaClient.MachinePool.DeleteMachinePool(clusterID, mpName) + + if isNodePoolGlobalCheck { + By("Check current replicas reach the min replicas after creating a autoscaled machine pool") + err = rosaClient.MachinePool.WaitNodePoolReplicasReady( + clusterID, + mpName, + true, + constants.NodePoolCheckPoll, + constants.NodePoolCheckTimeout, + ) + Expect(err).ToNot(HaveOccurred()) + } + + // TODO There's an issue here, uncomment when solved + //By("Scale a machine pool with unchanged parameters") + //err = rosaClient.MachinePool.ScaleAutoScaledNodePool(clusterID, mpName, minReplica, maxReplica, true) + //Expect(err).ToNot(HaveOccurred()) + + By("Scale up a machine pool replicas from 1~3 to 2~5") + upMinReplica := 2 + upMaxReplica := 5 + err = rosaClient.MachinePool.ScaleAutoScaledNodePool(clusterID, mpName, upMinReplica, upMaxReplica, true) + Expect(err).ToNot(HaveOccurred()) + + // Don't check the current replicas when scale down, because it won't change after reducing min_replica + // It only depends on the autoscale strategy when reducing min_replica + By("Scale down a machine pool replicas from 2~5 to 1~2") + downMinReplica := 1 + downMaxReplica := 2 + err = rosaClient.MachinePool.ScaleAutoScaledNodePool(clusterID, mpName, downMinReplica, downMaxReplica, false) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should raise error message with the invalid parameters [id:60278]", labels.Critical, labels.Runtime.Day2, func() { + instanceType := constants.M52XLarge + By("Create machinepool with" + " instance " + instanceType + " and enable autoscale") + + mpPrefix := "autoscale" + mpName := common.GenerateRandomName(mpPrefix, 2) + minReplica := 1 + maxReplica := 2 + _, err := rosaClient.MachinePool.CreateMachinePool(clusterID, mpName, + "--instance-type", instanceType, + "--enable-autoscaling", + "--min-replicas", fmt.Sprintf("%v", minReplica), + "--max-replicas", fmt.Sprintf("%v", maxReplica), + "-y", + ) + Expect(err).ToNot(HaveOccurred()) + defer rosaClient.MachinePool.DeleteMachinePool(clusterID, mpName) + + By("Scale down a machine pool min replica to 0") + zeroMinReplica := 0 + _, err = rosaClient.MachinePool.EditMachinePool(clusterID, mpName, + "--min-replicas", fmt.Sprintf("%v", zeroMinReplica), + "-y", + ) + expectErrMsg := "The number of machine pool min-replicas needs to be a non-negative integer" + Expect(err.Error()).Should(ContainSubstring(expectErrMsg)) + + By("Scale up a machine pool max replica too large") + moreMaxReplica := 1000 + _, err = rosaClient.MachinePool.EditMachinePool(clusterID, mpName, + "--max-replicas", fmt.Sprintf("%v", moreMaxReplica), + "-y", + ) + expectErrMsg = "exceeds the maximum allowed" + 
+			Expect(err.Error()).Should(ContainSubstring(expectErrMsg))
+
+			By("Scale a machine pool min replica > max replica")
+			_, err = rosaClient.MachinePool.EditMachinePool(clusterID, mpName,
+				"--min-replicas", fmt.Sprintf("%v", "5"),
+				"--max-replicas", fmt.Sprintf("%v", "3"),
+				"-y",
+			)
+			expectErrMsg = "min-replicas needs to be less than the number of machine pool max-replicas"
+			Expect(err.Error()).Should(ContainSubstring(expectErrMsg))
+
+			By("Scale down a machine pool min replica to -1")
+			downMinReplica := -1
+			_, err = rosaClient.MachinePool.EditMachinePool(clusterID, mpName,
+				"--min-replicas", fmt.Sprintf("%v", downMinReplica),
+				"-y",
+			)
+			expectErrMsg = "Min replicas must be a non-negative number when autoscaling is set"
+			Expect(err.Error()).Should(ContainSubstring(expectErrMsg))
+
+			By("Scale a machine pool with min replica and max replica a char")
+			_, err = rosaClient.MachinePool.EditMachinePool(clusterID, mpName,
+				"--min-replicas", fmt.Sprintf("%v", "a"),
+				"--max-replicas", fmt.Sprintf("%v", "b"),
+				"-y",
+			)
+			expectErrMsg = "invalid syntax"
+			Expect(err.Error()).Should(ContainSubstring(expectErrMsg))
+		})
+	})
 })
diff --git a/tests/e2e/test_rosacli_cluster.go b/tests/e2e/test_rosacli_cluster.go
index 045b5470c3..85c7cd0bdd 100644
--- a/tests/e2e/test_rosacli_cluster.go
+++ b/tests/e2e/test_rosacli_cluster.go
@@ -2000,7 +2000,8 @@ var _ = Describe("create/delete operator-roles and oidc-provider to cluster",
 			By("Create one sts cluster in manual mode")
 			rosaClient.Runner.SetDir(dirToClean)
 			clusterNameToClean = "test-43053"
-			operatorRolePreifx := "opPrefix43053"
+			// Use a random string prefix to avoid failures when the case is rerun
+			operatorRolePreifx := common.GenerateRandomName("opPrefix43053", 2)
 			_, err, _ = clusterService.Create(
 				clusterNameToClean, "--sts", "--mode", "manual",
diff --git a/tests/e2e/utils.go b/tests/e2e/utils.go
index b6beeb2b37..763ff7dda4 100644
--- a/tests/e2e/utils.go
+++ b/tests/e2e/utils.go
@@ -19,3 +19,8 @@ func SkipTestOnFeature(feature string) {
 	message := fmt.Sprintf("The test profile is not configured with the feature: %s", feature)
 	Skip(message)
 }
+
+func SkipNotMultiArch() {
+	message := fmt.Sprintln("The cluster cannot handle multiple CPU architectures")
+	Skip(message)
+}
diff --git a/tests/utils/common/constants/cluster.go b/tests/utils/common/constants/cluster.go
index df074e48c8..6de4c9d786 100644
--- a/tests/utils/common/constants/cluster.go
+++ b/tests/utils/common/constants/cluster.go
@@ -2,6 +2,7 @@ package constants
 
 import (
 	"regexp"
+	"time"
 )
 
 const (
@@ -57,4 +58,21 @@ var (
 // instance type
 const (
 	DefaultInstanceType = "m5.xlarge"
+
+	M5XLarge  = "m5.xlarge"
+	M52XLarge = "m5.2xlarge"
+	M6gXLarge = "m6g.xlarge"
+)
+
+// cpu architecture
+const (
+	AMD = "amd64"
+	ARM = "arm64"
+)
+
+// timeout for hcp node pool
+const (
+	// NodePoolCheckTimeout The timeout may depend on the resource
+	NodePoolCheckTimeout = 30 * time.Minute
+	NodePoolCheckPoll    = 10 * time.Second
 )
diff --git a/tests/utils/config/cluster.go b/tests/utils/config/cluster.go
index c07011d0d3..31fb80499c 100644
--- a/tests/utils/config/cluster.go
+++ b/tests/utils/config/cluster.go
@@ -174,3 +174,13 @@ func GetClusterID() (clusterID string) {
 func getClusterIDENVExisted() string {
 	return os.Getenv("CLUSTER_ID")
 }
+
+// IsNodePoolGlobalCheck Get the nodepool global check flag
+func IsNodePoolGlobalCheck() bool {
+	nodePoolGlobalCheck := os.Getenv("CLUSTER_NODE_POOL_GLOBAL_CHECK")
+	if nodePoolGlobalCheck == "true" {
+		return true
+	} else {
+		return false
+	}
+}
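Note on the flag added above: IsNodePoolGlobalCheck is the single switch that turns the node pool readiness waits in this patch on or off, and exporting CLUSTER_NODE_POOL_GLOBAL_CHECK=true in the environment that runs the suite enables them. A minimal sketch of the gating pattern (names taken from this patch, the surrounding test wiring is assumed):

	// Sketch only: skip the potentially long wait (up to NodePoolCheckTimeout, 30 minutes)
	// unless the environment flag is set.
	if config.IsNodePoolGlobalCheck() {
		err := rosaClient.MachinePool.WaitNodePoolReplicasReady(
			clusterID, mpName, false, // false: the pool is not autoscaled
			constants.NodePoolCheckPoll, constants.NodePoolCheckTimeout,
		)
		Expect(err).ToNot(HaveOccurred())
	}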
diff --git a/tests/utils/exec/rosacli/cluster_service.go b/tests/utils/exec/rosacli/cluster_service.go
index 64da8221f6..46f2754277 100644
--- a/tests/utils/exec/rosacli/cluster_service.go
+++ b/tests/utils/exec/rosacli/cluster_service.go
@@ -29,11 +29,11 @@ type ClusterService interface {
 	EditCluster(clusterID string, flags ...string) (bytes.Buffer, error)
 	InstallLog(clusterID string, flags ...string) (bytes.Buffer, error)
 	UnInstallLog(clusterID string, flags ...string) (bytes.Buffer, error)
-
 	IsHostedCPCluster(clusterID string) (bool, error)
 	IsSTSCluster(clusterID string) (bool, error)
 	IsPrivateCluster(clusterID string) (bool, error)
 	IsUsingReusableOIDCConfig(clusterID string) (bool, error)
+	IsMultiArch(clusterID string) (bool, error)
 	GetClusterVersion(clusterID string) (config.Version, error)
 	IsBYOVPCCluster(clusterID string) (bool, error)
 	IsExternalAuthenticationEnabled(clusterID string) (bool, error)
@@ -300,6 +300,15 @@ func (c *clusterService) IsUsingReusableOIDCConfig(clusterID string) (bool, erro
 	return jsonData.DigBool("aws", "sts", "oidc_config", "reusable"), nil
 }
 
+// IsMultiArch Check if the cluster is multi arch
+func (c *clusterService) IsMultiArch(clusterID string) (bool, error) {
+	jsonData, err := c.GetJSONClusterDescription(clusterID)
+	if err != nil {
+		return false, err
+	}
+	return jsonData.DigBool("multi_arch_enabled"), nil
+}
+
 // Get cluster version
 func (c *clusterService) GetClusterVersion(clusterID string) (clusterVersion config.Version, err error) {
 	var clusterConfig *config.ClusterConfig
"github.com/openshift/rosa/tests/utils/log" ) @@ -21,12 +30,22 @@ type MachinePoolService interface { ReflectMachinePoolList(result bytes.Buffer) (mpl MachinePoolList, err error) ReflectMachinePoolDescription(result bytes.Buffer) (*MachinePoolDescription, error) ListAndReflectMachinePools(clusterID string) (mpl MachinePoolList, err error) + DescribeAndReflectMachinePool(clusterID string, name string) (*MachinePoolDescription, error) ReflectNodePoolList(result bytes.Buffer) (*NodePoolList, error) ListAndReflectNodePools(clusterID string) (*NodePoolList, error) ReflectNodePoolDescription(result bytes.Buffer) (npd *NodePoolDescription, err error) DescribeAndReflectNodePool(clusterID string, name string) (*NodePoolDescription, error) - DescribeAndReflectMachinePool(clusterID string, name string) (*MachinePoolDescription, error) + GetNodePoolAutoScaledReplicas(clusterID string, mpName string) (map[string]int, error) + WaitNodePoolReplicasReady(clusterID string, mpName string, isAutoscale bool, interval, timeout time.Duration) error + ScaleNodePool(clusterID string, mpName string, updateReplicas int, waitForNPInstancesReady bool) error + ScaleAutoScaledNodePool( + clusterID string, + mpName string, + minReplicas int, + maxReplicas int, + waitForNPInstancesReady bool, + ) error RetrieveHelpForCreate() (bytes.Buffer, error) RetrieveHelpForEdit() (bytes.Buffer, error) @@ -103,9 +122,11 @@ type NodePoolList struct { } type NodePoolDescription struct { - ID string `yaml:"ID,omitempty"` - ClusterID string `yaml:"Cluster ID,omitempty"` - AutoScaling string `yaml:"Autoscaling,omitempty"` + ID string `yaml:"ID,omitempty"` + ClusterID string `yaml:"Cluster ID,omitempty"` + AutoScaling string `yaml:"Autoscaling,omitempty"` + // autoscale enabled nodepool return `[]interface{}`, which interface{} here is map[string]string + // autoscale disabled nodepool return `int` DesiredReplicas interface{} `yaml:"Desired replicas,omitempty"` CurrentReplicas string `yaml:"Current replicas,omitempty"` InstanceType string `yaml:"Instance type,omitempty"` @@ -306,6 +327,159 @@ func (m *machinepoolService) ReflectNodePoolDescription(result bytes.Buffer) (*N return npd, err } +// GetNodePoolAutoScaledReplicas Get autoscaled replicas of node pool +func (m *machinepoolService) GetNodePoolAutoScaledReplicas(clusterID string, mpName string) (map[string]int, error) { + mpDesc, err := m.DescribeAndReflectNodePool(clusterID, mpName) + if err != nil { + return nil, err + } + + desiredReplicaList := mpDesc.DesiredReplicas.([]interface{}) + // Parse replicas of autoscaled machine/node pool + replicas, err := parseAutoscaledReplicas(desiredReplicaList) + // For node pool, it has current replicas which will be used to compare. 
+ replicas["Current replicas"], _ = strconv.Atoi(fmt.Sprintf("%v", mpDesc.CurrentReplicas)) + return replicas, err +} + +// Parse replicas(Min replicas and Max replicas) of autoscaled machine/node pool +func parseAutoscaledReplicas(desiredReplicaList []interface{}) (map[string]int, error) { + // Parse replicas of autoscaled machine pool + replicas := make(map[string]int) + for _, data := range desiredReplicaList { + valMap := data.(map[string]interface{}) + for key, value := range valMap { + replica, err := strconv.Atoi(fmt.Sprintf("%v", value)) + if err != nil { + return nil, err + } + replicas[key] = replica + } + } + + return replicas, nil +} + +// WaitNodePoolReplicasReady Wait node pool replicas ready +func (m *machinepoolService) WaitNodePoolReplicasReady( + clusterID string, + mpName string, + isAutoscale bool, + interval, timeout time.Duration, +) error { + err := wait.PollUntilContextTimeout( + context.Background(), + interval, + timeout, + true, + func(context.Context) (bool, error) { + if isAutoscale { + replicas, err := m.GetNodePoolAutoScaledReplicas(clusterID, mpName) + if err != nil { + return false, err + } + + if replicas["Current replicas"] == replicas["Min replicas"] { + return true, nil + } + + } else { + mpDesc, err := m.DescribeAndReflectNodePool(clusterID, mpName) + if err != nil { + return false, err + } + + if mpDesc.CurrentReplicas == fmt.Sprintf("%v", mpDesc.DesiredReplicas) { + return true, nil + } + } + return false, nil + }) + return err +} + +// ScaleNodePool Scale node pool and return its check result +func (m *machinepoolService) ScaleNodePool( + clusterID string, + mpName string, + updateReplicas int, + waitForNPInstancesReady bool, +) error { + _, err := m.EditMachinePool(clusterID, mpName, + "--replicas", fmt.Sprintf("%v", updateReplicas), + "-y", + ) + if err != nil { + return err + } + + // Check the machinepool replicas after scale + mpDesc, err := m.DescribeAndReflectNodePool(clusterID, mpName) + if err != nil { + return err + } + + if mpDesc.DesiredReplicas != updateReplicas { + return errors.New("replicas does not match when scaling node pool") + } + + if waitForNPInstancesReady && config.IsNodePoolGlobalCheck() { + // Check current replicas reach the desired replicas after scale + err = m.WaitNodePoolReplicasReady( + clusterID, + mpName, + false, + constants.NodePoolCheckPoll, + constants.NodePoolCheckTimeout, + ) + } + return err +} + +// ScaleAutoScaledNodePool Scale autoscaled node pool and return its check result +func (m *machinepoolService) ScaleAutoScaledNodePool( + clusterID string, + mpName string, + minReplicas int, + maxReplicas int, + waitForNPInstancesReady bool, +) error { + _, err := m.EditMachinePool(clusterID, mpName, + "--enable-autoscaling", + "--min-replicas", fmt.Sprintf("%v", minReplicas), + "--max-replicas", fmt.Sprintf("%v", maxReplicas), + "-y", + ) + if err != nil { + return err + } + + // Check the machinepool min_replica and max_replica after scale + desiredReplicas, err := m.GetNodePoolAutoScaledReplicas(clusterID, mpName) + if err != nil { + return err + } + + if desiredReplicas["Min replicas"] != minReplicas { + return errors.New("min replicas does not match when scaling autoscaled node pool") + } + if desiredReplicas["Max replicas"] != maxReplicas { + return errors.New("max replicas does not match when scaling autoscaled node pool") + } + + if waitForNPInstancesReady && config.IsNodePoolGlobalCheck() { + // Check current replicas reach the min_replica in desired replicas after scale + err = 
+		err = m.WaitNodePoolReplicasReady(
+			clusterID,
+			mpName,
+			true,
+			constants.NodePoolCheckPoll,
+			constants.NodePoolCheckTimeout,
+		)
+	}
+	return err
+}
+
 // Get specified nodepool by nodepool id
 func (npl NodePoolList) Nodepool(id string) (np *NodePool) {
 	for _, npItem := range npl.NodePools {