diff --git a/cmd/list/machinepool/cmd_test.go b/cmd/list/machinepool/cmd_test.go
index b8ebd7d25f..6a49c77570 100644
--- a/cmd/list/machinepool/cmd_test.go
+++ b/cmd/list/machinepool/cmd_test.go
@@ -18,9 +18,9 @@ import (
 const (
 	nodePoolName = "nodepool85"
 	clusterId = "24vf9iitg3p6tlml88iml6j6mu095mh8"
-	singleNodePoolOutput = "ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TAINTS" +
-		" AVAILABILITY ZONE SUBNET VERSION AUTOREPAIR \nnodepool85 No /0" +
-		" m5.xlarge us-east-1a 4.12.24 No \n"
+	singleNodePoolOutput = "ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TAINTS AVAILABILITY ZONE" +
+		" SUBNET DISK SIZE VERSION AUTOREPAIR \nnodepool85 No /0 m5.xlarge" +
+		" us-east-1a default 4.12.24 No \n"
 	singleMachinePoolOutput = "ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TAINTS AVAILABILITY " +
 		"ZONES SUBNETS SPOT INSTANCES DISK SIZE SG IDs\nnodepool85 No 0 " +
 		" m5.xlarge us-east-1a, us-east-1b, us-east-1c " +
@@ -33,10 +33,10 @@ const (
 		"\nnodepool853 Yes 1-100 m5.xlarge test=label test=taint: " +
 		"us-east-1a, us-east-1b, us-east-1c Yes (max $5) default \n"
 	multipleNodePoolsOutput = "ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TAINTS " +
-		"AVAILABILITY ZONE SUBNET VERSION AUTOREPAIR \nnodepool85 No /0 " +
-		"m5.xlarge us-east-1a 4.12.24 No " +
-		"\nnodepool852 Yes /100-1000 m5.xlarge test=label us-east-1a " +
-		"4.12.24 No \n"
+		"AVAILABILITY ZONE SUBNET DISK SIZE VERSION AUTOREPAIR \nnodepool85 No /0 m5.xlarge" +
+		" us-east-1a default 4.12.24 No \nnodepool852 Yes" +
+		" /100-1000 m5.xlarge test=label us-east-1a default 4.12.24" +
+		" No \n"
 )
 
 var _ = Describe("List machine pool", func() {
diff --git a/pkg/machinepool/machinepool.go b/pkg/machinepool/machinepool.go
index 6c46c7e93a..0af23ae41b 100644
--- a/pkg/machinepool/machinepool.go
+++ b/pkg/machinepool/machinepool.go
@@ -1193,9 +1193,9 @@ func getMachinePoolsString(machinePools []*cmv1.MachinePool) string {
 
 func getNodePoolsString(nodePools []*cmv1.NodePool) string {
 	outputString := "ID\tAUTOSCALING\tREPLICAS\t" +
-		"INSTANCE TYPE\tLABELS\t\tTAINTS\t\tAVAILABILITY ZONE\tSUBNET\tVERSION\tAUTOREPAIR\t\n"
+		"INSTANCE TYPE\tLABELS\t\tTAINTS\t\tAVAILABILITY ZONE\tSUBNET\tDISK SIZE\tVERSION\tAUTOREPAIR\t\n"
 	for _, nodePool := range nodePools {
-		outputString += fmt.Sprintf("%s\t%s\t%s\t%s\t%s\t\t%s\t\t%s\t%s\t%s\t%s\t\n",
+		outputString += fmt.Sprintf("%s\t%s\t%s\t%s\t%s\t\t%s\t\t%s\t%s\t%s\t%s\t%s\t\n",
 			nodePool.ID(),
 			ocmOutput.PrintNodePoolAutoscaling(nodePool.Autoscaling()),
 			ocmOutput.PrintNodePoolReplicasShort(
@@ -1207,6 +1207,7 @@ func getNodePoolsString(nodePools []*cmv1.NodePool) string {
 			ocmOutput.PrintTaints(nodePool.Taints()),
 			nodePool.AvailabilityZone(),
 			nodePool.Subnet(),
+			ocmOutput.PrintNodePoolDiskSize(nodePool.AWSNodePool()),
 			ocmOutput.PrintNodePoolVersion(nodePool.Version()),
 			ocmOutput.PrintNodePoolAutorepair(nodePool.AutoRepair()),
 		)
diff --git a/pkg/machinepool/machinepool_test.go b/pkg/machinepool/machinepool_test.go
index 7736e287a2..f7f8346e6f 100644
--- a/pkg/machinepool/machinepool_test.go
+++ b/pkg/machinepool/machinepool_test.go
@@ -149,14 +149,15 @@ var _ = Describe("Machinepool and nodepool", func() {
 		clusterBuilder := cmv1.NewCluster().ID("test").State(cmv1.ClusterStateReady).
 			Hypershift(cmv1.NewHypershift().Enabled(true)).NodePools(cmv1.NewNodePoolList().
 			Items(cmv1.NewNodePool().ID("np").Replicas(8).AvailabilityZone("az").
+			AWSNodePool(cmv1.NewAWSNodePool().RootVolume(cmv1.NewAWSVolume().Size(256))).
 			Subnet("sn").Version(cmv1.NewVersion().ID("1")).AutoRepair(false)))
 		cluster, err := clusterBuilder.Build()
 		Expect(err).ToNot(HaveOccurred())
 		out := getNodePoolsString(cluster.NodePools().Slice())
 		Expect(err).ToNot(HaveOccurred())
 		Expect(out).To(Equal(fmt.Sprintf("ID\tAUTOSCALING\tREPLICAS\t"+
-			"INSTANCE TYPE\tLABELS\t\tTAINTS\t\tAVAILABILITY ZONE\tSUBNET\tVERSION\tAUTOREPAIR\t\n"+
-			"%s\t%s\t%s\t%s\t%s\t\t%s\t\t%s\t%s\t%s\t%s\t\n",
+			"INSTANCE TYPE\tLABELS\t\tTAINTS\t\tAVAILABILITY ZONE\tSUBNET\tDISK SIZE\tVERSION\tAUTOREPAIR\t\n"+
+			"%s\t%s\t%s\t%s\t%s\t\t%s\t\t%s\t%s\t%s\t%s\t%s\t\n",
 			cluster.NodePools().Get(0).ID(),
 			ocmOutput.PrintNodePoolAutoscaling(cluster.NodePools().Get(0).Autoscaling()),
 			ocmOutput.PrintNodePoolReplicasShort(
@@ -169,6 +170,7 @@ var _ = Describe("Machinepool and nodepool", func() {
 			ocmOutput.PrintTaints(cluster.NodePools().Get(0).Taints()),
 			cluster.NodePools().Get(0).AvailabilityZone(),
 			cluster.NodePools().Get(0).Subnet(),
+			ocmOutput.PrintNodePoolDiskSize(cluster.NodePools().Get(0).AWSNodePool()),
 			ocmOutput.PrintNodePoolVersion(cluster.NodePools().Get(0).Version()),
 			ocmOutput.PrintNodePoolAutorepair(cluster.NodePools().Get(0).AutoRepair()))))
 	})
diff --git a/pkg/ocm/output/nodepools.go b/pkg/ocm/output/nodepools.go
index 112d1b24b8..24687c4b6a 100644
--- a/pkg/ocm/output/nodepools.go
+++ b/pkg/ocm/output/nodepools.go
@@ -22,6 +22,7 @@ import (
 
 	cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
 
+	"github.com/openshift/rosa/pkg/helper"
 	"github.com/openshift/rosa/pkg/ocm"
 	"github.com/openshift/rosa/pkg/output"
 )
@@ -134,3 +135,15 @@ func PrintNodePoolManagementUpgrade(upgrade *cmv1.NodePoolManagementUpgrade) str
 
 	return ""
 }
+
+func PrintNodePoolDiskSize(aws *cmv1.AWSNodePool) string {
+	diskSizeStr := "default"
+	if aws != nil && aws.RootVolume() != nil {
+		diskSize, ok := aws.RootVolume().GetSize()
+		if ok {
+			diskSizeStr = helper.GigybyteStringer(diskSize)
+		}
+	}
+
+	return diskSizeStr
+}
diff --git a/tests/utils/exec/rosacli/machinepool_service.go b/tests/utils/exec/rosacli/machinepool_service.go
index 9c41d117df..cdda6019fd 100644
--- a/tests/utils/exec/rosacli/machinepool_service.go
+++ b/tests/utils/exec/rosacli/machinepool_service.go
@@ -111,6 +111,7 @@ type NodePool struct {
 	Taints string `json:"TAINTS,omitempty"`
 	AvalaiblityZones string `json:"AVAILABILITY ZONES,omitempty"`
 	Subnet string `json:"SUBNET,omitempty"`
+	DiskSize string `json:"DISK SIZE,omitempty"`
 	Version string `json:"VERSION,omitempty"`
 	AutoRepair string `json:"AUTOREPAIR,omitempty"`
 	TuningConfigs string `json:"TUNING CONFIGS,omitempty"`
@@ -145,6 +146,7 @@ type NodePoolDescription struct {
 	ScheduledUpgrade string `yaml:"Scheduled upgrade,omitempty"`
 	AdditionalSecurityGroupIDs string `yaml:"Additional security group IDs,omitempty"`
 	NodeDrainGracePeriod string `yaml:"Node drain grace period,omitempty"`
+	DiskSize string `yaml:"Disk size,omitempty"`
 }
 
 // Create MachinePool