
Commit 8bb7bac

OCM-14136 | feat: Add UX informational msgs when create/nodepool
1 parent dcebaa4 commit 8bb7bac

File tree

4 files changed: +109 -2 lines changed


cmd/describe/autoscaler/cmd.go

Lines changed: 5 additions & 1 deletion
@@ -62,7 +62,11 @@ func DescribeAutoscalerRunner() rosa.CommandRunner {
 		if output.HasFlag() {
 			output.Print(autoscaler)
 		} else {
-			fmt.Print(clusterautoscaler.PrintAutoscaler(autoscaler))
+			if cluster.Hypershift().Enabled() {
+				fmt.Print(clusterautoscaler.PrintHypershiftAutoscaler(autoscaler))
+			} else {
+				fmt.Print(clusterautoscaler.PrintAutoscaler(autoscaler))
+			}
 		}
 		return nil
 	}

pkg/clusterautoscaler/output.go

Lines changed: 22 additions & 0 deletions
@@ -87,3 +87,25 @@ func PrintAutoscaler(a *cmv1.ClusterAutoscaler) string {
 
 	return out
 }
+
+func PrintHypershiftAutoscaler(a *cmv1.ClusterAutoscaler) string {
+
+	out := "\n"
+
+	if a.MaxNodeProvisionTime() != "" {
+		out += fmt.Sprintf("Maximum Node Provision Time: %s\n",
+			a.MaxNodeProvisionTime())
+	}
+
+	out += fmt.Sprintf("Maximum Pod Grace Period: %d\n",
+		a.MaxPodGracePeriod())
+	out += fmt.Sprintf("Pod Priority Threshold: %d\n",
+		a.PodPriorityThreshold())
+
+	// Resource Limits
+	out += "Resource Limits:\n"
+	out += fmt.Sprintf(" - Maximum Nodes: %d\n",
+		a.ResourceLimits().MaxNodesTotal())
+
+	return out
+}
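
For orientation, PrintHypershiftAutoscaler emits a short fixed-format block built from the Sprintf calls above. With hypothetical field values (illustrative, not taken from the commit), the rendered output would look roughly like:

Maximum Node Provision Time: 15m
Maximum Pod Grace Period: 600
Pod Priority Threshold: -10
Resource Limits:
 - Maximum Nodes: 500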

pkg/machinepool/helper.go

Lines changed: 4 additions & 1 deletion
@@ -21,6 +21,10 @@ import (
 	"github.com/openshift/rosa/pkg/rosa"
 )
 
+const (
+	hcpMaxNodesLimit = 500
+)
+
 type ReplicaSizeValidation struct {
 	MinReplicas    int
 	ClusterVersion string
@@ -316,7 +320,6 @@ func getSubnetFromAvailabilityZone(cmd *cobra.Command, r *rosa.Runtime, isAvaila
 
 // temporary fn until calculated default values can be retrieved from single source of truth
 func validateClusterVersionWithMaxNodesLimit(clusterVersion string, replicas int, isHostedCp bool) error {
-	hcpMaxNodesLimit := 500
 	if isHostedCp {
 		if replicas > hcpMaxNodesLimit {
 			return fmt.Errorf("should provide an integer number less than or equal to '%v'", hcpMaxNodesLimit)
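
The change here only promotes the previously hard-coded limit to a package-level constant so the node pool code below can reuse it. A minimal standalone sketch of the resulting validation behaviour, with simplified names that are illustrative rather than the repository's own:

package main

import "fmt"

const hcpMaxNodesLimit = 500

// validateMaxNodes mirrors validateClusterVersionWithMaxNodesLimit from the
// diff: hosted-control-plane node pools are capped at hcpMaxNodesLimit replicas.
func validateMaxNodes(replicas int, isHostedCp bool) error {
	if isHostedCp && replicas > hcpMaxNodesLimit {
		return fmt.Errorf("should provide an integer number less than or equal to '%v'", hcpMaxNodesLimit)
	}
	return nil
}

func main() {
	fmt.Println(validateMaxNodes(501, true)) // rejected: limit is 500
	fmt.Println(validateMaxNodes(400, true)) // <nil>
}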

pkg/machinepool/machinepool.go

Lines changed: 78 additions & 0 deletions
@@ -34,6 +34,10 @@ import (
 	"github.com/openshift/rosa/pkg/rosa"
 )
 
+const (
+	clusterAutoscalerLimitMessage = "Cluster Autoscaler limit (MaxNodes)"
+)
+
 var fetchMessage string = "Fetching %s '%s' for cluster '%s'"
 var notFoundMessage string = "Machine pool '%s' not found"
 
@@ -948,6 +952,43 @@ func (m *machinePool) CreateNodePools(r *rosa.Runtime, cmd *cobra.Command, clust
 		}
 	}
 
+	sumOfReplicas := replicas
+	sumOfMaxReplicas := maxReplicas
+	sumOfMinReplicas := minReplicas
+
+	for _, np := range cluster.NodePools().Items() {
+		// If autoscaling, calculate min and max, use min and max in separate messages below
+		autoscaling, ok := np.GetAutoscaling()
+		if !ok || autoscaling == nil {
+			npReplicas, ok := np.GetReplicas()
+			if !ok {
+				return fmt.Errorf("Failed to get node pool replicas for hosted cluster '%s': %v", clusterKey, err)
+			}
+			sumOfReplicas += npReplicas
+		} else {
+			sumOfMaxReplicas += autoscaling.MaxReplica()
+			sumOfMinReplicas += autoscaling.MinReplica()
+		}
+	}
+
+	// Informational message for cluster autoscaler + scaling out to max nodes
+	if autoscaling {
+		r.Reporter.Infof("Scaling max replicas to the maximum allowed value is subject to cluster autoscaler" +
+			" configuration")
+	}
+
+	// Informational message for sum of replicas > MaxNodesTotal
+	if sumOfReplicas+sumOfMaxReplicas > hcpMaxNodesLimit {
+		r.Reporter.Infof("Actual maximum replicas can be lowered, since the replicas defined exceeds "+
+			"%s", clusterAutoscalerLimitMessage)
+	}
+
+	// Informational message for min-replicas or replicas > MaxNodesTotal
+	if sumOfReplicas+sumOfMinReplicas > hcpMaxNodesLimit {
+		r.Reporter.Infof("Actual total nodes in the cluster will be more than the maximum nodes configured " +
+			"in the cluster autoscaler")
+	}
+
 	if version != "" {
 		npBuilder.Version(cmv1.NewVersion().ID(version))
 	}
@@ -1776,6 +1817,43 @@ func editNodePool(cmd *cobra.Command, nodePoolID string,
 		}
 	}
 
+	sumOfReplicas := replicas
+	sumOfMaxReplicas := maxReplicas
+	sumOfMinReplicas := minReplicas
+
+	for _, np := range cluster.NodePools().Items() {
+		// If autoscaling, calculate min and max, use min and max in separate messages below
+		autoscaling, ok := np.GetAutoscaling()
+		if !ok || autoscaling == nil {
+			npReplicas, ok := np.GetReplicas()
+			if !ok {
+				return fmt.Errorf("Failed to get node pool replicas for hosted cluster '%s': %v", clusterKey, err)
+			}
+			sumOfReplicas += npReplicas
+		} else {
+			sumOfMaxReplicas += autoscaling.MaxReplica()
+			sumOfMinReplicas += autoscaling.MinReplica()
+		}
+	}
+
+	// Informational message for cluster autoscaler + scaling out to max nodes
+	if autoscaling {
+		r.Reporter.Infof("Scaling max replicas to the maximum allowed value is subject to cluster autoscaler" +
+			" configuration")
+	}
+
+	// Informational message for sum of replicas > MaxNodesTotal
+	if sumOfReplicas+sumOfMaxReplicas > hcpMaxNodesLimit {
+		r.Reporter.Infof("Actual maximum replicas can be lowered, since the replicas defined exceeds "+
+			"%s", clusterAutoscalerLimitMessage)
+	}
+
+	// Informational message for min-replicas or replicas > MaxNodesTotal
+	if sumOfReplicas+sumOfMinReplicas > hcpMaxNodesLimit {
+		r.Reporter.Infof("Actual total nodes in the cluster will be more than the maximum nodes configured " +
+			"in the cluster autoscaler")
+	}
+
 	update, err := npBuilder.Build()
 	if err != nil {
 		return fmt.Errorf("Failed to create machine pool for hosted cluster '%s': %v", clusterKey, err)
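
The blocks added to CreateNodePools and editNodePool make the same decision: sum the replicas of the existing node pools (or their autoscaling min/max), add the requested values, and print informational messages when the totals cross hcpMaxNodesLimit. A minimal sketch of that decision in isolation; the types and function names are simplified stand-ins for the OCM SDK objects, not the repository code:

package main

import "fmt"

const hcpMaxNodesLimit = 500

// pool is a simplified stand-in for an OCM node pool: either a fixed
// replica count or an autoscaling min/max range.
type pool struct {
	replicas    int
	autoscaling bool
	minReplicas int
	maxReplicas int
}

// infoMessages reproduces the thresholds from the diff: it returns the
// informational messages that would be reported for the requested values
// combined with the existing pools.
func infoMessages(requestedReplicas, requestedMin, requestedMax int, requestedAutoscaling bool, existing []pool) []string {
	sumReplicas, sumMin, sumMax := requestedReplicas, requestedMin, requestedMax
	for _, p := range existing {
		if p.autoscaling {
			sumMin += p.minReplicas
			sumMax += p.maxReplicas
		} else {
			sumReplicas += p.replicas
		}
	}

	var msgs []string
	if requestedAutoscaling {
		msgs = append(msgs, "Scaling max replicas to the maximum allowed value is subject to cluster autoscaler configuration")
	}
	if sumReplicas+sumMax > hcpMaxNodesLimit {
		msgs = append(msgs, "Actual maximum replicas can be lowered, since the replicas defined exceeds Cluster Autoscaler limit (MaxNodes)")
	}
	if sumReplicas+sumMin > hcpMaxNodesLimit {
		msgs = append(msgs, "Actual total nodes in the cluster will be more than the maximum nodes configured in the cluster autoscaler")
	}
	return msgs
}

func main() {
	// One fixed-size pool plus one autoscaling pool already exist;
	// a new autoscaling pool with max 100 is being requested.
	existing := []pool{
		{replicas: 300},
		{autoscaling: true, minReplicas: 50, maxReplicas: 250},
	}
	for _, m := range infoMessages(0, 10, 100, true, existing) {
		fmt.Println(m)
	}
}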
