Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions api/core/v1beta2/cluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -82,15 +82,18 @@ const (

// ClusterTopologyReconciledControlPlaneUpgradePendingReason documents reconciliation of a Cluster topology
// not yet completed because Control Plane is not yet updated to match the desired topology spec.
// Deprecated: please use ClusterTopologyReconciledClusterUpgradingReason instead.
ClusterTopologyReconciledControlPlaneUpgradePendingReason = "ControlPlaneUpgradePending"

// ClusterTopologyReconciledMachineDeploymentsCreatePendingReason documents reconciliation of a Cluster topology
// not yet completed because at least one of the MachineDeployments is yet to be created.
// This generally happens because new MachineDeployment creations are held off while the ControlPlane is not stable.
// Deprecated: please use ClusterTopologyReconciledClusterUpgradingReason instead.
ClusterTopologyReconciledMachineDeploymentsCreatePendingReason = "MachineDeploymentsCreatePending"

// ClusterTopologyReconciledMachineDeploymentsUpgradePendingReason documents reconciliation of a Cluster topology
// not yet completed because at least one of the MachineDeployments is not yet updated to match the desired topology spec.
// Deprecated: please use ClusterTopologyReconciledClusterUpgradingReason instead.
ClusterTopologyReconciledMachineDeploymentsUpgradePendingReason = "MachineDeploymentsUpgradePending"

// ClusterTopologyReconciledMachineDeploymentsUpgradeDeferredReason documents reconciliation of a Cluster topology
Expand All @@ -99,21 +102,29 @@ const (

// ClusterTopologyReconciledMachinePoolsUpgradePendingReason documents reconciliation of a Cluster topology
// not yet completed because at least one of the MachinePools is not yet updated to match the desired topology spec.
// Deprecated: please use ClusterTopologyReconciledClusterUpgradingReason instead.
ClusterTopologyReconciledMachinePoolsUpgradePendingReason = "MachinePoolsUpgradePending"

// ClusterTopologyReconciledMachinePoolsCreatePendingReason documents reconciliation of a Cluster topology
// not yet completed because at least one of the MachinePools is yet to be created.
// This generally happens because new MachinePool creations are held off while the ControlPlane is not stable.
// Deprecated: please use ClusterTopologyReconciledClusterUpgradingReason instead.
ClusterTopologyReconciledMachinePoolsCreatePendingReason = "MachinePoolsCreatePending"

// ClusterTopologyReconciledMachinePoolsUpgradeDeferredReason documents reconciliation of a Cluster topology
// not yet completed because the upgrade for at least one of the MachinePools has been deferred.
// Deprecated: please use ClusterTopologyReconciledClusterUpgradingReason instead.
ClusterTopologyReconciledMachinePoolsUpgradeDeferredReason = "MachinePoolsUpgradeDeferred"

// ClusterTopologyReconciledHookBlockingReason documents reconciliation of a Cluster topology
// not yet completed because at least one of the lifecycle hooks is blocking.
// Deprecated: please use ClusterTopologyReconciledClusterUpgradingReason instead.
ClusterTopologyReconciledHookBlockingReason = "LifecycleHookBlocking"

// ClusterTopologyReconciledClusterUpgradingReason documents reconciliation of a Cluster topology
// not yet completed because a cluster upgrade is still in progress.
ClusterTopologyReconciledClusterUpgradingReason = "ClusterUpgrading"

// ClusterTopologyReconciledClusterClassNotReconciledReason documents reconciliation of a Cluster topology not
// yet completed because the ClusterClass has not reconciled yet. If this condition persists there may be an issue
// with the ClusterClass surfaced in the ClusterClass status or controller logs.
Expand Down
10 changes: 10 additions & 0 deletions api/core/v1beta2/v1beta1_condition_consts.go
Original file line number Diff line number Diff line change
Expand Up @@ -302,15 +302,18 @@ const (

// TopologyReconciledControlPlaneUpgradePendingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology
// not yet completed because Control Plane is not yet updated to match the desired topology spec.
// Deprecated: please use ClusterTopologyReconciledClusterUpgradingV1Beta1Reason instead.
TopologyReconciledControlPlaneUpgradePendingV1Beta1Reason = "ControlPlaneUpgradePending"

// TopologyReconciledMachineDeploymentsCreatePendingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology
// not yet completed because at least one of the MachineDeployments is yet to be created.
// This generally happens because new MachineDeployment creations are held off while the ControlPlane is not stable.
// Deprecated: please use ClusterTopologyReconciledClusterUpgradingV1Beta1Reason instead.
TopologyReconciledMachineDeploymentsCreatePendingV1Beta1Reason = "MachineDeploymentsCreatePending"

// TopologyReconciledMachineDeploymentsUpgradePendingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology
// not yet completed because at least one of the MachineDeployments is not yet updated to match the desired topology spec.
// Deprecated: please use ClusterTopologyReconciledClusterUpgradingV1Beta1Reason instead.
TopologyReconciledMachineDeploymentsUpgradePendingV1Beta1Reason = "MachineDeploymentsUpgradePending"

// TopologyReconciledMachineDeploymentsUpgradeDeferredV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology
Expand All @@ -319,11 +322,13 @@ const (

// TopologyReconciledMachinePoolsUpgradePendingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology
// not yet completed because at least one of the MachinePools is not yet updated to match the desired topology spec.
// Deprecated: please use ClusterTopologyReconciledClusterUpgradingV1Beta1Reason instead.
TopologyReconciledMachinePoolsUpgradePendingV1Beta1Reason = "MachinePoolsUpgradePending"

// TopologyReconciledMachinePoolsCreatePendingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology
// not yet completed because at least one of the MachinePools is yet to be created.
// This generally happens because new MachinePool creations are held off while the ControlPlane is not stable.
// Deprecated: please use ClusterTopologyReconciledClusterUpgradingV1Beta1Reason instead.
TopologyReconciledMachinePoolsCreatePendingV1Beta1Reason = "MachinePoolsCreatePending"

// TopologyReconciledMachinePoolsUpgradeDeferredV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology
Expand All @@ -332,8 +337,13 @@ const (

// TopologyReconciledHookBlockingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology
// not yet completed because at least one of the lifecycle hooks is blocking.
// Deprecated: please use ClusterTopologyReconciledClusterUpgradingV1Beta1Reason instead.
TopologyReconciledHookBlockingV1Beta1Reason = "LifecycleHookBlocking"

// ClusterTopologyReconciledClusterUpgradingV1Beta1Reason documents reconciliation of a Cluster topology
// not yet completed because a cluster upgrade is still in progress.
// NOTE(review): sibling reasons in this file use the "TopologyReconciled…V1Beta1Reason" prefix
// (e.g. TopologyReconciledHookBlockingV1Beta1Reason); consider renaming for consistency — verify against callers.
ClusterTopologyReconciledClusterUpgradingV1Beta1Reason = "ClusterUpgrading"

// TopologyReconciledClusterClassNotReconciledV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology not
// yet completed because the ClusterClass has not reconciled yet. If this condition persists there may be an issue
// with the ClusterClass surfaced in the ClusterClass status or controller logs.
Expand Down
38 changes: 31 additions & 7 deletions exp/topology/desiredstate/desired_state.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ package desiredstate

import (
"context"
"fmt"
"maps"
"reflect"
"time"
Expand All @@ -30,6 +31,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"

clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
Expand Down Expand Up @@ -507,6 +509,8 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf
// The version is calculated using the state of the current machine deployments, the current control plane
// and the version defined in the topology.
func (g *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Scope) (string, error) {
log := ctrl.LoggerFrom(ctx)

topologyVersion := s.Blueprint.Topology.Version
// If we are creating the control plane object (current control plane is nil), use version from topology.
if s.Current.ControlPlane == nil || s.Current.ControlPlane.Object == nil {
Expand Down Expand Up @@ -599,8 +603,7 @@ func (g *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
// Also check if MachineDeployments/MachinePools are already upgrading.
// If the MachineDeployments/MachinePools are upgrading, then do not pick up the next control plane version yet.
// We will pick up the new version after the MachineDeployments/MachinePools finish upgrading.
if len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) > 0 ||
len(s.UpgradeTracker.MachinePools.UpgradingNames()) > 0 {
if s.UpgradeTracker.MachineDeployments.IsAnyUpgrading() || s.UpgradeTracker.MachinePools.IsAnyUpgrading() {
return *currentVersion, nil
}

Expand Down Expand Up @@ -692,6 +695,11 @@ func (g *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
s.UpgradeTracker.ControlPlane.IsStartingUpgrade = true
s.UpgradeTracker.ControlPlane.IsPendingUpgrade = false

log.Info(fmt.Sprintf("Control plane %s upgraded from version %s to version %s", klog.KObj(s.Current.ControlPlane.Object), *currentVersion, nextVersion),
"ControlPlaneUpgrades", toUpgradeStep(s.UpgradeTracker.ControlPlane.UpgradePlan),
"WorkersUpgrades", toUpgradeStep(s.UpgradeTracker.MachineDeployments.UpgradePlan, s.UpgradeTracker.MachinePools.UpgradePlan),
s.Current.ControlPlane.Object.GetKind(), klog.KObj(s.Current.ControlPlane.Object),
)
return nextVersion, nil
}

Expand Down Expand Up @@ -857,7 +865,7 @@ func (g *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope
// Add ClusterTopologyMachineDeploymentLabel to the generated InfrastructureMachine template
infraMachineTemplateLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] = machineDeploymentTopology.Name
desiredMachineDeployment.InfrastructureMachineTemplate.SetLabels(infraMachineTemplateLabels)
version, err := g.computeMachineDeploymentVersion(s, machineDeploymentTopology, currentMachineDeployment)
version, err := g.computeMachineDeploymentVersion(ctx, s, machineDeploymentTopology, currentMachineDeployment)
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -1039,7 +1047,9 @@ func (g *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope
// computeMachineDeploymentVersion calculates the version of the desired machine deployment.
// The version is calculated using the state of the current machine deployments,
// the current control plane and the version defined in the topology.
func (g *generator) computeMachineDeploymentVersion(s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology, currentMDState *scope.MachineDeploymentState) (string, error) {
func (g *generator) computeMachineDeploymentVersion(ctx context.Context, s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology, currentMDState *scope.MachineDeploymentState) (string, error) {
log := ctrl.LoggerFrom(ctx)

topologyVersion := s.Blueprint.Topology.Version
// If creating a new machine deployment, mark it as pending if the control plane is not
// yet stable. Creating a new MD while the control plane is upgrading can lead to unexpected race conditions.
Expand Down Expand Up @@ -1111,6 +1121,12 @@ func (g *generator) computeMachineDeploymentVersion(s *scope.Scope, machineDeplo
s.UpgradeTracker.MachineDeployments.MarkUpgrading(currentMDState.Object.Name)

nextVersion := s.UpgradeTracker.MachineDeployments.UpgradePlan[0]

log.Info(fmt.Sprintf("MachineDeployment %s upgraded from version %s to version %s", klog.KObj(currentMDState.Object), currentVersion, nextVersion),
"ControlPlaneUpgrades", toUpgradeStep(s.UpgradeTracker.ControlPlane.UpgradePlan),
"WorkersUpgrades", toUpgradeStep(s.UpgradeTracker.MachineDeployments.UpgradePlan, s.UpgradeTracker.MachinePools.UpgradePlan),
"MachineDeployment", klog.KObj(currentMDState.Object),
)
return nextVersion, nil
}

Expand Down Expand Up @@ -1165,7 +1181,7 @@ func (g *generator) computeMachinePools(ctx context.Context, s *scope.Scope) (sc
// computeMachinePool computes the desired state for a MachinePoolTopology.
// The generated machinePool object is calculated using the values from the machinePoolTopology and
// the machinePool class.
func (g *generator) computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology) (*scope.MachinePoolState, error) {
func (g *generator) computeMachinePool(ctx context.Context, s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology) (*scope.MachinePoolState, error) {
desiredMachinePool := &scope.MachinePoolState{}

// Gets the blueprint for the MachinePool class.
Expand Down Expand Up @@ -1243,7 +1259,7 @@ func (g *generator) computeMachinePool(_ context.Context, s *scope.Scope, machin
// Add ClusterTopologyMachinePoolLabel to the generated InfrastructureMachinePool object
infraMachinePoolObjectLabels[clusterv1.ClusterTopologyMachinePoolNameLabel] = machinePoolTopology.Name
desiredMachinePool.InfrastructureMachinePoolObject.SetLabels(infraMachinePoolObjectLabels)
version, err := g.computeMachinePoolVersion(s, machinePoolTopology, currentMachinePool)
version, err := g.computeMachinePoolVersion(ctx, s, machinePoolTopology, currentMachinePool)
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -1359,7 +1375,9 @@ func (g *generator) computeMachinePool(_ context.Context, s *scope.Scope, machin
// computeMachinePoolVersion calculates the version of the desired machine pool.
// The version is calculated using the state of the current machine pools,
// the current control plane and the version defined in the topology.
func (g *generator) computeMachinePoolVersion(s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology, currentMPState *scope.MachinePoolState) (string, error) {
func (g *generator) computeMachinePoolVersion(ctx context.Context, s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology, currentMPState *scope.MachinePoolState) (string, error) {
log := ctrl.LoggerFrom(ctx)

topologyVersion := s.Blueprint.Topology.Version
// If creating a new machine pool, mark it as pending if the control plane is not
// yet stable. Creating a new MP while the control plane is upgrading can lead to unexpected race conditions.
Expand Down Expand Up @@ -1431,6 +1449,12 @@ func (g *generator) computeMachinePoolVersion(s *scope.Scope, machinePoolTopolog
s.UpgradeTracker.MachinePools.MarkUpgrading(currentMPState.Object.Name)

nextVersion := s.UpgradeTracker.MachinePools.UpgradePlan[0]

log.Info(fmt.Sprintf("MachinePool %s upgraded from version %s to version %s", klog.KObj(currentMPState.Object), currentVersion, nextVersion),
"ControlPlaneUpgrades", toUpgradeStep(s.UpgradeTracker.ControlPlane.UpgradePlan),
"WorkersUpgrades", toUpgradeStep(s.UpgradeTracker.MachineDeployments.UpgradePlan, s.UpgradeTracker.MachinePools.UpgradePlan),
"MachinePool", klog.KObj(currentMPState.Object),
)
return nextVersion, nil
}

Expand Down
34 changes: 16 additions & 18 deletions exp/topology/desiredstate/desired_state_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1062,11 +1062,13 @@ func TestComputeControlPlaneVersion(t *testing.T) {

catalog := runtimecatalog.New()
_ = runtimehooksv1.AddToCatalog(catalog)
beforeClusterUpgradeGVH, _ := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterUpgrade)
beforeControlPlaneUpgradeGVH, _ := catalog.GroupVersionHook(runtimehooksv1.BeforeControlPlaneUpgrade)
afterControlPlaneUpgradeGVH, _ := catalog.GroupVersionHook(runtimehooksv1.AfterControlPlaneUpgrade)
beforeWorkersUpgradeGVH, _ := catalog.GroupVersionHook(runtimehooksv1.BeforeWorkersUpgrade)
afterWorkersUpgradeGVH, _ := catalog.GroupVersionHook(runtimehooksv1.AfterWorkersUpgrade)
afterClusterUpgradeGVH, _ := catalog.GroupVersionHook(runtimehooksv1.AfterClusterUpgrade)

beforeClusterUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterUpgrade)
if err != nil {
panic("unable to compute GVH")
}
nonBlockingBeforeClusterUpgradeResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{
CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
CommonResponse: runtimehooksv1.CommonResponse{
Expand All @@ -1090,10 +1092,6 @@ func TestComputeControlPlaneVersion(t *testing.T) {
},
}

beforeControlPlaneUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.BeforeControlPlaneUpgrade)
if err != nil {
panic("unable to compute GVH")
}
nonBlockingBeforeControlPlaneUpgradeResponse := &runtimehooksv1.BeforeControlPlaneUpgradeResponse{
CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
CommonResponse: runtimehooksv1.CommonResponse{
Expand All @@ -1117,10 +1115,6 @@ func TestComputeControlPlaneVersion(t *testing.T) {
},
}

beforeWorkersUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.BeforeWorkersUpgrade)
if err != nil {
panic("unable to compute GVH")
}
nonBlockingBeforeWorkersUpgradeResponse := &runtimehooksv1.BeforeWorkersUpgradeResponse{
CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
CommonResponse: runtimehooksv1.CommonResponse{
Expand All @@ -1144,10 +1138,6 @@ func TestComputeControlPlaneVersion(t *testing.T) {
},
}

afterWorkersUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.AfterWorkersUpgrade)
if err != nil {
panic("unable to compute GVH")
}
nonBlockingAfterWorkersUpgradeResponse := &runtimehooksv1.AfterWorkersUpgradeResponse{
CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
CommonResponse: runtimehooksv1.CommonResponse{
Expand Down Expand Up @@ -1705,6 +1695,14 @@ func TestComputeControlPlaneVersion(t *testing.T) {

runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
WithCatalog(catalog).
WithGetAllExtensionResponses(map[runtimecatalog.GroupVersionHook][]string{
beforeClusterUpgradeGVH: {"foo"},
beforeControlPlaneUpgradeGVH: {"foo"},
afterControlPlaneUpgradeGVH: {"foo"},
beforeWorkersUpgradeGVH: {"foo"},
afterWorkersUpgradeGVH: {"foo"},
afterClusterUpgradeGVH: {"foo"},
}).
WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
beforeClusterUpgradeGVH: tt.beforeClusterUpgradeResponse,
beforeControlPlaneUpgradeGVH: tt.beforeControlPlaneUpgradeResponse,
Expand Down Expand Up @@ -2969,7 +2967,7 @@ func TestComputeMachineDeploymentVersion(t *testing.T) {

e := generator{}

version, err := e.computeMachineDeploymentVersion(s, tt.machineDeploymentTopology, tt.currentMachineDeploymentState)
version, err := e.computeMachineDeploymentVersion(ctx, s, tt.machineDeploymentTopology, tt.currentMachineDeploymentState)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(version).To(Equal(tt.expectedVersion))

Expand Down Expand Up @@ -3214,7 +3212,7 @@ func TestComputeMachinePoolVersion(t *testing.T) {

e := generator{}

version, err := e.computeMachinePoolVersion(s, tt.machinePoolTopology, tt.currentMachinePoolState)
version, err := e.computeMachinePoolVersion(ctx, s, tt.machinePoolTopology, tt.currentMachinePoolState)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(version).To(Equal(tt.expectedVersion))

Expand Down
Loading