/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"context"
	"fmt"
	"maps"
	"os"
	"path/filepath"
	"slices"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/client"

	bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2"
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
	"sigs.k8s.io/cluster-api/util"
)

// ClusterInPlaceUpdateSpecInput is the input for ClusterInPlaceUpdateSpec.
type ClusterInPlaceUpdateSpecInput struct {
	E2EConfig             *clusterctl.E2EConfig
	ClusterctlConfigPath  string
	BootstrapClusterProxy framework.ClusterProxy
	ArtifactFolder        string
	SkipCleanup           bool

	// InfrastructureProvider allows specifying the infrastructure provider to be used when looking for
	// cluster templates.
	// If not set, clusterctl will look at the infrastructure providers installed in the management cluster;
	// if exactly one infrastructure provider exists, it will be used, otherwise the operation will fail.
	InfrastructureProvider *string

	// Flavor, if specified, is the template flavor used to create the cluster for testing.
	// If not specified, the default flavor for the selected infrastructure provider is used.
	Flavor *string

	// WorkerMachineCount defines the number of worker machines to be added to the workload cluster.
	// If not specified, 1 will be used.
	WorkerMachineCount *int64

	// ExtensionConfigName is the name of the ExtensionConfig. Defaults to "in-place-update".
	// This value is provided to clusterctl as the "EXTENSION_CONFIG_NAME" variable and can be used to template the
	// name of the ExtensionConfig into the ClusterClass.
	ExtensionConfigName string

	// ExtensionServiceNamespace is the namespace where the service for the Runtime Extension is located.
	// Note: This should only be set if a Runtime Extension is used.
	ExtensionServiceNamespace string

	// ExtensionServiceName is the name of the service for the Runtime Extension.
	// Note: This should only be set if a Runtime Extension is used.
	ExtensionServiceName string

	// PostNamespaceCreated allows injecting a function to be run after the test namespace is created.
	// If not specified, this is a no-op.
	PostNamespaceCreated func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace string)

	// ClusterctlVariables allows injecting variables into the cluster template.
	// If not specified, this is a no-op.
	ClusterctlVariables map[string]string
}

// ClusterInPlaceUpdateSpec implements a test for in-place updates.
// Note: This test works with KCP as it tests the KCP in-place update feature.
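//
// An illustrative way to wire this spec into a suite; the globals used below (ctx, e2eConfig,
// clusterctlConfigPath, bootstrapClusterProxy, artifactFolder, skipCleanup) are assumed to be
// provided by the surrounding e2e test suite and may differ in a real setup:
//
//	var _ = Describe("When testing in-place updates", func() {
//		ClusterInPlaceUpdateSpec(ctx, func() ClusterInPlaceUpdateSpecInput {
//			return ClusterInPlaceUpdateSpecInput{
//				E2EConfig:             e2eConfig,
//				ClusterctlConfigPath:  clusterctlConfigPath,
//				BootstrapClusterProxy: bootstrapClusterProxy,
//				ArtifactFolder:        artifactFolder,
//				SkipCleanup:           skipCleanup,
//			}
//		})
//	})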
func ClusterInPlaceUpdateSpec(ctx context.Context, inputGetter func() ClusterInPlaceUpdateSpecInput) {
	var (
		specName         = "in-place-update"
		input            ClusterInPlaceUpdateSpecInput
		namespace        *corev1.Namespace
		cancelWatches    context.CancelFunc
		clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
	)

	BeforeEach(func() {
		Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
		input = inputGetter()
		Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
		Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName)
		Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
		Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)

		Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion))

		if input.ExtensionServiceNamespace != "" && input.ExtensionServiceName != "" {
			if input.ExtensionConfigName == "" {
				input.ExtensionConfigName = specName
			}
		}

		// Set up a Namespace to host objects for this spec and create a watcher for the namespace events.
		namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)

		clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
	})

	It("Should create a workload cluster", func() {
		By("Creating a workload cluster")

		infrastructureProvider := clusterctl.DefaultInfrastructureProvider
		if input.InfrastructureProvider != nil {
			infrastructureProvider = *input.InfrastructureProvider
		}

		flavor := clusterctl.DefaultFlavor
		if input.Flavor != nil {
			flavor = *input.Flavor
		}

		workerMachineCount := ptr.To[int64](1)
		if input.WorkerMachineCount != nil {
			workerMachineCount = input.WorkerMachineCount
		}

		clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))

		if input.ExtensionServiceNamespace != "" && input.ExtensionServiceName != "" {
			// NOTE: The test extension is already deployed in the management cluster. If we ever want to make this
			// test more self-contained, it should be modified to create an additional management cluster; the E2E
			// test configuration should also be extended with something like optional:true to define which providers
			// should not be installed by default in a management cluster.
			By("Deploy Test Extension ExtensionConfig")

			// In this test all handlers default to non-blocking because we don't expect the handlers to block the
			// cluster lifecycle by default. Setting defaultAllHandlersToBlocking to false enforces that the test
			// extension automatically creates the ConfigMap with non-blocking preloaded responses.
			defaultAllHandlersToBlocking := false
			// Select only the current namespace.
			// This is necessary so in CI this test doesn't influence other tests by enabling lifecycle hooks
			// in other test namespaces.
			namespaces := []string{namespace.Name}
			extensionConfig := extensionConfig(input.ExtensionConfigName, input.ExtensionServiceNamespace, input.ExtensionServiceName, defaultAllHandlersToBlocking, namespaces...)
			Expect(input.BootstrapClusterProxy.GetClient().Create(ctx, extensionConfig)).To(Succeed(), "Failed to create the ExtensionConfig")
		}

		variables := map[string]string{
			// This is used to template the name of the ExtensionConfig into the ClusterClass.
			"EXTENSION_CONFIG_NAME": input.ExtensionConfigName,
		}
		maps.Copy(variables, input.ClusterctlVariables)

		clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
			ClusterProxy: input.BootstrapClusterProxy,
			ConfigCluster: clusterctl.ConfigClusterInput{
				LogFolder:              filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
				ClusterctlConfigPath:   input.ClusterctlConfigPath,
				ClusterctlVariables:    variables,
				KubeconfigPath:         input.BootstrapClusterProxy.GetKubeconfigPath(),
				InfrastructureProvider: infrastructureProvider,
				Flavor:                 flavor,
				Namespace:              namespace.Name,
				ClusterName:            clusterName,
				KubernetesVersion:      input.E2EConfig.MustGetVariable(KubernetesVersion),
				// ControlPlaneMachineCount is not configurable: it has to be 3 so we can use scale-in
				// to test in-place updates without any Machine re-creations.
				ControlPlaneMachineCount: ptr.To[int64](3),
				WorkerMachineCount:       workerMachineCount,
			},
			WaitForClusterIntervals:      input.E2EConfig.GetIntervals(specName, "wait-cluster"),
			WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
			WaitForMachineDeployments:    input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
		}, clusterResources)

		cluster := clusterResources.Cluster
		mgmtClient := input.BootstrapClusterProxy.GetClient()

		Byf("Verify Cluster is Available and Machines are Ready before starting in-place updates")
		framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{
			Getter:    mgmtClient,
			Name:      clusterResources.Cluster.Name,
			Namespace: clusterResources.Cluster.Namespace,
		})
		framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{
			Lister:    mgmtClient,
			Name:      clusterResources.Cluster.Name,
			Namespace: clusterResources.Cluster.Namespace,
		})

		var machineObjectsBeforeInPlaceUpdate machineObjects
		Eventually(func(g Gomega) {
			machineObjectsBeforeInPlaceUpdate = getMachineObjects(ctx, g, mgmtClient, cluster)
		}, 30*time.Second, 1*time.Second).Should(Succeed())

		// Doing multiple in-place updates for additional coverage.
		filePath := "/tmp/test"
		for i, fileContent := range []string{"first in-place update", "second in-place update"} {
			Byf("[%d] Trigger in-place update by modifying the files variable", i)

			originalCluster := cluster.DeepCopy()
			// Ensure the files variable is set to the expected value (first remove, then add the variable).
			cluster.Spec.Topology.Variables = slices.DeleteFunc(cluster.Spec.Topology.Variables, func(v clusterv1.ClusterVariable) bool {
				return v.Name == "files"
			})
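			// Note: The rendered variable value is a JSON array of file definitions,
			// e.g. [{"path":"/tmp/test","content":"first in-place update"}].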
			cluster.Spec.Topology.Variables = append(cluster.Spec.Topology.Variables, clusterv1.ClusterVariable{
				Name:  "files",
				Value: apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`[{"path":%q,"content":%q}]`, filePath, fileContent))},
			})
			Expect(mgmtClient.Patch(ctx, cluster, client.MergeFrom(originalCluster))).To(Succeed())

			var machineObjectsAfterInPlaceUpdate machineObjects
			Eventually(func(g Gomega) {
				// Ensure the in-place update was done.
				framework.VerifyClusterCondition(ctx, framework.VerifyClusterConditionInput{
					Getter:        mgmtClient,
					Name:          clusterResources.Cluster.Name,
					Namespace:     clusterResources.Cluster.Namespace,
					ConditionType: clusterv1.ClusterControlPlaneMachinesUpToDateCondition,
				})
				framework.VerifyClusterCondition(ctx, framework.VerifyClusterConditionInput{
					Getter:        mgmtClient,
					Name:          clusterResources.Cluster.Name,
					Namespace:     clusterResources.Cluster.Namespace,
					ConditionType: clusterv1.ClusterWorkerMachinesUpToDateCondition,
				})

				// Retrieve the current Machine objects and ensure the new file was written to all KubeadmConfigs.
				machineObjectsAfterInPlaceUpdate = getMachineObjects(ctx, g, mgmtClient, cluster)
				for _, kubeadmConfig := range machineObjectsAfterInPlaceUpdate.KubeadmConfigByMachine {
					g.Expect(kubeadmConfig.Spec.Files).To(ContainElement(HaveField("Path", filePath)))
					g.Expect(kubeadmConfig.Spec.Files).To(ContainElement(HaveField("Content", fileContent)))
				}

				// Ensure only in-place updates were executed and no Machine was re-created.
				g.Expect(machineNames(machineObjectsAfterInPlaceUpdate.ControlPlaneMachines)).To(Equal(machineNames(machineObjectsBeforeInPlaceUpdate.ControlPlaneMachines)))
				// TODO(in-place): enable once MD/MS/Machine controller PRs are merged
				// g.Expect(machineNames(machineObjectsAfterInPlaceUpdate.WorkerMachines)).To(Equal(machineNames(machineObjectsBeforeInPlaceUpdate.WorkerMachines)))
			}, input.E2EConfig.GetIntervals(specName, "wait-control-plane")...).Should(Succeed())

			// Update machineObjectsBeforeInPlaceUpdate for the next round of in-place updates.
			machineObjectsBeforeInPlaceUpdate = machineObjectsAfterInPlaceUpdate
		}

		By("PASSED!")
	})

	AfterEach(func() {
		// Dumps all the resources in the spec namespace, then cleans up the Cluster object and the spec namespace itself.
		framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ClusterctlConfigPath, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
		if !input.SkipCleanup {
			if input.ExtensionServiceNamespace != "" && input.ExtensionServiceName != "" {
				Eventually(func() error {
					return input.BootstrapClusterProxy.GetClient().Delete(ctx, extensionConfig(input.ExtensionConfigName, input.ExtensionServiceNamespace, input.ExtensionServiceName, true))
				}, 10*time.Second, 1*time.Second).Should(Succeed(), "Deleting ExtensionConfig failed")
			}
		}
	})
}

type machineObjects struct {
	ControlPlaneMachines []*clusterv1.Machine
	WorkerMachines       []*clusterv1.Machine

	KubeadmConfigByMachine map[string]*bootstrapv1.KubeadmConfig
}

// getMachineObjects retrieves Machines and corresponding KubeadmConfigs.
func getMachineObjects(ctx context.Context, g Gomega, c client.Client, cluster *clusterv1.Cluster) machineObjects {
	res := machineObjects{
		KubeadmConfigByMachine: map[string]*bootstrapv1.KubeadmConfig{},
	}

	// ControlPlane Machines.
	controlPlaneMachineList := &clusterv1.MachineList{}
	g.Expect(c.List(ctx, controlPlaneMachineList, client.InNamespace(cluster.Namespace), client.MatchingLabels{
		clusterv1.MachineControlPlaneLabel: "",
		clusterv1.ClusterNameLabel:         cluster.Name,
	})).To(Succeed())
	for _, machine := range controlPlaneMachineList.Items {
		res.ControlPlaneMachines = append(res.ControlPlaneMachines, &machine)
		kubeadmConfig := &bootstrapv1.KubeadmConfig{}
		g.Expect(c.Get(ctx, client.ObjectKey{Namespace: machine.Namespace, Name: machine.Spec.Bootstrap.ConfigRef.Name}, kubeadmConfig)).To(Succeed())
		res.KubeadmConfigByMachine[machine.Name] = kubeadmConfig
	}

	// MachineDeployments Machines.
	machines := framework.GetMachinesByCluster(ctx, framework.GetMachinesByClusterInput{
		Lister:      c,
		ClusterName: cluster.Name,
		Namespace:   cluster.Namespace,
	})
	for _, machine := range machines {
		res.WorkerMachines = append(res.WorkerMachines, &machine)
		kubeadmConfig := &bootstrapv1.KubeadmConfig{}
		g.Expect(c.Get(ctx, client.ObjectKey{Namespace: machine.Namespace, Name: machine.Spec.Bootstrap.ConfigRef.Name}, kubeadmConfig)).To(Succeed())
		res.KubeadmConfigByMachine[machine.Name] = kubeadmConfig
	}

	return res
}

func machineNames(machines []*clusterv1.Machine) sets.Set[string] {
	ret := sets.Set[string]{}
	for _, m := range machines {
		ret.Insert(m.Name)
	}
	return ret
}