diff --git a/api/v1alpha1/kamajicontrolplane_types.go b/api/v1alpha1/kamajicontrolplane_types.go
index 702b0fa..d4da70b 100644
--- a/api/v1alpha1/kamajicontrolplane_types.go
+++ b/api/v1alpha1/kamajicontrolplane_types.go
@@ -8,7 +8,7 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	capiv1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 )
 
 // ControlPlaneComponent allows the customization for the given component of the control plane.
@@ -105,7 +105,7 @@ type CoreDNSAddonSpec struct {
 type KamajiControlPlaneSpec struct {
 	KamajiControlPlaneFields `json:",inline"`
 	// ControlPlaneEndpoint propagates the endpoint the Kubernetes API Server managed by Kamaji is located.
-	ControlPlaneEndpoint capiv1beta1.APIEndpoint `json:"controlPlaneEndpoint,omitempty"`
+	ControlPlaneEndpoint capiv1beta2.APIEndpoint `json:"controlPlaneEndpoint,omitempty"`
 	// Number of desired replicas for the given TenantControlPlane.
 	// Defaults to 2.
 	// +kubebuilder:default=2
diff --git a/api/v1alpha1/kamajicontrolplanetemplate_types.go b/api/v1alpha1/kamajicontrolplanetemplate_types.go
index 1fd328e..2cd0219 100644
--- a/api/v1alpha1/kamajicontrolplanetemplate_types.go
+++ b/api/v1alpha1/kamajicontrolplanetemplate_types.go
@@ -5,7 +5,7 @@ package v1alpha1
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // KamajiControlPlaneTemplateSpec defines the desired state of KamajiControlPlaneTemplate.
diff --git a/config/control-plane-components.yaml b/config/control-plane-components.yaml
index 64fc7d6..1ada92c 100644
--- a/config/control-plane-components.yaml
+++ b/config/control-plane-components.yaml
@@ -493,18 +493,19 @@ spec:
                 type: object
               controlPlaneEndpoint:
                 description: ControlPlaneEndpoint propagates the endpoint the Kubernetes API Server managed by Kamaji is located.
+                minProperties: 1
                 properties:
                   host:
                     description: host is the hostname on which the API server is serving.
                     maxLength: 512
+                    minLength: 1
                     type: string
                   port:
                     description: port is the port on which the API server is serving.
                     format: int32
+                    maximum: 65535
+                    minimum: 1
                     type: integer
-                required:
-                - host
-                - port
                 type: object
               controllerManager:
                 description: ControlPlaneComponent allows the customization for the given component of the control plane.
@@ -14093,6 +14094,14 @@ rules:
   - patch
   - update
   - watch
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - get
+  - list
+  - watch
 - apiGroups:
   - cluster.x-k8s.io
   resources:
diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_kamajicontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_kamajicontrolplanes.yaml
index e4c950e..21e1588 100644
--- a/config/crd/bases/controlplane.cluster.x-k8s.io_kamajicontrolplanes.yaml
+++ b/config/crd/bases/controlplane.cluster.x-k8s.io_kamajicontrolplanes.yaml
@@ -493,18 +493,19 @@ spec:
               controlPlaneEndpoint:
                 description: ControlPlaneEndpoint propagates the endpoint the Kubernetes
                   API Server managed by Kamaji is located.
+                minProperties: 1
                 properties:
                   host:
                     description: host is the hostname on which the API server is serving.
                     maxLength: 512
+                    minLength: 1
                     type: string
                   port:
                     description: port is the port on which the API server is serving.
                     format: int32
+                    maximum: 65535
+                    minimum: 1
                     type: integer
-                required:
-                - host
-                - port
                 type: object
               controllerManager:
                 description: ControlPlaneComponent allows the customization for the
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 2740b71..dd073d1 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -15,6 +15,14 @@ rules:
   - patch
   - update
   - watch
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - get
+  - list
+  - watch
 - apiGroups:
   - cluster.x-k8s.io
   resources:
diff --git a/controllers/kamajicontrolplane_controller.go b/controllers/kamajicontrolplane_controller.go
index b606a44..9467574 100644
--- a/controllers/kamajicontrolplane_controller.go
+++ b/controllers/kamajicontrolplane_controller.go
@@ -18,8 +18,9 @@ import (
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/util/retry"
 	"k8s.io/component-base/featuregate"
-	capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	capiv1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util/annotations"
+	conditionsapi "sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
@@ -80,18 +81,18 @@ func (r *KamajiControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
 	}
 
 	// Retrieving the Cluster information
-	cluster := capiv1beta1.Cluster{}
+	cluster := capiv1beta2.Cluster{}
 	cluster.SetName(kcp.GetOwnerReferences()[0].Name)
 	cluster.SetNamespace(kcp.GetNamespace())
 
 	if err = r.client.Get(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}, &cluster); err != nil {
 		if errors.IsNotFound(err) {
-			log.Info("capiv1beta1.Cluster resource may have been deleted, withdrawing reconciliation")
+			log.Info("capiv1beta2.Cluster resource may have been deleted, withdrawing reconciliation")
 
 			return ctrl.Result{}, nil
 		}
 
-		log.Error(err, "unable to get capiv1beta1.Cluster")
+		log.Error(err, "unable to get capiv1beta2.Cluster")
 
 		return ctrl.Result{}, err //nolint:wrapcheck
 	}
@@ -199,12 +200,12 @@ func (r *KamajiControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
 	// check that happens latter will never succeed.
 	if err = r.client.Get(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}, &cluster); err != nil {
 		if errors.IsNotFound(err) {
-			log.Info("capiv1beta1.Cluster resource may have been deleted, withdrawing reconciliation")
+			log.Info("capiv1beta2.Cluster resource may have been deleted, withdrawing reconciliation")
 
 			return ctrl.Result{}, nil
 		}
 
-		log.Error(err, "unable to get capiv1beta1.Cluster")
+		log.Error(err, "unable to get capiv1beta2.Cluster")
 
 		return ctrl.Result{}, err //nolint:wrapcheck
 	}
@@ -221,7 +222,7 @@ func (r *KamajiControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
 	})
 	if err != nil {
-		log.Error(err, "cannot patch capiv1beta1.Cluster")
+		log.Error(err, "cannot patch capiv1beta2.Cluster")
 
 		return ctrl.Result{}, err
 	}
 
@@ -231,13 +232,13 @@ func (r *KamajiControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
 	// 1. an assigned Control Plane endpoint
 	// 2. a ready infrastructure
 	if len(cluster.Spec.ControlPlaneEndpoint.Host) == 0 {
-		log.Info("capiv1beta1.Cluster Control Plane endpoint still unprocessed, enqueuing back")
+		log.Info("capiv1beta2.Cluster Control Plane endpoint still unprocessed, enqueuing back")
 
 		return ctrl.Result{RequeueAfter: time.Second}, nil
 	}
 
-	if !cluster.Status.InfrastructureReady {
-		log.Info("capiv1beta1.Cluster infrastructure is not yet ready, enqueuing back")
+	if conditionsapi.IsFalse(&cluster, capiv1beta2.InfrastructureReadyCondition) {
+		log.Info("capiv1beta2.Cluster infrastructure is not yet ready, enqueuing back")
 
 		return ctrl.Result{RequeueAfter: time.Second}, nil
 	}
diff --git a/controllers/kamajicontrolplane_controller_cluster_patch.go b/controllers/kamajicontrolplane_controller_cluster_patch.go
index 35bfbed..1377eef 100644
--- a/controllers/kamajicontrolplane_controller_cluster_patch.go
+++ b/controllers/kamajicontrolplane_controller_cluster_patch.go
@@ -12,9 +12,9 @@ import (
 
 	"github.com/pkg/errors"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/retry"
-	capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	capiv1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	"sigs.k8s.io/cluster-api/controllers/external"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
@@ -60,7 +60,7 @@ func (r *KamajiControlPlaneReconciler) patchControlPlaneEndpoint(ctx context.Con
 			return errors.Wrap(scopedErr, "cannot retrieve *v1alpha1.KamajiControlPlane")
 		}
 
-		controlPlane.Spec.ControlPlaneEndpoint = capiv1beta1.APIEndpoint{
+		controlPlane.Spec.ControlPlaneEndpoint = capiv1beta2.APIEndpoint{
 			Host: endpoint,
 			Port: int32(port), //nolint:gosec
 		}
@@ -73,10 +73,12 @@ func (r *KamajiControlPlaneReconciler) patchControlPlaneEndpoint(ctx context.Con
 	return nil
 }
 
+//+kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch
+
 //nolint:cyclop
-func (r *KamajiControlPlaneReconciler) patchCluster(ctx context.Context, cluster capiv1beta1.Cluster, controlPlane *v1alpha1.KamajiControlPlane, hostPort string) error {
-	if cluster.Spec.InfrastructureRef == nil {
-		return errors.New("capiv1beta1.Cluster has no InfrastructureRef")
+func (r *KamajiControlPlaneReconciler) patchCluster(ctx context.Context, cluster capiv1beta2.Cluster, controlPlane *v1alpha1.KamajiControlPlane, hostPort string) error {
+	if !cluster.Spec.InfrastructureRef.IsDefined() {
+		return errors.New("capiv1beta2.Cluster has no InfrastructureRef")
 	}
 
 	endpoint, port, err := r.controlPlaneEndpoint(controlPlane, hostPort)
@@ -121,7 +123,7 @@ func (r *KamajiControlPlaneReconciler) patchCluster(ctx context.Context, cluster
 //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=proxmoxclusters;vsphereclusters;tinkerbellclusters,verbs=get;list;watch
 //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=proxmoxclusters;vsphereclusters;tinkerbellclusters,verbs=patch
 
-func (r *KamajiControlPlaneReconciler) checkOrPatchGenericCluster(ctx context.Context, cluster capiv1beta1.Cluster, endpoint string, port int64) error {
+func (r *KamajiControlPlaneReconciler) checkOrPatchGenericCluster(ctx context.Context, cluster capiv1beta2.Cluster, endpoint string, port int64) error {
 	if err := r.checkGenericCluster(ctx, cluster, endpoint, port); err != nil {
 		if errors.As(err, &UnmanagedControlPlaneAddressError{}) {
 			return r.patchGenericCluster(ctx, cluster, endpoint, port, false)
@@ -136,18 +138,13 @@ func (r *KamajiControlPlaneReconciler) checkOrPatchGenericCluster(ctx context.Co
 //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsclusters;azureclusters;hetznerclusters;kubevirtclusters;nutanixclusters;packetclusters;ionoscloudclusters,verbs=patch;get;list;watch
 //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=kubevirtclusters/status;nutanixclusters/status;packetclusters/status,verbs=patch
 
-func (r *KamajiControlPlaneReconciler) patchGenericCluster(ctx context.Context, cluster capiv1beta1.Cluster, endpoint string, port int64, patchStatus bool) error {
-	infraCluster := unstructured.Unstructured{}
-
-	infraCluster.SetGroupVersionKind(cluster.Spec.InfrastructureRef.GroupVersionKind())
-	infraCluster.SetName(cluster.Spec.InfrastructureRef.Name)
-	infraCluster.SetNamespace(cluster.Spec.InfrastructureRef.Namespace)
-
-	if err := r.client.Get(ctx, types.NamespacedName{Name: infraCluster.GetName(), Namespace: infraCluster.GetNamespace()}, &infraCluster); err != nil {
-		return errors.Wrap(err, fmt.Sprintf("cannot retrieve the %s resource", infraCluster.GetKind()))
+func (r *KamajiControlPlaneReconciler) patchGenericCluster(ctx context.Context, cluster capiv1beta2.Cluster, endpoint string, port int64, patchStatus bool) error {
+	infraCluster, err := external.GetObjectFromContractVersionedRef(ctx, r.client, cluster.Spec.InfrastructureRef, cluster.GetNamespace())
+	if err != nil {
+		return errors.Wrap(err, fmt.Sprintf("cannot get infrastructure reference %s", cluster.Spec.InfrastructureRef.Name))
 	}
 
-	patchHelper, err := patch.NewHelper(&infraCluster, r.client)
+	patchHelper, err := patch.NewHelper(infraCluster, r.client)
 	if err != nil {
 		return errors.Wrap(err, "unable to create patch helper")
 	}
@@ -165,7 +162,7 @@ func (r *KamajiControlPlaneReconciler) patchGenericCluster(ctx context.Context,
 		}
 	}
 
-	if err = patchHelper.Patch(ctx, &infraCluster); err != nil {
+	if err = patchHelper.Patch(ctx, infraCluster); err != nil {
 		return errors.Wrap(err, fmt.Sprintf("cannot perform PATCH update for the %s resource", infraCluster.GetKind()))
 	}
 
@@ -174,15 +171,10 @@ func (r *KamajiControlPlaneReconciler) patchGenericCluster(ctx context.Context,
 
 //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=metal3clusters,verbs=get;list;watch
 
-func (r *KamajiControlPlaneReconciler) checkGenericCluster(ctx context.Context, cluster capiv1beta1.Cluster, endpoint string, port int64) error {
-	gkc := unstructured.Unstructured{}
-
-	gkc.SetGroupVersionKind(cluster.Spec.InfrastructureRef.GroupVersionKind())
-	gkc.SetName(cluster.Spec.InfrastructureRef.Name)
-	gkc.SetNamespace(cluster.Spec.InfrastructureRef.Namespace)
-
-	if err := r.client.Get(ctx, types.NamespacedName{Name: gkc.GetName(), Namespace: gkc.GetNamespace()}, &gkc); err != nil {
-		return errors.Wrap(err, fmt.Sprintf("cannot retrieve the %s resource", gkc.GetKind()))
+func (r *KamajiControlPlaneReconciler) checkGenericCluster(ctx context.Context, cluster capiv1beta2.Cluster, endpoint string, port int64) error {
+	gkc, err := external.GetObjectFromContractVersionedRef(ctx, r.client, cluster.Spec.InfrastructureRef, cluster.GetNamespace())
+	if err != nil {
+		return errors.Wrap(err, fmt.Sprintf("cannot get infrastructure reference %s", cluster.Spec.InfrastructureRef.Name))
 	}
 
 	cpHost, _, err := unstructured.NestedString(gkc.Object, "spec", "controlPlaneEndpoint", "host")
@@ -204,11 +196,11 @@ func (r *KamajiControlPlaneReconciler) checkGenericCluster(ctx context.Context,
 	}
 
 	if cpHost != endpoint {
-		return fmt.Errorf("the %s cluster has been provisioned with a mismatching host", gkc.GetKind())
+		return fmt.Errorf("the %s cluster has been provisioned with a mismatching host %s instead of %s", gkc.GetKind(), cpHost, endpoint)
 	}
 
 	if cpPort != port {
-		return fmt.Errorf("the %s cluster has been provisioned with a mismatching port", gkc.GetKind())
+		return fmt.Errorf("the %s cluster has been provisioned with a mismatching port %d instead of %d", gkc.GetKind(), cpPort, port)
 	}
 
 	return nil
@@ -216,18 +208,13 @@ func (r *KamajiControlPlaneReconciler) checkGenericCluster(ctx context.Context,
 
 //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackclusters,verbs=patch;get;list;watch
 
-func (r *KamajiControlPlaneReconciler) patchOpenStackCluster(ctx context.Context, cluster capiv1beta1.Cluster, endpoint string, port int64) error {
-	osc := unstructured.Unstructured{}
-
-	osc.SetGroupVersionKind(cluster.Spec.InfrastructureRef.GroupVersionKind())
-	osc.SetName(cluster.Spec.InfrastructureRef.Name)
-	osc.SetNamespace(cluster.Spec.InfrastructureRef.Namespace)
-
-	if err := r.client.Get(ctx, types.NamespacedName{Name: osc.GetName(), Namespace: osc.GetNamespace()}, &osc); err != nil {
-		return errors.Wrap(err, fmt.Sprintf("cannot retrieve the %s resource", osc.GetKind()))
+func (r *KamajiControlPlaneReconciler) patchOpenStackCluster(ctx context.Context, cluster capiv1beta2.Cluster, endpoint string, port int64) error {
+	osc, err := external.GetObjectFromContractVersionedRef(ctx, r.client, cluster.Spec.InfrastructureRef, cluster.GetNamespace())
+	if err != nil {
+		return errors.Wrap(err, fmt.Sprintf("cannot get infrastructure reference %s", cluster.Spec.InfrastructureRef.Name))
 	}
 
-	patchHelper, err := patch.NewHelper(&osc, r.client)
+	patchHelper, err := patch.NewHelper(osc, r.client)
 	if err != nil {
 		return errors.Wrap(err, "unable to create patch helper")
 	}
@@ -240,7 +227,7 @@ func (r *KamajiControlPlaneReconciler) patchOpenStackCluster(ctx context.Context
 		return errors.Wrap(err, fmt.Sprintf("unable to set unstructured %s spec apiServerPort", osc.GetKind()))
 	}
 
-	if err = patchHelper.Patch(ctx, &osc); err != nil {
+	if err = patchHelper.Patch(ctx, osc); err != nil {
 		return errors.Wrap(err, "cannot perform PATCH update for the OpenStackCluster resource")
 	}
 
diff --git a/controllers/kamajicontrolplane_controller_resources.go b/controllers/kamajicontrolplane_controller_resources.go
index b626258..a0ebb32 100644
--- a/controllers/kamajicontrolplane_controller_resources.go
+++ b/controllers/kamajicontrolplane_controller_resources.go
@@ -12,7 +12,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/retry"
-	capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	capiv1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	ctrllog "sigs.k8s.io/controller-runtime/pkg/log"
@@ -24,7 +24,7 @@ var ErrEnqueueBack = errors.New("enqueue back")
 //+kubebuilder:rbac:groups="",resources="secrets",verbs=get;list;watch;create;update;patch
 
-func (r *KamajiControlPlaneReconciler) createRequiredResources(ctx context.Context, remoteClient client.Client, cluster capiv1beta1.Cluster, kcp v1alpha1.KamajiControlPlane, tcp *kamajiv1alpha1.TenantControlPlane) error {
+func (r *KamajiControlPlaneReconciler) createRequiredResources(ctx context.Context, remoteClient client.Client, cluster capiv1beta2.Cluster, kcp v1alpha1.KamajiControlPlane, tcp *kamajiv1alpha1.TenantControlPlane) error {
 	log := ctrllog.FromContext(ctx)
 
 	// Creating a kubeconfig secret for the workload cluster.
 	if secretName := tcp.Status.KubeConfig.Admin.SecretName; len(secretName) == 0 {
@@ -64,7 +64,7 @@ func (r *KamajiControlPlaneReconciler) createRequiredResources(ctx context.Conte
 // also in regard to the naming conventions according to the Cluster API contracts about Kubeconfig.
 //
 // more info: https://cluster-api.sigs.k8s.io/developer/architecture/controllers/cluster.html#secrets
-func (r *KamajiControlPlaneReconciler) createOrUpdateCertificateAuthority(ctx context.Context, reader client.Client, cluster capiv1beta1.Cluster, kcp v1alpha1.KamajiControlPlane, tcp *kamajiv1alpha1.TenantControlPlane) error {
+func (r *KamajiControlPlaneReconciler) createOrUpdateCertificateAuthority(ctx context.Context, reader client.Client, cluster capiv1beta2.Cluster, kcp v1alpha1.KamajiControlPlane, tcp *kamajiv1alpha1.TenantControlPlane) error {
 	capiCA := &corev1.Secret{}
 	capiCA.Name = cluster.Name + "-ca"
 	capiCA.Namespace = cluster.Namespace
@@ -99,7 +99,7 @@ func (r *KamajiControlPlaneReconciler) createOrUpdateCertificateAuthority(ctx co
 			labels = map[string]string{}
 		}
 
-		labels[capiv1beta1.ClusterNameLabel] = cluster.Name
+		labels[capiv1beta2.ClusterNameLabel] = cluster.Name
 		labels["kamaji.clastix.io/component"] = "capi"
 		labels["kamaji.clastix.io/secret"] = "ca"
 		labels["kamaji.clastix.io/cluster"] = cluster.Name
@@ -111,7 +111,7 @@ func (r *KamajiControlPlaneReconciler) createOrUpdateCertificateAuthority(ctx co
 			corev1.TLSCertKey:       crt,
 			corev1.TLSPrivateKeyKey: key,
 		}
-		capiCA.Type = capiv1beta1.ClusterSecretType
+		capiCA.Type = capiv1beta2.ClusterSecretType
 
 		return controllerutil.SetControllerReference(&kcp, capiCA, r.client.Scheme())
 	})
@@ -129,7 +129,7 @@ func (r *KamajiControlPlaneReconciler) createOrUpdateCertificateAuthority(ctx co
 // also in regard to the naming conventions according to the Cluster API contracts about kubeconfig.
 //
 // more info: https://cluster-api.sigs.k8s.io/developer/architecture/controllers/cluster.html#secrets
-func (r *KamajiControlPlaneReconciler) createOrUpdateKubeconfig(ctx context.Context, reader client.Client, cluster capiv1beta1.Cluster, kcp v1alpha1.KamajiControlPlane, tcp *kamajiv1alpha1.TenantControlPlane) error {
+func (r *KamajiControlPlaneReconciler) createOrUpdateKubeconfig(ctx context.Context, reader client.Client, cluster capiv1beta2.Cluster, kcp v1alpha1.KamajiControlPlane, tcp *kamajiv1alpha1.TenantControlPlane) error {
 	capiAdminKubeconfig := &corev1.Secret{}
 	capiAdminKubeconfig.Name = cluster.Name + "-kubeconfig"
 	capiAdminKubeconfig.Namespace = cluster.Namespace
@@ -149,7 +149,7 @@ func (r *KamajiControlPlaneReconciler) createOrUpdateKubeconfig(ctx context.Cont
 			labels = map[string]string{}
 		}
 
-		labels[capiv1beta1.ClusterNameLabel] = cluster.Name
+		labels[capiv1beta2.ClusterNameLabel] = cluster.Name
 		labels["kamaji.clastix.io/component"] = "capi"
 		labels["kamaji.clastix.io/secret"] = "kubeconfig"
 		labels["kamaji.clastix.io/cluster"] = cluster.Name
@@ -170,7 +170,7 @@ func (r *KamajiControlPlaneReconciler) createOrUpdateKubeconfig(ctx context.Cont
 		capiAdminKubeconfig.Data = map[string][]byte{
 			"value": value,
 		}
-		capiAdminKubeconfig.Type = capiv1beta1.ClusterSecretType
+		capiAdminKubeconfig.Type = capiv1beta2.ClusterSecretType
 
 		return controllerutil.SetControllerReference(&kcp, capiAdminKubeconfig, r.client.Scheme())
 	})
diff --git a/controllers/kamajicontrolplane_controller_tcp.go b/controllers/kamajicontrolplane_controller_tcp.go
index 39c3164..6ff5f65 100644
--- a/controllers/kamajicontrolplane_controller_tcp.go
+++ b/controllers/kamajicontrolplane_controller_tcp.go
@@ -14,7 +14,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/util/retry"
 	"k8s.io/utils/ptr"
-	capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	capiv1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 
@@ -27,7 +27,7 @@ var ErrUnsupportedCertificateSAN = errors.New("a certificate SAN must be made of
 //+kubebuilder:rbac:groups=kamaji.clastix.io,resources=tenantcontrolplanes,verbs=get;list;watch;create;update
 
 //nolint:funlen,gocognit,cyclop,maintidx
-func (r *KamajiControlPlaneReconciler) createOrUpdateTenantControlPlane(ctx context.Context, remoteClient client.Client, cluster capiv1beta1.Cluster, kcp kcpv1alpha1.KamajiControlPlane) (*kamajiv1alpha1.TenantControlPlane, error) {
+func (r *KamajiControlPlaneReconciler) createOrUpdateTenantControlPlane(ctx context.Context, remoteClient client.Client, cluster capiv1beta2.Cluster, kcp kcpv1alpha1.KamajiControlPlane) (*kamajiv1alpha1.TenantControlPlane, error) {
 	tcp := &kamajiv1alpha1.TenantControlPlane{}
 	tcp.Name = kcp.GetName()
 	tcp.Namespace = kcp.GetNamespace()
@@ -62,22 +62,20 @@ func (r *KamajiControlPlaneReconciler) createOrUpdateTenantControlPlane(ctx cont
 		} else {
 			delete(tcp.Annotations, kamajiv1alpha1.KubeconfigSecretKeyAnnotation)
 		}
-		if cluster.Spec.ClusterNetwork != nil {
-			// TenantControlPlane port
-			if apiPort := cluster.Spec.ClusterNetwork.APIServerPort; apiPort != nil {
-				tcp.Spec.NetworkProfile.Port = *apiPort
-			}
-			// TenantControlPlane Services CIDR
-			if serviceCIDR := cluster.Spec.ClusterNetwork.Services; serviceCIDR != nil && len(serviceCIDR.CIDRBlocks) > 0 {
-				tcp.Spec.NetworkProfile.ServiceCIDR = serviceCIDR.CIDRBlocks[0]
-			}
-			// TenantControlPlane Pods CIDR
-			if podsCIDR := cluster.Spec.ClusterNetwork.Pods; podsCIDR != nil && len(podsCIDR.CIDRBlocks) > 0 {
-				tcp.Spec.NetworkProfile.PodCIDR = podsCIDR.CIDRBlocks[0]
-			}
-			// TenantControlPlane cluster domain
-			tcp.Spec.NetworkProfile.ClusterDomain = cluster.Spec.ClusterNetwork.ServiceDomain
+		// TenantControlPlane port
+		tcp.Spec.NetworkProfile.Port = cluster.Spec.ClusterNetwork.APIServerPort
+		// TenantControlPlane Services CIDR
+		serviceCIDR := cluster.Spec.ClusterNetwork.Services
+		if len(serviceCIDR.CIDRBlocks) > 0 {
+			tcp.Spec.NetworkProfile.ServiceCIDR = serviceCIDR.CIDRBlocks[0]
+		}
+		// TenantControlPlane Pods CIDR
+		podsCIDR := cluster.Spec.ClusterNetwork.Pods
+		if len(podsCIDR.CIDRBlocks) > 0 {
+			tcp.Spec.NetworkProfile.PodCIDR = podsCIDR.CIDRBlocks[0]
 		}
+		// TenantControlPlane cluster domain
+		tcp.Spec.NetworkProfile.ClusterDomain = cluster.Spec.ClusterNetwork.ServiceDomain
 		// Replicas
 		tcp.Spec.ControlPlane.Deployment.Replicas = kcp.Spec.Replicas
 		// Version
diff --git a/go.mod b/go.mod
index bf9dc35..047d041 100644
--- a/go.mod
+++ b/go.mod
@@ -13,7 +13,7 @@ require (
 	k8s.io/client-go v0.34.1
 	k8s.io/component-base v0.34.1
 	k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
-	sigs.k8s.io/cluster-api v1.10.4
+	sigs.k8s.io/cluster-api v1.11.3
 	sigs.k8s.io/controller-runtime v0.22.1
 )
 
diff --git a/go.sum b/go.sum
index 718ca14..b89b981 100644
--- a/go.sum
+++ b/go.sum
@@ -26,8 +26,8 @@ github.com/clastix/kamaji v1.0.1-0.20250911194241-764433bd0405 h1:jG0mn8IMevcOnE
 github.com/clastix/kamaji v1.0.1-0.20250911194241-764433bd0405/go.mod h1:VCArjICd7jtO90DMI/k3P2Xu02lnCz6eTbLlpu3HMf4=
 github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0=
 github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
-github.com/coredns/corefile-migration v1.0.26 h1:xiiEkVB1Dwolb24pkeDUDBfygV9/XsOSq79yFCrhptY=
-github.com/coredns/corefile-migration v1.0.26/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY=
+github.com/coredns/corefile-migration v1.0.29 h1:g4cPYMXXDDs9uLE2gFYrJaPBuUAR07eEMGyh9JBE13w=
+github.com/coredns/corefile-migration v1.0.29/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY=
 github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -292,8 +292,8 @@ k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8
 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
-sigs.k8s.io/cluster-api v1.10.4 h1:5mdyWLGbbwOowWrjqM/J9N600QnxTohu5J1/1YR6g7c=
-sigs.k8s.io/cluster-api v1.10.4/go.mod h1:68GJs286ZChsncp+TxYNj/vhy2NWokiPtH4+SA0afs0=
+sigs.k8s.io/cluster-api v1.11.3 h1:apxfugbP1X8AG7THCM74CTarCOW4H2oOc6hlbm1hY80=
+sigs.k8s.io/cluster-api v1.11.3/go.mod h1:CA471SACi81M8DzRKTlWpHV33G0cfWEj7sC4fALFVok=
 sigs.k8s.io/controller-runtime v0.22.1 h1:Ah1T7I+0A7ize291nJZdS1CabF/lB4E++WizgV24Eqg=
 sigs.k8s.io/controller-runtime v0.22.1/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY=
 sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
diff --git a/main.go b/main.go
index dfdff96..03e1b72 100644
--- a/main.go
+++ b/main.go
@@ -17,7 +17,7 @@ import (
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
 	"k8s.io/client-go/rest"
 	"k8s.io/component-base/featuregate"
-	capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	capiv1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util/flags"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -41,7 +41,7 @@ var (
 
 func init() {
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
 	utilruntime.Must(kamajiv1alpha1.AddToScheme(scheme))
-	utilruntime.Must(capiv1beta1.AddToScheme(scheme))
+	utilruntime.Must(capiv1beta2.AddToScheme(scheme))
 	utilruntime.Must(controlplanev1alpha1.AddToScheme(scheme))
 }