
Commit 94233c1

Add ClusterFilter to ClusterCache Options
This allows filtering the Clusters that are handled by the cache. It can be used, for example, by providers that only want to cache Clusters of the type relevant to them.

Signed-off-by: Lennart Jern <[email protected]>
1 parent 0978f70 commit 94233c1
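
To illustrate the use case described in the commit message, here is a minimal sketch of how a provider might wire the new ClusterFilter option so that only Clusters backed by its own infrastructure kind are connected and cached. This sketch is not part of the commit: the SetupWithManager entry point, the import paths, the controller.Options value, and the "ExampleCluster" kind are assumptions for illustration, and other required Options fields are omitted for brevity.

```go
package example

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" // assumed import path for the core API types
	"sigs.k8s.io/cluster-api/controllers/clustercache"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
)

// setupClusterCache is a hypothetical helper showing how a provider could
// pass a ClusterFilter via Options so that the ClusterCache only handles
// Clusters whose infrastructure is of the (hypothetical) "ExampleCluster" kind.
func setupClusterCache(ctx context.Context, mgr ctrl.Manager) (clustercache.ClusterCache, error) {
	return clustercache.SetupWithManager(ctx, mgr, clustercache.Options{
		// Other Options fields (e.g. the secret client and the per-cluster
		// cache/client options) are omitted here for brevity.
		ClusterFilter: func(cluster *clusterv1.Cluster) bool {
			// Handle only Clusters that reference this provider's
			// infrastructure kind; everything else is ignored.
			// Assumes Spec.InfrastructureRef is a struct value, as in the
			// current contract-versioned API.
			return cluster.Spec.InfrastructureRef.Kind == "ExampleCluster"
		},
	}, controller.Options{MaxConcurrentReconciles: 10})
}
```

With such a filter in place, Reconcile disconnects and drops the accessor for any Cluster the filter rejects, as shown in the cluster_cache.go changes below.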

File tree: 2 files changed (+63, −3 lines)
controllers/clustercache/cluster_cache.go

Lines changed: 32 additions & 3 deletions
@@ -60,13 +60,22 @@ type Options struct {
     // will never be created.
     WatchFilterValue string

+    // ClusterFilter is a function that can be used to filter which clusters should be handled
+    // by the ClusterCache. If nil, all clusters will be handled. If set, only clusters for which
+    // the filter returns true will be handled.
+    ClusterFilter ClusterFilter
+
     // Cache are the cache options for the caches that are created per cluster.
     Cache CacheOptions

     // Client are the client options for the clients that are created per cluster.
     Client ClientOptions
 }

+// ClusterFilter is a function that filters which clusters should be handled by the ClusterCache.
+// It returns true if the cluster should be handled, false otherwise.
+type ClusterFilter func(cluster *clusterv1.Cluster) bool
+
 // CacheOptions are the cache options for the caches that are created per cluster.
 type CacheOptions struct {
     // SyncPeriod is the sync period of the cache.
@@ -364,6 +373,11 @@ type clusterCache struct {

     // cacheCtxCancel is used during Shutdown to stop caches.
     cacheCtxCancel context.CancelCauseFunc
+
+    // clusterFilter is a function that can be used to filter which clusters should be handled
+    // by the ClusterCache. If nil, all clusters will be handled. If set, only clusters for which
+    // the filter returns true will be handled.
+    clusterFilter ClusterFilter
 }

 // clusterSource stores the necessary information so we can enqueue reconcile.Requests for reconcilers that
@@ -451,13 +465,14 @@ func (cc *clusterCache) Reconcile(ctx context.Context, req reconcile.Request) (r
     log := ctrl.LoggerFrom(ctx)
     clusterKey := client.ObjectKey{Namespace: req.Namespace, Name: req.Name}

-    accessor := cc.getOrCreateClusterAccessor(clusterKey)
-
     cluster := &clusterv1.Cluster{}
     if err := cc.client.Get(ctx, req.NamespacedName, cluster); err != nil {
         if apierrors.IsNotFound(err) {
             log.Info("Cluster has been deleted, disconnecting")
-            accessor.Disconnect(ctx)
+            accessor := cc.getClusterAccessor(clusterKey)
+            if accessor != nil {
+                accessor.Disconnect(ctx)
+            }
             cc.deleteClusterAccessor(clusterKey)
             cc.cleanupClusterSourcesForCluster(clusterKey)
             return ctrl.Result{}, nil
@@ -468,6 +483,20 @@ func (cc *clusterCache) Reconcile(ctx context.Context, req reconcile.Request) (r
         return ctrl.Result{RequeueAfter: defaultRequeueAfter}, nil
     }

+    // Apply cluster filter if set
+    if cc.clusterFilter != nil && !cc.clusterFilter(cluster) {
+        log.V(6).Info("Cluster filtered out by ClusterFilter, not connecting")
+        accessor := cc.getClusterAccessor(clusterKey)
+        if accessor != nil {
+            accessor.Disconnect(ctx)
+        }
+        cc.deleteClusterAccessor(clusterKey)
+        cc.cleanupClusterSourcesForCluster(clusterKey)
+        return ctrl.Result{}, nil
+    }
+
+    accessor := cc.getOrCreateClusterAccessor(clusterKey)
+
     // Return if infrastructure is not ready yet to avoid trying to open a connection when it cannot succeed.
     // Requeue is not needed as there will be a new reconcile.Request when Cluster.status.initialization.infrastructureProvisioned is set.
     if !ptr.Deref(cluster.Status.Initialization.InfrastructureProvisioned, false) {

controllers/clustercache/cluster_cache_test.go

Lines changed: 31 additions & 0 deletions
@@ -57,6 +57,9 @@ func TestReconcile(t *testing.T) {
         ObjectMeta: metav1.ObjectMeta{
             Name: "test-cluster",
             Namespace: metav1.NamespaceDefault,
+            Labels: map[string]string{
+                "cluster.x-k8s.io/included-in-clustercache-tests": "true",
+            },
         },
         Spec: clusterv1.ClusterSpec{
             ControlPlaneRef: clusterv1.ContractVersionedObjectReference{
@@ -87,6 +90,9 @@ func TestReconcile(t *testing.T) {
         clusterAccessorConfig: accessorConfig,
         clusterAccessors: make(map[client.ObjectKey]*clusterAccessor),
         cacheCtx: context.Background(),
+        clusterFilter: func(cluster *clusterv1.Cluster) bool {
+            return cluster.ObjectMeta.Labels["cluster.x-k8s.io/included-in-clustercache-tests"] == "true"
+        },
     }

     // Add a Cluster source and start it (queue will be later used to verify the source works correctly)
@@ -110,6 +116,31 @@ func TestReconcile(t *testing.T) {
     testCluster.Status.Initialization.InfrastructureProvisioned = ptr.To(true)
     g.Expect(env.Status().Patch(ctx, testCluster, patch)).To(Succeed())

+    // Exclude the Cluster from the clustercache by changing the label
+    patch = client.MergeFrom(testCluster.DeepCopy())
+    testCluster.ObjectMeta.Labels = map[string]string{
+        "cluster.x-k8s.io/included-in-clustercache-tests": "false",
+    }
+    g.Expect(env.Patch(ctx, testCluster, patch)).To(Succeed())
+    // Sanity check that the clusterFilter does not include the cluster now
+    g.Expect(cc.clusterFilter(testCluster)).To(BeFalse())
+
+    // Reconcile, cluster should be ignored now
+    // => no requeue, no cluster accessor created
+    res, err = cc.Reconcile(ctx, reconcile.Request{NamespacedName: clusterKey})
+    g.Expect(err).ToNot(HaveOccurred())
+    g.Expect(res).To(Equal(ctrl.Result{}))
+    g.Expect(res.IsZero()).To(BeTrue())
+
+    // Put the label back
+    patch = client.MergeFrom(testCluster.DeepCopy())
+    testCluster.ObjectMeta.Labels = map[string]string{
+        "cluster.x-k8s.io/included-in-clustercache-tests": "true",
+    }
+    g.Expect(env.Patch(ctx, testCluster, patch)).To(Succeed())
+    // Sanity check that the clusterFilter does include the cluster now
+    g.Expect(cc.clusterFilter(testCluster)).To(BeTrue())
+
     // Reconcile, kubeconfig Secret doesn't exist
     // => accessor.Connect will fail so we expect a retry with ConnectionCreationRetryInterval.
     res, err = cc.Reconcile(ctx, reconcile.Request{NamespacedName: clusterKey})
