
Commit 13d26a5

Merge pull request #2378 from detiber/kcpFilterCleanup
🏃 Cleanup KubeadmControlPlane machine filters
2 parents 873422d + ac8b360 · commit 13d26a5

File tree

9 files changed: +51 −100 lines changed


controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go

Lines changed: 9 additions & 19 deletions
@@ -19,7 +19,6 @@ package controllers
 import (
 	"context"
 	"fmt"
-	"sort"
 	"strings"
 	"time"
 
@@ -67,7 +66,7 @@ const (
 )
 
 type managementCluster interface {
-	GetMachinesForCluster(ctx context.Context, cluster types.NamespacedName, filters ...func(machine *clusterv1.Machine) bool) ([]*clusterv1.Machine, error)
+	GetMachinesForCluster(ctx context.Context, cluster types.NamespacedName, filters ...internal.MachineFilter) (internal.FilterableMachineCollection, error)
 	TargetClusterControlPlaneIsHealthy(ctx context.Context, clusterKey types.NamespacedName, controlPlaneName string) error
 	TargetClusterEtcdIsHealthy(ctx context.Context, clusterKey types.NamespacedName, controlPlaneName string) error
 }
@@ -235,8 +234,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	}
 
 	currentConfigurationHash := hash.Compute(&kcp.Spec)
-	requireUpgrade := internal.FilterMachines(
-		ownedMachines,
+	requireUpgrade := ownedMachines.AnyFilter(
 		internal.Not(internal.MatchesConfigurationHash(currentConfigurationHash)),
 		internal.OlderThan(kcp.Spec.UpgradeAfter),
 	)
@@ -254,7 +252,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	}
 
 	// If we've made it this far, we don't need to worry about Machines that are older than kcp.Spec.UpgradeAfter
-	currentMachines := internal.FilterMachines(ownedMachines, internal.MatchesConfigurationHash(currentConfigurationHash))
+	currentMachines := ownedMachines.Filter(internal.MatchesConfigurationHash(currentConfigurationHash))
 	numMachines := len(currentMachines)
 	desiredReplicas := int(*kcp.Spec.Replicas)
 
@@ -312,7 +310,7 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c
 		return errors.Wrap(err, "failed to get list of owned machines")
 	}
 
-	currentMachines := internal.FilterMachines(ownedMachines, internal.MatchesConfigurationHash(hash.Compute(&kcp.Spec)))
+	currentMachines := ownedMachines.Filter(internal.MatchesConfigurationHash(hash.Compute(&kcp.Spec)))
 	kcp.Status.UpdatedReplicas = int32(len(currentMachines))
 
 	replicas := int32(len(ownedMachines))
@@ -347,7 +345,7 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c
 	return nil
 }
 
-func (r *KubeadmControlPlaneReconciler) upgradeControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, requireUpgrade []*clusterv1.Machine) error {
+func (r *KubeadmControlPlaneReconciler) upgradeControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, requireUpgrade internal.FilterableMachineCollection) error {
 
 	// TODO: verify health for each existing replica
 	// TODO: mark an old Machine via the label kubeadm.controlplane.cluster.x-k8s.io/selected-for-upgrade
@@ -412,13 +410,13 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(ctx context.Contex
 	}
 
 	// Wait for any delete in progress to complete before deleting another Machine
-	if len(internal.FilterMachines(ownedMachines, internal.HasDeletionTimestamp)) > 0 {
+	if len(ownedMachines.Filter(internal.HasDeletionTimestamp)) > 0 {
 		return ctrl.Result{RequeueAfter: DeleteRequeueAfter}, nil
 	}
 
-	machineToDelete, err := oldestMachine(ownedMachines)
-	if err != nil {
-		return ctrl.Result{}, errors.Wrap(err, "failed to pick control plane Machine to delete")
+	machineToDelete := ownedMachines.Oldest()
+	if machineToDelete == nil {
+		return ctrl.Result{}, errors.New("failed to pick control plane Machine to delete")
 	}
 
 	if err := r.Client.Delete(ctx, machineToDelete); err != nil && !apierrors.IsNotFound(err) {
@@ -719,11 +717,3 @@ func getMachineNode(ctx context.Context, crClient client.Client, machine *cluste
 
 	return node, nil
 }
-
-func oldestMachine(machines []*clusterv1.Machine) (*clusterv1.Machine, error) {
-	if len(machines) == 0 {
-		return &clusterv1.Machine{}, errors.New("no machines given")
-	}
-	sort.Sort(util.MachinesByCreationTimestamp(machines))
-	return machines[0], nil
-}
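The FilterableMachineCollection type referenced throughout this diff is added in a new internal package file that is not shown in this excerpt. The following is a minimal sketch of the surface the call sites above rely on (NewFilterableMachineCollection, Insert, Filter, AnyFilter, Oldest); it illustrates the pattern only, and the PR's actual implementation may differ in its backing structure and details.

// Sketch only: an illustrative FilterableMachineCollection with the methods
// used by the controller above; not the PR's actual implementation.
package internal

import (
	"sort"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
)

// MachineFilter is a predicate over a single Machine.
type MachineFilter func(machine *clusterv1.Machine) bool

// FilterableMachineCollection is a set of Machines keyed by Machine name.
type FilterableMachineCollection map[string]*clusterv1.Machine

// NewFilterableMachineCollection builds a collection from the given Machines.
func NewFilterableMachineCollection(machines ...*clusterv1.Machine) FilterableMachineCollection {
	c := make(FilterableMachineCollection, len(machines))
	for _, m := range machines {
		c.Insert(m)
	}
	return c
}

// Insert adds a Machine to the collection, ignoring nil values.
func (c FilterableMachineCollection) Insert(m *clusterv1.Machine) {
	if m != nil {
		c[m.Name] = m
	}
}

// Filter returns the Machines that match every supplied filter.
func (c FilterableMachineCollection) Filter(filters ...MachineFilter) FilterableMachineCollection {
	out := make(FilterableMachineCollection, len(c))
	for _, m := range c {
		matches := true
		for _, f := range filters {
			if !f(m) {
				matches = false
				break
			}
		}
		if matches {
			out.Insert(m)
		}
	}
	return out
}

// AnyFilter returns the Machines that match at least one of the supplied filters.
func (c FilterableMachineCollection) AnyFilter(filters ...MachineFilter) FilterableMachineCollection {
	out := make(FilterableMachineCollection, len(c))
	for _, m := range c {
		for _, f := range filters {
			if f(m) {
				out.Insert(m)
				break
			}
		}
	}
	return out
}

// Oldest returns the Machine with the earliest creation timestamp, or nil for an empty collection.
func (c FilterableMachineCollection) Oldest() *clusterv1.Machine {
	if len(c) == 0 {
		return nil
	}
	machines := make([]*clusterv1.Machine, 0, len(c))
	for _, m := range c {
		machines = append(machines, m)
	}
	sort.Slice(machines, func(i, j int) bool {
		return machines[i].CreationTimestamp.Before(&machines[j].CreationTimestamp)
	})
	return machines[0]
}

With this shape, the reconcile code reads as set operations, and Filter with no arguments returns every Machine, matching the behavior of the removed FilterMachines helper.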

controlplane/kubeadm/controllers/kubeadm_control_plane_controller_test.go

Lines changed: 8 additions & 8 deletions
@@ -1319,10 +1319,10 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) {
 type fakeManagementCluster struct {
 	ControlPlaneHealthy bool
 	EtcdHealthy         bool
-	Machines            []*clusterv1.Machine
+	Machines            internal.FilterableMachineCollection
 }
 
-func (f *fakeManagementCluster) GetMachinesForCluster(ctx context.Context, cluster types.NamespacedName, filters ...func(machine *clusterv1.Machine) bool) ([]*clusterv1.Machine, error) {
+func (f *fakeManagementCluster) GetMachinesForCluster(ctx context.Context, cluster types.NamespacedName, filters ...internal.MachineFilter) (internal.FilterableMachineCollection, error) {
 	return f.Machines, nil
 }
 
@@ -1351,15 +1351,15 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) {
 	g.Expect(fakeClient.Create(context.Background(), genericMachineTemplate)).To(Succeed())
 
 	fmc := &fakeManagementCluster{
-		Machines:            []*clusterv1.Machine{},
+		Machines:            internal.NewFilterableMachineCollection(),
 		ControlPlaneHealthy: true,
 		EtcdHealthy:         true,
 	}
 
 	for i := 0; i < 2; i++ {
 		m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
 		g.Expect(fakeClient.Create(context.Background(), m)).To(Succeed())
-		fmc.Machines = append(fmc.Machines, m)
+		fmc.Machines.Insert(m.DeepCopy())
 	}
 
 	r := &KubeadmControlPlaneReconciler{
@@ -1410,15 +1410,15 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane(t *testing.T) {
 	g.Expect(fakeClient.Create(context.Background(), genericMachineTemplate)).To(Succeed())
 
 	fmc := &fakeManagementCluster{
-		Machines:            []*clusterv1.Machine{},
+		Machines:            internal.NewFilterableMachineCollection(),
 		ControlPlaneHealthy: true,
 		EtcdHealthy:         true,
 	}
 
 	for i := 0; i < 2; i++ {
 		m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
 		g.Expect(fakeClient.Create(context.Background(), m)).To(Succeed())
-		fmc.Machines = append(fmc.Machines, m)
+		fmc.Machines.Insert(m.DeepCopy())
 	}
 
 	r := &KubeadmControlPlaneReconciler{
@@ -1446,15 +1446,15 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane(t *testing.T) {
 	g.Expect(fakeClient.Create(context.Background(), genericMachineTemplate)).To(Succeed())
 
 	fmc := &fakeManagementCluster{
-		Machines:            []*clusterv1.Machine{},
+		Machines:            internal.NewFilterableMachineCollection(),
 		ControlPlaneHealthy: true,
 		EtcdHealthy:         true,
 	}
 
 	for i := 0; i < 2; i++ {
 		m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
 		g.Expect(fakeClient.Create(context.Background(), m)).To(Succeed())
-		fmc.Machines = append(fmc.Machines, m)
+		fmc.Machines.Insert(m.DeepCopy())
 	}
 
 	r := &KubeadmControlPlaneReconciler{
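Note that the tests now seed the fake with Insert(m.DeepCopy()), so the fake's collection does not alias the objects the test continues to manage through the fake client. With the variadic constructor, an equivalent setup could be written in one step; this is a hypothetical rewrite for illustration, not part of the diff:

// Hypothetical alternative setup for the tests above.
m0, _ := createMachineNodePair("test-0", cluster, kcp, true)
m1, _ := createMachineNodePair("test-1", cluster, kcp, true)
fmc := &fakeManagementCluster{
	Machines:            internal.NewFilterableMachineCollection(m0.DeepCopy(), m1.DeepCopy()),
	ControlPlaneHealthy: true,
	EtcdHealthy:         true,
}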

controlplane/kubeadm/internal/cluster.go

Lines changed: 7 additions & 33 deletions
@@ -50,31 +50,9 @@ type ManagementCluster struct {
 	Client ctrlclient.Client
 }
 
-// FilterMachines returns a filtered list of machines
-func FilterMachines(machines []*clusterv1.Machine, filters ...func(machine *clusterv1.Machine) bool) []*clusterv1.Machine {
-	if len(filters) == 0 {
-		return machines
-	}
-
-	filteredMachines := make([]*clusterv1.Machine, 0, len(machines))
-	for _, machine := range machines {
-		add := true
-		for _, filter := range filters {
-			if !filter(machine) {
-				add = false
-				break
-			}
-		}
-		if add {
-			filteredMachines = append(filteredMachines, machine)
-		}
-	}
-	return filteredMachines
-}
-
 // GetMachinesForCluster returns a list of machines that can be filtered or not.
 // If no filter is supplied then all machines associated with the target cluster are returned.
-func (m *ManagementCluster) GetMachinesForCluster(ctx context.Context, cluster types.NamespacedName, filters ...func(machine *clusterv1.Machine) bool) ([]*clusterv1.Machine, error) {
+func (m *ManagementCluster) GetMachinesForCluster(ctx context.Context, cluster types.NamespacedName, filters ...MachineFilter) (FilterableMachineCollection, error) {
 	selector := map[string]string{
 		clusterv1.ClusterLabelName: cluster.Name,
 	}
@@ -83,12 +61,8 @@ func (m *ManagementCluster) GetMachinesForCluster(ctx context.Context, cluster t
 		return nil, errors.Wrap(err, "failed to list machines")
 	}
 
-	machines := make([]*clusterv1.Machine, 0, len(ml.Items))
-	for i := range ml.Items {
-		machines = append(machines, &ml.Items[i])
-	}
-
-	return FilterMachines(machines, filters...), nil
+	machines := NewFilterableMachineCollectionFromMachineList(ml)
+	return machines.Filter(filters...), nil
 }
 
 // getCluster builds a cluster object.
@@ -114,7 +88,7 @@ func (m *ManagementCluster) getCluster(ctx context.Context, clusterKey types.Nam
 		client:     c,
 		restConfig: restConfig,
 		etcdCACert: etcdCACert,
-		etcdCAkey:  etcdCAKey,
+		etcdCAKey:  etcdCAKey,
 	}, nil
 }
 
@@ -204,12 +178,12 @@ type cluster struct {
 	client ctrlclient.Client
 	// restConfig is required for the proxy.
 	restConfig *rest.Config
-	etcdCACert, etcdCAkey []byte
+	etcdCACert, etcdCAKey []byte
 }
 
 // generateEtcdTLSClientBundle builds an etcd client TLS bundle from the Etcd CA for this cluster.
 func (c *cluster) generateEtcdTLSClientBundle() (*tls.Config, error) {
-	clientCert, err := generateClientCert(c.etcdCACert, c.etcdCAkey)
+	clientCert, err := generateClientCert(c.etcdCACert, c.etcdCAKey)
 	if err != nil {
 		return nil, err
 	}
@@ -361,7 +335,7 @@ func (c *cluster) getEtcdClientForNode(nodeName string, tlsConfig *tls.Config) (
 	// This does not support external etcd.
 	p := proxy.Proxy{
 		Kind:         "pods",
-		Namespace:    "kube-system", // TODO, can etcd ever run in a different namespace?
+		Namespace:    metav1.NamespaceSystem, // TODO, can etcd ever run in a different namespace?
 		ResourceName: staticPodName("etcd", nodeName),
 		KubeConfig:   c.restConfig,
 		TLSConfig:    tlsConfig,
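NewFilterableMachineCollectionFromMachineList replaces the hand-rolled slice conversion that was deleted above. Assuming the collection sketched earlier, the helper could look roughly like this; it is an illustration, not the PR's actual code:

// Sketch only: convert a clusterv1.MachineList into the collection.
// Taking &machineList.Items[i] (rather than ranging by value) keeps stable
// pointers into the list's backing array instead of pointers to loop copies.
func NewFilterableMachineCollectionFromMachineList(machineList *clusterv1.MachineList) FilterableMachineCollection {
	c := make(FilterableMachineCollection, len(machineList.Items))
	for i := range machineList.Items {
		c.Insert(&machineList.Items[i])
	}
	return c
}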

controlplane/kubeadm/internal/cluster_test.go

Lines changed: 2 additions & 1 deletion
@@ -25,8 +25,9 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
 )
 
 func podReady(isReady corev1.ConditionStatus) corev1.PodCondition {

controlplane/kubeadm/internal/etcd/util/set.go

Lines changed: 7 additions & 7 deletions
@@ -27,7 +27,7 @@ import (
 
 type empty struct{}
 
-// util.UInt64Set is a set of int64s, implemented via map[uint64]struct{} for minimal memory consumption.
+// util.UInt64Set is a set of uint64s, implemented via map[uint64]struct{} for minimal memory consumption.
 type UInt64Set map[uint64]empty
 
 // NewUInt64Set creates a UInt64Set from a list of values.
@@ -156,15 +156,15 @@ func (s UInt64Set) Equal(s2 UInt64Set) bool {
 	return len(s1) == len(s2) && s1.IsSuperset(s2)
 }
 
-type sortableSliceOfInt64 []uint64
+type sortableSliceOfUInt64 []uint64
 
-func (s sortableSliceOfInt64) Len() int           { return len(s) }
-func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) }
-func (s sortableSliceOfInt64) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s sortableSliceOfUInt64) Len() int           { return len(s) }
+func (s sortableSliceOfUInt64) Less(i, j int) bool { return lessUInt64(s[i], s[j]) }
+func (s sortableSliceOfUInt64) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
 
 // List returns the contents as a sorted uint64 slice.
 func (s UInt64Set) List() []uint64 {
-	res := make(sortableSliceOfInt64, 0, len(s))
+	res := make(sortableSliceOfUInt64, 0, len(s))
 	for key := range s {
 		res = append(res, key)
 	}
@@ -196,6 +196,6 @@ func (s UInt64Set) Len() int {
 	return len(s)
 }
 
-func lessInt64(lhs, rhs uint64) bool {
+func lessUInt64(lhs, rhs uint64) bool {
 	return lhs < rhs
 }
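Beyond the Int64 to UInt64 renames, behavior is unchanged: List returns the members in ascending order via the sortable slice above. A small usage example, assuming the set is consumed from its util package as elsewhere in this tree:

// Example only: etcd member IDs are uint64s, which is why this set type exists.
members := util.NewUInt64Set(42, 7, 19)
ordered := members.List() // [7 19 42], sorted ascending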

controlplane/kubeadm/internal/failure_domain.go

Lines changed: 3 additions & 3 deletions
@@ -49,7 +49,7 @@ func (f failureDomainAggregations) Swap(i, j int) {
 }
 
 // PickMost returns the failure domain with the most number of machines.
-func PickMost(failureDomains clusterv1.FailureDomains, machines []*clusterv1.Machine) string {
+func PickMost(failureDomains clusterv1.FailureDomains, machines FilterableMachineCollection) string {
 	aggregations := pick(failureDomains, machines)
 	if len(aggregations) == 0 {
 		return ""
@@ -60,7 +60,7 @@ func PickMost(failureDomains clusterv1.FailureDomains, machines []*clusterv1.Mac
 }
 
 // PickFewest returns the failure domain with the fewest number of machines.
-func PickFewest(failureDomains clusterv1.FailureDomains, machines []*clusterv1.Machine) string {
+func PickFewest(failureDomains clusterv1.FailureDomains, machines FilterableMachineCollection) string {
 	aggregations := pick(failureDomains, machines)
 	if len(aggregations) == 0 {
 		return ""
@@ -69,7 +69,7 @@ func PickFewest(failureDomains clusterv1.FailureDomains, machines []*clusterv1.M
 	return aggregations[0].id
 }
 
-func pick(failureDomains clusterv1.FailureDomains, machines []*clusterv1.Machine) failureDomainAggregations {
+func pick(failureDomains clusterv1.FailureDomains, machines FilterableMachineCollection) failureDomainAggregations {
 	if len(failureDomains) == 0 {
 		return failureDomainAggregations{}
 	}
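PickMost and PickFewest now take the collection directly, so callers no longer convert to a slice first. An illustrative call site (variable names are assumptions, not taken from this diff) when placing a new control plane Machine:

// Illustrative only: spread a new control plane Machine across failure domains
// by choosing the domain that currently holds the fewest owned machines.
fd := internal.PickFewest(cluster.Status.FailureDomains, ownedMachines)
if fd != "" {
	machine.Spec.FailureDomain = &fd
}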

controlplane/kubeadm/internal/failure_domain_test.go

Lines changed: 6 additions & 13 deletions
@@ -37,7 +37,7 @@ func TestNewFailureDomainPicker(t *testing.T) {
 	testcases := []struct {
 		name     string
 		fds      clusterv1.FailureDomains
-		machines []*clusterv1.Machine
+		machines FilterableMachineCollection
 		expected []string
 	}{
 		{
@@ -52,37 +52,30 @@ func TestNewFailureDomainPicker(t *testing.T) {
 			expected: []string{a},
 		},
 		{
-			name: "one machine in a failure domain",
-			fds:  fds,
-			machines: []*clusterv1.Machine{
-				machinea.DeepCopy(),
-			},
+			name:     "one machine in a failure domain",
+			fds:      fds,
+			machines: NewFilterableMachineCollection(machinea.DeepCopy()),
 			expected: []string{b},
 		},
 		{
 			name: "no failure domain specified on machine",
 			fds: clusterv1.FailureDomains{
 				a: clusterv1.FailureDomainSpec{},
 			},
-			machines: []*clusterv1.Machine{
-				machinenil.DeepCopy(),
-			},
+			machines: NewFilterableMachineCollection(machinenil.DeepCopy()),
 			expected: []string{a, b},
 		},
 		{
 			name: "mismatched failure domain on machine",
 			fds: clusterv1.FailureDomains{
 				a: clusterv1.FailureDomainSpec{},
 			},
-			machines: []*clusterv1.Machine{
-				machineb.DeepCopy(),
-			},
+			machines: NewFilterableMachineCollection(machineb.DeepCopy()),
 			expected: []string{a},
 		},
 		{
 			name:     "failure domains and no machines should return a valid failure domain",
 			fds:      fds,
-			machines: []*clusterv1.Machine{},
 			expected: []string{a, b},
 		},
 	}

controlplane/kubeadm/internal/machine_filters.go

Lines changed: 0 additions & 6 deletions
@@ -131,12 +131,6 @@ func OlderThan(t *metav1.Time) MachineFilter {
 	}
 }
 
-// SelectedForUpgrade is a MachineFilter to find all machines that have the
-// controlplanev1.SelectedForUpgradeAnnotation set.
-func SelectedForUpgrade(machine *clusterv1.Machine) bool {
-	return HasAnnotationKey(controlplanev1.SelectedForUpgradeAnnotation)(machine)
-}
-
 // HasAnnotationKey returns a MachineFilter function to find all machines that have the
 // specified Annotation key present
 func HasAnnotationKey(key string) MachineFilter {
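The removed SelectedForUpgrade wrapper was a one-line specialization of HasAnnotationKey, so call sites can compose the generic filter directly. For example (the ownedMachines collection here is illustrative):

// Equivalent to the deleted SelectedForUpgrade helper, expressed inline.
selected := ownedMachines.Filter(
	internal.HasAnnotationKey(controlplanev1.SelectedForUpgradeAnnotation),
)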
