/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package garbagecollection

import (
	"context"
	"fmt"
	"time"

	"github.com/awslabs/operatorpkg/singleton"
	"github.com/samber/lo"
	"go.uber.org/multierr"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog/v2"
	controllerruntime "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
	"sigs.k8s.io/karpenter/pkg/cloudprovider"
	"sigs.k8s.io/karpenter/pkg/operator/injection"

	"github.com/cloudpilot-ai/karpenter-provider-alibabacloud/pkg/providers/instance"
)

type Controller struct {
	kubeClient    client.Client
	cloudProvider cloudprovider.CloudProvider
	// Keeps track of successful reconciles for more aggressive requeueing near the start of the controller.
	successfulCount uint64
}
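
// NewController constructs a garbage collection controller for cloudprovider instances.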
func NewController(kubeClient client.Client, cloudProvider cloudprovider.CloudProvider) *Controller {
	return &Controller{
		kubeClient:      kubeClient,
		cloudProvider:   cloudProvider,
		successfulCount: 0,
	}
}
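
// Reconcile lists instances at the cloud provider and garbage collects any that no longer have a matching
// NodeClaim in the cluster.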
func (c *Controller) Reconcile(ctx context.Context) (reconcile.Result, error) {
	ctx = injection.WithControllerName(ctx, "instance.garbagecollection")

	// We LIST instances on the CloudProvider BEFORE we grab NodeClaims/Nodes on the cluster so that, if LISTing
	// instances takes a long time, our cluster state is more up to date by the time we get to the NodeClaim and
	// Node LIST. This works because CloudProvider instances are deleted based on whether the NodeClaim exists,
	// not vice versa.
	retrieved, err := c.cloudProvider.List(ctx)
	if err != nil {
		return reconcile.Result{}, fmt.Errorf("listing cloudprovider machines, %w", err)
	}
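	// Only consider instances that the cloud provider is not already in the process of deleting.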
	managedRetrieved := lo.Filter(retrieved, func(nc *karpv1.NodeClaim, _ int) bool {
		return nc.DeletionTimestamp.IsZero()
	})
	nodeClaimList := &karpv1.NodeClaimList{}
	if err = c.kubeClient.List(ctx, nodeClaimList); err != nil {
		return reconcile.Result{}, err
	}
	nodeList := &corev1.NodeList{}
	if err = c.kubeClient.List(ctx, nodeList); err != nil {
		return reconcile.Result{}, err
	}
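	// Collect the provider IDs of every NodeClaim in the cluster that has been resolved to an instance.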
	resolvedProviderIDs := sets.New[string](lo.FilterMap(nodeClaimList.Items, func(n karpv1.NodeClaim, _ int) (string, bool) {
		return n.Status.ProviderID, n.Status.ProviderID != ""
	})...)
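	// In parallel, garbage collect any instance that no NodeClaim tracks. The 30s grace period avoids deleting
	// instances whose NodeClaim status has not had time to record a provider ID yet. errs is sized by retrieved,
	// a superset of managedRetrieved, so indexing by i is safe.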
	errs := make([]error, len(retrieved))
	workqueue.ParallelizeUntil(ctx, 100, len(managedRetrieved), func(i int) {
		if !resolvedProviderIDs.Has(managedRetrieved[i].Status.ProviderID) &&
			time.Since(managedRetrieved[i].CreationTimestamp.Time) > time.Second*30 {
			errs[i] = c.garbageCollect(ctx, managedRetrieved[i], nodeList)
		}
	})
	if err = multierr.Combine(errs...); err != nil {
		return reconcile.Result{}, err
	}
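	// Requeue aggressively for the first 20 successful reconciles to catch leaked instances quickly after
	// startup, then back off to once per minute.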
	c.successfulCount++
	return reconcile.Result{RequeueAfter: lo.Ternary(c.successfulCount <= 20, time.Second*10, time.Minute)}, nil
}
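
// garbageCollect deletes the cloudprovider instance backing the given NodeClaim and, if one is present in the
// cluster, the Node object that references it.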
func (c *Controller) garbageCollect(ctx context.Context, nodeClaim *karpv1.NodeClaim, nodeList *corev1.NodeList) error {
	ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("provider-id", nodeClaim.Status.ProviderID))
	if err := c.cloudProvider.Delete(ctx, nodeClaim); err != nil {
		// An instanceStateOperationNotSupportedError is not worth surfacing as an error: it happens when a
		// deployment is scaled up and then scaled down immediately, leaving the node in an initializing state in
		// which the cloud SDK API rejects the delete as an unsupported operation. The controller will simply
		// retry on the next reconcile.
		if instance.IsInstanceStateOperationNotSupportedError(err) ||
			cloudprovider.IsNodeClaimNotFoundError(err) {
			return nil
		}
		return err
	}
	log.FromContext(ctx).V(1).Info("garbage collected cloudprovider instance")

	// Go ahead and clean up the Node if we know that it exists, to make scheduling go quicker
	if node, ok := lo.Find(nodeList.Items, func(n corev1.Node) bool {
		return n.Spec.ProviderID == nodeClaim.Status.ProviderID
	}); ok {
		if err := c.kubeClient.Delete(ctx, &node); err != nil {
			return client.IgnoreNotFound(err)
		}
		log.FromContext(ctx).WithValues("Node", klog.KRef("", node.Name)).V(1).Info("garbage collected node")
	}
	return nil
}
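
// Register wires the controller into the manager as a singleton reconciler, driven by its own requeue interval
// rather than by watch events on cluster objects.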
func (c *Controller) Register(_ context.Context, m manager.Manager) error {
	return controllerruntime.NewControllerManagedBy(m).
		Named("instance.garbagecollection").
		WatchesRawSource(singleton.Source()).
		Complete(singleton.AsReconciler(c))
}