[MIMO] Move cluster certificate functionality to ClientHelper #3736

Merged: 5 commits merged on Sep 5, 2024
56 changes: 56 additions & 0 deletions pkg/cluster/apply.go
@@ -0,0 +1,56 @@
package cluster

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

import (
"context"
"crypto/x509"
"encoding/pem"

corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"

"github.com/Azure/ARO-RP/pkg/util/clienthelper"
"github.com/Azure/ARO-RP/pkg/util/keyvault"
utilpem "github.com/Azure/ARO-RP/pkg/util/pem"
)

func EnsureTLSSecretFromKeyvault(ctx context.Context, kv keyvault.Manager, ch clienthelper.Writer, target types.NamespacedName, certificateName string) error {
bundle, err := kv.GetSecret(ctx, certificateName)
if err != nil {
return err
}

key, certs, err := utilpem.Parse([]byte(*bundle.Value))
if err != nil {
return err
}

b, err := x509.MarshalPKCS8PrivateKey(key)
if err != nil {
return err
}

var cb []byte
for _, cert := range certs {
cb = append(cb, pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})...)


Super opinionated and optional: could we please move the line
pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})...
to an aux variable for readability.
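
A minimal sketch of that suggestion (the certPEM variable name is illustrative, not taken from the PR):

var cb []byte
for _, cert := range certs {
    // encode each certificate as its own PEM block, then append it to the bundle
    certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
    cb = append(cb, certPEM...)
}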

}

privateKey := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: b})

secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: target.Name,
Namespace: target.Namespace,
},
Data: map[string][]byte{
corev1.TLSCertKey: cb,
corev1.TLSPrivateKeyKey: privateKey,
},
Type: corev1.SecretTypeTLS,
}

return ch.Ensure(ctx, secret)


As previously mentioned, since we only use the Ensure method of ch, maybe we could define an interface here with just that method, to stick to the Interface Segregation Principle.
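
For illustration, a strictly single-method interface along those lines might look like the following sketch; the Ensurer name is invented here, and the merged code accepts the broader clienthelper.Writer instead:

// Ensurer captures the one capability EnsureTLSSecretFromKeyvault actually uses.
// Illustrative only; not part of this PR.
type Ensurer interface {
    Ensure(ctx context.Context, objs ...kruntime.Object) error
}

// The function signature would then read:
// func EnsureTLSSecretFromKeyvault(ctx context.Context, kv keyvault.Manager, ch Ensurer, target types.NamespacedName, certificateName string) error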

}
4 changes: 2 additions & 2 deletions pkg/cluster/cluster.go
@@ -23,7 +23,6 @@ import (
extensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"

"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/cluster/graph"
@@ -43,6 +42,7 @@ import (
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/network"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/privatedns"
"github.com/Azure/ARO-RP/pkg/util/billing"
"github.com/Azure/ARO-RP/pkg/util/clienthelper"
"github.com/Azure/ARO-RP/pkg/util/dns"
"github.com/Azure/ARO-RP/pkg/util/encryption"
utilgraph "github.com/Azure/ARO-RP/pkg/util/graph"
@@ -105,7 +105,7 @@ type manager struct {
graph graph.Manager
rpBlob azblob.Manager

client client.Client
ch clienthelper.Interface
kubernetescli kubernetes.Interface
dynamiccli dynamic.Interface
extensionscli extensionsclient.Interface
7 changes: 5 additions & 2 deletions pkg/cluster/install.go
@@ -27,6 +27,7 @@ import (
"github.com/Azure/ARO-RP/pkg/database"
aroclient "github.com/Azure/ARO-RP/pkg/operator/clientset/versioned"
"github.com/Azure/ARO-RP/pkg/operator/deploy"
"github.com/Azure/ARO-RP/pkg/util/clienthelper"
utilgenerics "github.com/Azure/ARO-RP/pkg/util/generics"
"github.com/Azure/ARO-RP/pkg/util/restconfig"
"github.com/Azure/ARO-RP/pkg/util/steps"
@@ -534,16 +535,18 @@ func (m *manager) initializeKubernetesClients(ctx context.Context) error {
return err
}

m.client, err = client.New(restConfig, client.Options{
client, err := client.New(restConfig, client.Options{
Mapper: mapper,
})

m.ch = clienthelper.NewWithClient(m.log, client)
return err
}

// initializeKubernetesClients initializes clients which are used
// once the cluster is up later on in the install process.
func (m *manager) initializeOperatorDeployer(ctx context.Context) (err error) {
m.aroOperatorDeployer, err = deploy.New(m.log, m.env, m.doc.OpenShiftCluster, m.arocli, m.client, m.extensionscli, m.kubernetescli, m.operatorcli, m.subscriptionDoc)
m.aroOperatorDeployer, err = deploy.New(m.log, m.env, m.doc.OpenShiftCluster, m.subscriptionDoc, m.arocli, m.ch, m.extensionscli, m.kubernetescli, m.operatorcli)
return
}

61 changes: 3 additions & 58 deletions pkg/cluster/tls.go
@@ -5,20 +5,16 @@ package cluster

import (
"context"
"crypto/x509"
"encoding/pem"

configv1 "github.com/openshift/api/config/v1"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"

"github.com/Azure/ARO-RP/pkg/env"
"github.com/Azure/ARO-RP/pkg/util/dns"
"github.com/Azure/ARO-RP/pkg/util/keyvault"
utilpem "github.com/Azure/ARO-RP/pkg/util/pem"
)

func (m *manager) createCertificates(ctx context.Context) error {
@@ -69,57 +65,6 @@ func (m *manager) createCertificates(ctx context.Context) error {
return nil
}

func (m *manager) ensureSecret(ctx context.Context, secrets corev1client.SecretInterface, certificateName string) error {
bundle, err := m.env.ClusterKeyvault().GetSecret(ctx, certificateName)
if err != nil {
return err
}

key, certs, err := utilpem.Parse([]byte(*bundle.Value))
if err != nil {
return err
}

b, err := x509.MarshalPKCS8PrivateKey(key)
if err != nil {
return err
}

var cb []byte
for _, cert := range certs {
cb = append(cb, pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})...)
}

_, err = secrets.Create(ctx, &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: certificateName,
},
Data: map[string][]byte{
corev1.TLSCertKey: cb,
corev1.TLSPrivateKeyKey: pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: b}),
},
Type: corev1.SecretTypeTLS,
}, metav1.CreateOptions{})
if kerrors.IsAlreadyExists(err) {
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
s, err := secrets.Get(ctx, certificateName, metav1.GetOptions{})
if err != nil {
return err
}

s.Data = map[string][]byte{
corev1.TLSCertKey: cb,
corev1.TLSPrivateKeyKey: pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: b}),
}
s.Type = corev1.SecretTypeTLS

_, err = secrets.Update(ctx, s, metav1.UpdateOptions{})
return err
})
}
return err
}

func (m *manager) configureAPIServerCertificate(ctx context.Context) error {
if m.env.FeatureIsSet(env.FeatureDisableSignedCertificates) {
return nil
@@ -135,7 +80,7 @@ func (m *manager) configureAPIServerCertificate(ctx context.Context) error {
}

for _, namespace := range []string{"openshift-config", "openshift-azure-operator"} {
err = m.ensureSecret(ctx, m.kubernetescli.CoreV1().Secrets(namespace), m.doc.ID+"-apiserver")
err = EnsureTLSSecretFromKeyvault(ctx, m.env.ClusterKeyvault(), m.ch, types.NamespacedName{Name: m.doc.ID + "-apiserver", Namespace: namespace}, m.doc.ID+"-apiserver")
if err != nil {
return err
}
@@ -178,7 +123,7 @@ func (m *manager) configureIngressCertificate(ctx context.Context) error {
}

for _, namespace := range []string{"openshift-ingress", "openshift-azure-operator"} {
err = m.ensureSecret(ctx, m.kubernetescli.CoreV1().Secrets(namespace), m.doc.ID+"-ingress")
err = EnsureTLSSecretFromKeyvault(ctx, m.env.ClusterKeyvault(), m.ch, types.NamespacedName{Namespace: namespace, Name: m.doc.ID + "-ingress"}, m.doc.ID+"-ingress")
if err != nil {
return err
}
24 changes: 12 additions & 12 deletions pkg/operator/deploy/deploy.go
@@ -29,7 +29,6 @@ import (
appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

"github.com/Azure/ARO-RP/pkg/api"
@@ -39,6 +38,7 @@ import (
arov1alpha1 "github.com/Azure/ARO-RP/pkg/operator/apis/aro.openshift.io/v1alpha1"
aroclient "github.com/Azure/ARO-RP/pkg/operator/clientset/versioned"
"github.com/Azure/ARO-RP/pkg/operator/controllers/genevalogging"
"github.com/Azure/ARO-RP/pkg/util/clienthelper"
"github.com/Azure/ARO-RP/pkg/util/dynamichelper"
utilkubernetes "github.com/Azure/ARO-RP/pkg/util/kubernetes"
utilpem "github.com/Azure/ARO-RP/pkg/util/pem"
@@ -63,20 +63,20 @@ type Operator interface {
}

type operator struct {
log *logrus.Entry
env env.Interface
oc *api.OpenShiftCluster

arocli aroclient.Interface
client client.Client
extensionscli extensionsclient.Interface
kubernetescli kubernetes.Interface
operatorcli operatorclient.Interface
dh dynamichelper.Interface
log *logrus.Entry
env env.Interface
oc *api.OpenShiftCluster
subscriptiondoc *api.SubscriptionDocument

arocli aroclient.Interface
client clienthelper.Interface
extensionscli extensionsclient.Interface
kubernetescli kubernetes.Interface
operatorcli operatorclient.Interface
dh dynamichelper.Interface
}

func New(log *logrus.Entry, env env.Interface, oc *api.OpenShiftCluster, arocli aroclient.Interface, client client.Client, extensionscli extensionsclient.Interface, kubernetescli kubernetes.Interface, operatorcli operatorclient.Interface, subscriptionDoc *api.SubscriptionDocument) (Operator, error) {
func New(log *logrus.Entry, env env.Interface, oc *api.OpenShiftCluster, subscriptionDoc *api.SubscriptionDocument, arocli aroclient.Interface, client clienthelper.Interface, extensionscli extensionsclient.Interface, kubernetescli kubernetes.Interface, operatorcli operatorclient.Interface) (Operator, error) {
restConfig, err := restconfig.RestConfig(env, oc)
if err != nil {
return nil, err
6 changes: 4 additions & 2 deletions pkg/operator/deploy/deploy_test.go
@@ -12,6 +12,7 @@ import (
configv1 "github.com/openshift/api/config/v1"
operatorv1 "github.com/openshift/api/operator/v1"
operatorfake "github.com/openshift/client-go/operator/clientset/versioned/fake"
"github.com/sirupsen/logrus"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -20,6 +21,7 @@ import (

"github.com/Azure/ARO-RP/pkg/api"
pkgoperator "github.com/Azure/ARO-RP/pkg/operator"
"github.com/Azure/ARO-RP/pkg/util/clienthelper"
"github.com/Azure/ARO-RP/pkg/util/cmp"
mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env"
utilerror "github.com/Azure/ARO-RP/test/util/error"
@@ -233,7 +235,7 @@ func TestCreateDeploymentData(t *testing.T) {
o := operator{
oc: oc,
env: env,
client: ctrlfake.NewClientBuilder().WithObjects(cv).Build(),
client: clienthelper.NewWithClient(logrus.NewEntry(logrus.StandardLogger()), ctrlfake.NewClientBuilder().WithObjects(cv).Build()),
}

deploymentData, err := o.createDeploymentData(ctx)
@@ -306,7 +308,7 @@ func TestOperatorVersion(t *testing.T) {
o := &operator{
oc: &api.OpenShiftCluster{Properties: *oc},
env: _env,
client: ctrlfake.NewClientBuilder().WithObjects(cv).Build(),
client: clienthelper.NewWithClient(logrus.NewEntry(logrus.StandardLogger()), ctrlfake.NewClientBuilder().WithObjects(cv).Build()),
}

staticResources, err := o.createObjects(ctx)
2 changes: 1 addition & 1 deletion pkg/operator/helpers.go
@@ -23,7 +23,7 @@ func GatewayEnabled(cluster *arov1alpha1.Cluster) bool {
// ShouldUsePodSecurityStandard is an admissions controller
// for pods which replaces pod security policies, enabled on
// OpenShift 4.11 and up
func ShouldUsePodSecurityStandard(ctx context.Context, client client.Client) (bool, error) {
func ShouldUsePodSecurityStandard(ctx context.Context, client client.Reader) (bool, error) {
cv := &configv1.ClusterVersion{}

err := client.Get(ctx, types.NamespacedName{Name: "version"}, cv)
44 changes: 21 additions & 23 deletions pkg/util/clienthelper/clienthelper.go
@@ -25,7 +25,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
@@ -34,35 +33,34 @@ import (
_ "github.com/Azure/ARO-RP/pkg/util/scheme"
)

type Interface interface {
EnsureDeleted(ctx context.Context, gvk schema.GroupVersionKind, key types.NamespacedName) error
type Writer interface {
client.Writer
// Ensure applies self-contained objects to a Kubernetes API, merging
Contributor

I know you're not changing this in the PR, so this is not a blocker here, but we should use server-side apply instead of whatever client-side logic exists, as it's guaranteed to be a better implementation :)
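
For reference, an Ensure step built on server-side apply with controller-runtime could look roughly like this sketch; it is not what this PR implements, the ensureOneSSA name and the "aro-rp" field manager are invented here, and it assumes the object has its GroupVersionKind populated:

// ensureOneSSA hands the whole merge problem to the API server via
// server-side apply, replacing the client-side get/compare/update loop.
func (ch *clientHelper) ensureOneSSA(ctx context.Context, obj client.Object) error {
    return ch.Patch(ctx, obj, client.Apply,
        client.FieldOwner("aro-rp"),
        client.ForceOwnership)
}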

Collaborator Author

I would love to use server-side apply, but the fake implementation of controller-runtime doesn't yet support it :(

Refs:

Contributor

No comment, just waving at @hawkowl 👋🏽

Contributor

Yes, the controller-runtime fakes are horrendous and IMO moving away from them as soon as possible is also a great boon to the project :)

SSA is very complex and faking it is likely not going to happen - not even just in some soon time-frame, but perhaps ever. Client fakes have fallen out of favor and everyone has a horror story of working around the mess that is envtest without gaining any confidence in their application functioning in the real world against a real API server. Even past that, the concerns Alvaro has in Troy's PR are a mountain of work that the upstream SIGs are not interested in taking up or sponsoring, from my understanding.

This is neither here nor there, but you might want to consider not putting SSA support in the controller-runtime fakes as a precondition to using this (amazingly useful and beneficial) technology in your production deployments.
 
See an example of how to test controllers or similar using static data in, static data out here: https://github.com/openshift/hypershift/blob/main/control-plane-pki-operator/certificaterevocationcontroller/certificaterevocationcontroller_test.go

Contributor

The issue, beyond the requisite work, is the apply types that are not scaffolded by crd/operator-sdk/kubebuilder. That work is a side effect of the controller-runtime client not implementing this, whereas client-go has the necessary logic.

The interface client being used here is the mechanism where that can't be utilized. I have no context around what this is trying to accomplish, but if it is indeed using corev1 types where a kubernetes.Interface can be used, those applyConfigurations are available to fulfil server-side apply.

The faking of the clients is tracked with a "tracker", and trying to track a server-side apply in any client would mean holding state for things that aren't the client's responsibility. Having a test suite where that can happen seems outside the scope of what the client/fakes could do.
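
Assuming a kubernetes.Interface is available, that route could look roughly like this for the TLS secret handled in this PR; the applyTLSSecret helper and the "aro-rp" field manager are invented for illustration:

import (
    "context"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
    "k8s.io/client-go/kubernetes"
)

// applyTLSSecret server-side-applies the TLS secret using client-go's generated
// apply configurations, so the API server owns the merge logic.
func applyTLSSecret(ctx context.Context, cli kubernetes.Interface, target types.NamespacedName, certPEM, keyPEM []byte) error {
    cfg := applycorev1.Secret(target.Name, target.Namespace).
        WithType(corev1.SecretTypeTLS).
        WithData(map[string][]byte{
            corev1.TLSCertKey:       certPEM,
            corev1.TLSPrivateKeyKey: keyPEM,
        })
    _, err := cli.CoreV1().Secrets(target.Namespace).Apply(ctx, cfg, metav1.ApplyOptions{FieldManager: "aro-rp", Force: true})
    return err
}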

Contributor

I have worked with the controller-runtime ecosystem and upstream Kubernetes client-sets for many years, so I'm pretty familiar with the structure. I don't think I communicated the "static data in, static data out" link well - my suggestion is not that some client helper hold on to apply configurations, nor is it to somehow teach the controller-runtime client how to apply.

In case we get too far in the weeds - I just want to re-iterate that the comments here are not review comments for this PR, just a general comment. I'm not suggesting any changes are made right now.


What I am suggesting, though, is that operator reconciliation loops are written as pure functions - either take some set of cluster state as input, or allow dependency injection to take producers of cluster state as input, and produce state as output. Apply configurations have a wonderful property in that they encode your intent in static data - so, testing your lower-case r reconcile() is straightforward, and no mocks of any persuasion are necessary.

The top-level Reconcile() might look like:

func (r *reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    // assemble the data necessary by using listers, etc
    input := r.getInput()

    // transform input state into intended state
    intent, err := r.reconcile(input)
    if err != nil {
        return ctrl.Result{}, err
    }

    // realize intended state onto the cluster
    return r.execute(intent)
}

Where r.execute(intent) takes apply configurations and uses a kubernetes.Clientset to call Apply.
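
To make "static data in, static data out" concrete, here is a deliberately tiny, self-contained sketch of testing such a pure reconcile step; every name in it is invented for illustration and none of it comes from this repository:

package reconcile

import (
    "testing"

    "github.com/google/go-cmp/cmp"
)

// reconcileInput and secretIntent stand in for observed cluster state and
// intended state respectively.
type reconcileInput struct{ CertPresent bool }
type secretIntent struct{ CreateSecret bool }

// reconcile is a pure function: observed state in, intended state out.
func reconcile(in reconcileInput) secretIntent {
    return secretIntent{CreateSecret: !in.CertPresent}
}

func TestReconcile(t *testing.T) {
    got := reconcile(reconcileInput{CertPresent: false})
    want := secretIntent{CreateSecret: true}
    if diff := cmp.Diff(want, got); diff != "" {
        t.Errorf("unexpected intent (-want +got):\n%s", diff)
    }
}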


In my experience, relying heavily on mocks leads many teams to stumble into deeply frustrating issues, often many years down the line. Examples I've seen:

  • implementing highly-performant caching using metadata-only watches requires 100% of tests to be rewritten, as the fakes don't know how to propagate data between the metadata fake and the "real" fake
  • simplifying controller code to reduce maintenance burden and eliminate incorrect client-side patching by using server-side apply means all unit tests fail
  • fragile assumptions about the number of times your reconciliation is called and when, in what order, etc, lead integration tests to be a poor substitute for end-to-end testing against actual API servers, leading to test duplication
  • fakes make many highly surprising decisions, adding mental overhead and taking valuable productivity away from the team, like:
    • faking out field selectors (usually handled in the server) with indices, which requires test code to be written to create indices
    • objects with deletion timestamps but no finalizers cause some internal panic even though it's totally valid for your controller to see one in production
    • fake clients need subresources registered manually

Teams that spend time writing unit tests for unit-level concerns and do not reach to mocks, in my experience, move faster and get a higher RoI on their tests. End-to-end tests and chaos tests validate production modalities that integration tests attempt to, but cannot.

@AldoFusterTurpin (Aug 7, 2024)

@stevekuznetsov I completely agree with that. 🥇

Even though I am by far the one less experienced in this conversation about this domain, I have seen similar problems in other domains that made me resonate a lot with your comment while reading it.

Sometimes we focus too much on trying to reproduce the state of the world where our app will run, and we forget that we should primarily test what our app does, and what our business logic is. It is almost always much simpler and easier to understand to just have our business logic in pure functions that expect input data and produce output data. We then forget about how this data will be gathered and what will be done with the result, and we can simply ensure that domainLogicFn(x) == expectedOutput.

This is a common concern that applies to a lot of different domains, but it is pretty common in k8s because most of the time the business logic gets mixed up with, or lost in, the effort to understand/simulate the "state" where all that business logic will happen (due to the stateful nature of k8s controllers). So I just want to reaffirm that avoiding complex mocks when possible with pure functions is a great idea, and not just in this context 🙂

// client-side if required.
Ensure(ctx context.Context, objs ...kruntime.Object) error
GetOne(ctx context.Context, key types.NamespacedName, obj kruntime.Object) error
EnsureDeleted(ctx context.Context, gvk schema.GroupVersionKind, key types.NamespacedName) error
}

type clientHelper struct {
log *logrus.Entry
type Reader interface {
client.Reader
GetOne(ctx context.Context, key types.NamespacedName, obj kruntime.Object) error
}

client client.Client
type Interface interface {
Reader
Writer
}

func New(log *logrus.Entry, restconfig *rest.Config) (Interface, error) {
mapper, err := apiutil.NewDynamicRESTMapper(restconfig, apiutil.WithLazyDiscovery)
if err != nil {
return nil, err
}
type clientHelper struct {
client.Client

client, err := client.New(restconfig, client.Options{Mapper: mapper})
if err != nil {
return nil, err
}
return NewWithClient(log, client), nil
log *logrus.Entry
}

func NewWithClient(log *logrus.Entry, client client.Client) Interface {
return &clientHelper{
log: log,
client: client,
Client: client,
}
}

@@ -74,7 +72,7 @@ func (ch *clientHelper) EnsureDeleted(ctx context.Context, gvk schema.GroupVersi
a.SetGroupVersionKind(gvk)

ch.log.Infof("Delete kind %s ns %s name %s", gvk.Kind, key.Namespace, key.Name)
err := ch.client.Delete(ctx, a)
err := ch.Delete(ctx, a)
if kerrors.IsNotFound(err) {
return nil
}
@@ -87,7 +85,7 @@ func (ch *clientHelper) GetOne(ctx context.Context, key types.NamespacedName, ob
return errors.New("can't convert object")
}

return ch.client.Get(ctx, key, newObj)
return ch.Get(ctx, key, newObj)
}

// Ensure that one or more objects match their desired state. Only update
@@ -125,10 +123,10 @@ func (ch *clientHelper) ensureOne(ctx context.Context, new kruntime.Object) erro
return fmt.Errorf("object of kind %s can't be made a client.Object", gvk.String())
}

err = ch.client.Get(ctx, client.ObjectKey{Namespace: newObj.GetNamespace(), Name: newObj.GetName()}, oldObj)
err = ch.Get(ctx, client.ObjectKey{Namespace: newObj.GetNamespace(), Name: newObj.GetName()}, oldObj)
if kerrors.IsNotFound(err) {
ch.log.Infof("Create %s", keyFunc(gvk.GroupKind(), newObj.GetNamespace(), newObj.GetName()))
return ch.client.Create(ctx, newObj)
return ch.Create(ctx, newObj)
}
if err != nil {
return err
@@ -138,7 +136,7 @@ func (ch *clientHelper) ensureOne(ctx context.Context, new kruntime.Object) erro
return err
}
ch.log.Infof("Update %s: %s", keyFunc(gvk.GroupKind(), candidate.GetNamespace(), candidate.GetName()), diff)
return ch.client.Update(ctx, candidate)
return ch.Update(ctx, candidate)
})
}
