Skip to content

Only reconcile owned deployments #424

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Oct 22, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions pkg/operator/baremetal_pod.go
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,9 @@ func newMetal3Deployment(config *OperatorConfig) *appsv1.Deployment {
ObjectMeta: metav1.ObjectMeta{
Name: "metal3",
Namespace: config.TargetNamespace,
Annotations: map[string]string{
maoOwnedAnnotation: "",
},
Labels: map[string]string{
"api": "clusterapi",
"k8s-app": "controller",
Expand Down
70 changes: 65 additions & 5 deletions pkg/operator/operator.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,8 @@ const (
// a machineconfig pool is going to be requeued:
//
// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
maxRetries = 15
maxRetries = 15
maoOwnedAnnotation = "machine.openshift.io/owned"
)

// Operator defines machine api operator.
Expand Down Expand Up @@ -88,7 +89,7 @@ func New(
operandVersions: operandVersions,
}

deployInformer.Informer().AddEventHandler(optr.eventHandler())
deployInformer.Informer().AddEventHandler(optr.eventHandlerDeployments())
featureGateInformer.Informer().AddEventHandler(optr.eventHandler())

optr.config = config
Expand Down Expand Up @@ -125,13 +126,72 @@ func (optr *Operator) Run(workers int, stopCh <-chan struct{}) {
<-stopCh
}

// logResource emits verbose (V4) debug logging for an informer event's
// object: its dynamic type and, when it implements metav1.Object, its
// self link.
func logResource(obj interface{}) {
	metaObj, okObject := obj.(metav1.Object)
	if !okObject {
		glog.Errorf("Error assigning type to interface when logging")
		// Without this return the nil metaObj below would be
		// dereferenced via GetSelfLink() and panic.
		return
	}
	glog.V(4).Infof("Resource type: %T", obj)
	glog.V(4).Infof("Resource: %v", metaObj.GetSelfLink())
}

// eventHandler returns a generic informer event handler that logs each
// add/update/delete event at V4 and enqueues the operator's single
// namespace/name work-queue key so the sync loop runs.
func (optr *Operator) eventHandler() cache.ResourceEventHandler {
	workQueueKey := fmt.Sprintf("%s/%s", optr.namespace, optr.name)
	// Note: the struct literal must define each handler exactly once;
	// duplicate AddFunc/UpdateFunc/DeleteFunc keys do not compile.
	return cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			glog.V(4).Infof("Event: Add")
			logResource(obj)
			optr.queue.Add(workQueueKey)
		},
		UpdateFunc: func(old, new interface{}) {
			glog.V(4).Infof("Event: Update")
			logResource(old)
			optr.queue.Add(workQueueKey)
		},
		DeleteFunc: func(obj interface{}) {
			glog.V(4).Infof("Event: Delete")
			logResource(obj)
			optr.queue.Add(workQueueKey)
		},
	}
}

// eventHandlerDeployments returns the informer event handler used for
// deployments: add events always enqueue a sync, while update and delete
// events enqueue one only when the deployment is owned by the
// machine-api-operator (carries maoOwnedAnnotation).
func (optr *Operator) eventHandlerDeployments() cache.ResourceEventHandler {
	workQueueKey := fmt.Sprintf("%s/%s", optr.namespace, optr.name)

	// enqueueIfOwned queues a sync only for MAO-owned objects; lookup
	// failures (wrong type) are treated the same as "not owned".
	enqueueIfOwned := func(obj interface{}) {
		if owned, err := isOwned(obj); err == nil && owned {
			optr.queue.Add(workQueueKey)
		}
	}

	return cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			glog.V(4).Infof("Event: Add")
			logResource(obj)
			optr.queue.Add(workQueueKey)
		},
		UpdateFunc: func(old, new interface{}) {
			glog.V(4).Infof("Event: Update")
			logResource(old)
			enqueueIfOwned(old)
		},
		DeleteFunc: func(obj interface{}) {
			glog.V(4).Infof("Event: Delete")
			logResource(obj)
			enqueueIfOwned(obj)
		},
	}
}

// isOwned reports whether obj carries the machine-api-operator ownership
// annotation (maoOwnedAnnotation). It returns an error when obj does not
// implement metav1.Object and therefore exposes no annotations.
func isOwned(obj interface{}) (bool, error) {
	metaObj, okObject := obj.(metav1.Object)
	if !okObject {
		// Return the error rather than logging it here as well:
		// log-and-return reports the same failure twice. The caller
		// decides how to surface it.
		return false, fmt.Errorf("error assigning metav1.Object type to object: %T", obj)
	}
	// Ownership is signaled by the annotation key's presence; its value
	// is ignored (the operator sets it to the empty string).
	_, owned := metaObj.GetAnnotations()[maoOwnedAnnotation]
	return owned, nil
}

func (optr *Operator) worker() {
Expand Down
101 changes: 75 additions & 26 deletions pkg/operator/operator_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,18 +5,18 @@ import (
"testing"
"time"

openshiftv1 "github.com/openshift/api/config/v1"
fakeos "github.com/openshift/client-go/config/clientset/versioned/fake"
configinformersv1 "github.com/openshift/client-go/config/informers/externalversions"
"github.com/stretchr/testify/assert"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
fakekube "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"

v1 "github.com/openshift/api/config/v1"
fakeos "github.com/openshift/client-go/config/clientset/versioned/fake"
configinformersv1 "github.com/openshift/client-go/config/informers/externalversions"
"github.com/stretchr/testify/assert"
)

const (
Expand All @@ -25,12 +25,12 @@ const (
hcControllerName = "machine-healthcheck-controller"
)

func newFeatureGate(featureSet v1.FeatureSet) *v1.FeatureGate {
return &v1.FeatureGate{
func newFeatureGate(featureSet openshiftv1.FeatureSet) *openshiftv1.FeatureGate {
return &openshiftv1.FeatureGate{
ObjectMeta: metav1.ObjectMeta{
Name: MachineAPIFeatureGateName,
},
Spec: v1.FeatureGateSpec{
Spec: openshiftv1.FeatureGateSpec{
FeatureSet: featureSet,
},
}
Expand Down Expand Up @@ -75,7 +75,7 @@ func newFakeOperator(kubeObjects []runtime.Object, osObjects []runtime.Object, s
kubeNamespacedSharedInformer.Start(stopCh)

optr.syncHandler = optr.sync
deployInformer.Informer().AddEventHandler(optr.eventHandler())
deployInformer.Informer().AddEventHandler(optr.eventHandlerDeployments())
featureGateInformer.Informer().AddEventHandler(optr.eventHandler())

return optr
Expand All @@ -85,43 +85,43 @@ func newFakeOperator(kubeObjects []runtime.Object, osObjects []runtime.Object, s
// for platforms that are no-ops.
func TestOperatorSync_NoOp(t *testing.T) {
cases := []struct {
platform v1.PlatformType
platform openshiftv1.PlatformType
expectedNoop bool
}{
{
platform: v1.AWSPlatformType,
platform: openshiftv1.AWSPlatformType,
expectedNoop: false,
},
{
platform: v1.LibvirtPlatformType,
platform: openshiftv1.LibvirtPlatformType,
expectedNoop: false,
},
{
platform: v1.OpenStackPlatformType,
platform: openshiftv1.OpenStackPlatformType,
expectedNoop: false,
},
{
platform: v1.AzurePlatformType,
platform: openshiftv1.AzurePlatformType,
expectedNoop: false,
},
{
platform: v1.BareMetalPlatformType,
platform: openshiftv1.BareMetalPlatformType,
expectedNoop: false,
},
{
platform: v1.GCPPlatformType,
platform: openshiftv1.GCPPlatformType,
expectedNoop: false,
},
{
platform: kubemarkPlatform,
expectedNoop: false,
},
{
platform: v1.VSpherePlatformType,
platform: openshiftv1.VSpherePlatformType,
expectedNoop: true,
},
{
platform: v1.NonePlatformType,
platform: openshiftv1.NonePlatformType,
expectedNoop: true,
},
{
Expand All @@ -132,17 +132,17 @@ func TestOperatorSync_NoOp(t *testing.T) {

for _, tc := range cases {
t.Run(string(tc.platform), func(t *testing.T) {
infra := &v1.Infrastructure{
infra := &openshiftv1.Infrastructure{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster",
},
Status: v1.InfrastructureStatus{
Status: openshiftv1.InfrastructureStatus{
Platform: tc.platform,
},
}

stopCh := make(<-chan struct{})
optr := newFakeOperator(nil, []runtime.Object{newFeatureGate(v1.TechPreviewNoUpgrade), infra}, stopCh)
optr := newFakeOperator(nil, []runtime.Object{newFeatureGate(openshiftv1.TechPreviewNoUpgrade), infra}, stopCh)
go optr.Run(2, stopCh)

err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
Expand All @@ -162,15 +162,64 @@ func TestOperatorSync_NoOp(t *testing.T) {
if !assert.NoError(t, err, "failed to get clusteroperator") {
t.Fatal()
}
expectedConditions := map[v1.ClusterStatusConditionType]v1.ConditionStatus{
v1.OperatorAvailable: v1.ConditionTrue,
v1.OperatorProgressing: v1.ConditionFalse,
v1.OperatorDegraded: v1.ConditionFalse,
v1.OperatorUpgradeable: v1.ConditionTrue,
expectedConditions := map[openshiftv1.ClusterStatusConditionType]openshiftv1.ConditionStatus{
openshiftv1.OperatorAvailable: openshiftv1.ConditionTrue,
openshiftv1.OperatorProgressing: openshiftv1.ConditionFalse,
openshiftv1.OperatorDegraded: openshiftv1.ConditionFalse,
openshiftv1.OperatorUpgradeable: openshiftv1.ConditionTrue,
}
for _, c := range o.Status.Conditions {
assert.Equal(t, expectedConditions[c.Type], c.Status, fmt.Sprintf("unexpected clusteroperator condition %s status", c.Type))
}
})
}
}

// TestIsOwned verifies isOwned: the ownership annotation's presence (with
// any value) yields true, its absence yields false, and a value that is
// not a metav1.Object yields (false, error).
func TestIsOwned(t *testing.T) {
	testCases := []struct {
		testCase      string
		obj           interface{}
		expected      bool
		expectedError bool
	}{
		{
			testCase: "with maoOwnedAnnotation returns true",
			obj: &appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{
					Annotations: map[string]string{
						maoOwnedAnnotation: "",
					},
				},
			},
			expected: true,
		},
		{
			testCase: "with no maoOwnedAnnotation returns false",
			obj: &appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{
					Annotations: map[string]string{
						"any": "",
					},
				},
			},
			expected: false,
		},
		{
			testCase:      "bad type object returns error",
			obj:           "bad object",
			expected:      false,
			expectedError: true,
		},
	}
	for _, tc := range testCases {
		// tc.testCase is already a string; the redundant string()
		// conversion was removed (staticcheck S1025).
		t.Run(tc.testCase, func(t *testing.T) {
			got, err := isOwned(tc.obj)
			if got != tc.expected {
				t.Errorf("Expected: %v, got: %v", tc.expected, got)
			}
			if tc.expectedError != (err != nil) {
				t.Errorf("ExpectedError: %v, got: %v", tc.expectedError, err)
			}
		})
	}
}
3 changes: 3 additions & 0 deletions pkg/operator/sync.go
Original file line number Diff line number Diff line change
Expand Up @@ -154,6 +154,9 @@ func newDeployment(config *OperatorConfig, features map[string]bool) *appsv1.Dep
ObjectMeta: metav1.ObjectMeta{
Name: "machine-api-controllers",
Namespace: config.TargetNamespace,
Annotations: map[string]string{
maoOwnedAnnotation: "",
},
Labels: map[string]string{
"api": "clusterapi",
"k8s-app": "controller",
Expand Down